Merge branch '2015.5' of https://github.com/saltstack/salt into fix_25802

This commit is contained in:
twangboy 2015-08-10 13:36:02 -06:00
commit f05e3e72a3
58 changed files with 6309 additions and 2228 deletions

View file

@ -483,9 +483,9 @@
# will be shown for each state run.
#state_output_profile: True
# Fingerprint of the master public key to double verify the master is valid,
# the master fingerprint can be found by running "salt-key -F master" on the
# salt master.
# Fingerprint of the master public key to validate the identity of your Salt master
# before the initial key exchange. The master fingerprint can be found by running
# "salt-key -F master" on the Salt master.
#master_finger: ''

View file

@ -295,7 +295,7 @@
<!--analytics-->
<script type="text/javascript" language="javascript">llactid=23943</script>
<script type="text/javascript" language="javascript" src="http://t6.trackalyzer.com/trackalyze.js"></script>
<script type="text/javascript" language="javascript" src="https://trackalyzer.com/trackalyze_secure.js"></script>
<script>
var _gaq = _gaq || [];

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-API" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-API" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-api \- salt-api Command
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CALL" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-CALL" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-call \- salt-call Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CLOUD" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-CLOUD" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-cloud \- Salt Cloud Command
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-CP" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-CP" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-cp \- salt-cp Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-KEY" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-KEY" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-key \- salt-key Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MASTER" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-MASTER" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-master \- salt-master Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-MINION" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-MINION" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-minion \- salt-minion Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-RUN" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-RUN" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-run \- salt-run Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SSH" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-SSH" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-ssh \- salt-ssh Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-SYNDIC" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-SYNDIC" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-syndic \- salt-syndic Documentation
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT-UNITY" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT-UNITY" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt-unity \- salt-unity Command
.

View file

@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SALT" "1" "June 26, 2015" "2015.5.2" "Salt"
.TH "SALT" "1" "August 10, 2015" "2015.5.4" "Salt"
.SH NAME
salt \- salt
.

File diff suppressed because it is too large Load diff

View file

@ -101,6 +101,41 @@ Running Salt
There is also a full :doc:`troubleshooting guide</topics/troubleshooting/index>`
available.
.. _key-identity:
Key Identity
============
Salt provides commands to validate the identity of your Salt master
and Salt minions before the initial key exchange. Validating key identity helps
avoid inadvertently connecting to the wrong Salt master, and helps prevent
a potential MiTM attack when establishing the initial connection.
Master Key Fingerprint
----------------------
Print the master key fingerprint by running the following command on the Salt master:
.. code-block:: bash
salt-key -F master
Copy the ``master.pub`` fingerprint from the *Local Keys* section, and then set this value
as the :conf_minion:`master_finger` in the minion configuration file. Save the configuration
file and then restart the Salt minion.
Minion Key Fingerprint
----------------------
Run the following command on each Salt minion to view the minion key fingerprint:
.. code-block:: bash
salt-call --local key.finger
Compare this value to the value that is displayed when you run the
``salt-key --finger <MINION_ID>`` command on the Salt master.
Key Management
==============

View file

@ -868,6 +868,21 @@ minion to clean the keys.
open_mode: False
.. conf_minion:: master_finger
``master_finger``
-----------------
Default: ``''``
Fingerprint of the master public key to validate the identity of your Salt master
before the initial key exchange. The master fingerprint can be found by running
"salt-key -F master" on the Salt master.
.. code-block:: yaml
master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13'
.. conf_minion:: verify_master_pubkey_sign

View file

@ -34,22 +34,24 @@ set up in the cloud configuration at
my-vmware-config:
provider: vmware
user: "DOMAIN\user"
password: "verybadpass"
url: "vcenter01.domain.com"
user: 'DOMAIN\user'
password: 'verybadpass'
url: '10.20.30.40'
vmware-vcenter02:
vcenter01:
provider: vmware
user: "DOMAIN\user"
password: "verybadpass"
url: "vcenter02.domain.com"
user: 'DOMAIN\user'
password: 'verybadpass'
url: 'vcenter01.domain.com'
protocol: 'https'
port: 443
vmware-vcenter03:
vcenter02:
provider: vmware
user: "DOMAIN\user"
password: "verybadpass"
url: "vcenter03.domain.com"
protocol: "http"
user: 'DOMAIN\user'
password: 'verybadpass'
url: 'vcenter02.domain.com'
protocol: 'http'
port: 80
.. note::
@ -68,7 +70,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or
.. code-block:: yaml
vmware-centos6.5:
provider: vmware-vcenter01
provider: vcenter01
clonefrom: test-vm
## Optional arguments

View file

@ -104,48 +104,87 @@ Token expiration time can be set in the Salt master config file.
LDAP and Active Directory
-------------------------
=========================
Salt supports both user and group authentication for LDAP (and Active Directory
accessed via its LDAP interface)
OpenLDAP and similar systems
----------------------------
LDAP configuration happens in the Salt master configuration file.
Server configuration values and their defaults:
.. code-block:: yaml
# Server to auth against
auth.ldap.server: localhost
# Port to connect via
auth.ldap.port: 389
# Use TLS when connecting
auth.ldap.tls: False
# LDAP scope level, almost always 2
auth.ldap.scope: 2
auth.ldap.uri: ''
auth.ldap.tls: False
# Server specified in URI format
auth.ldap.uri: '' # Overrides .ldap.server, .ldap.port, .ldap.tls above
# Verify server's TLS certificate
auth.ldap.no_verify: False
# Bind to LDAP anonymously to determine group membership
# Active Directory does not allow anonymous binds without special configuration
auth.ldap.anonymous: False
# FOR TESTING ONLY, this is a VERY insecure setting.
# If this is True, the LDAP bind password will be ignored and
# access will be determined by group membership alone with
# the group memberships being retrieved via anonymous bind
auth.ldap.auth_by_group_membership_only: False
# Require authenticating user to be part of this Organizational Unit
# This can be blank if your LDAP schema does not use this kind of OU
auth.ldap.groupou: 'Groups'
# Object Class for groups. An LDAP search will be done to find all groups of this
# class to which the authenticating user belongs.
auth.ldap.groupclass: 'posixGroup'
# Unique ID attribute name for the user
auth.ldap.accountattributename: 'memberUid'
# These are only for Active Directory
auth.ldap.activedirectory: False
auth.ldap.persontype: 'person'
Salt also needs to know which Base DN to search for users and groups and
the DN to bind to:
There are two phases to LDAP authentication. First, Salt authenticates to search for a users's Distinguished Name
and group membership. The user it authenticates as in this phase is often a special LDAP system user with
read-only access to the LDAP directory. After Salt searches the directory to determine the actual user's DN
and groups, it re-authenticates as the user running the Salt commands.
If you are already aware of the structure of your DNs and permissions in your LDAP store are set such that
users can look up their own group memberships, then the first and second users can be the same. To tell Salt this is
the case, omit the ``auth.ldap.bindpw`` parameter. You can template the binddn like this:
.. code-block:: yaml
auth.ldap.basedn: dc=saltstack,dc=com
auth.ldap.binddn: cn=admin,dc=saltstack,dc=com
auth.ldap.binddn: uid={{ username }},cn=users,cn=accounts,dc=saltstack,dc=com
To bind to a DN, a password is required
Salt will use the password entered on the salt command line in place of the bindpw.
To use two separate users, specify the LDAP lookup user in the binddn directive, and include a bindpw like so
.. code-block:: yaml
auth.ldap.binddn: uid=ldaplookup,cn=sysaccounts,cn=etc,dc=saltstack,dc=com
auth.ldap.bindpw: mypassword
Salt uses a filter to find the DN associated with a user. Salt
As mentioned before, Salt uses a filter to find the DN associated with a user. Salt
substitutes the ``{{ username }}`` value for the username when querying LDAP
.. code-block:: yaml
@ -161,6 +200,9 @@ the results are filtered against ``auth.ldap.groupclass``, default
auth.ldap.groupou: Groups
Active Directory
----------------
Active Directory handles group membership differently, and does not utilize the
``groupou`` configuration variable. AD needs the following options in
the master config:
@ -186,7 +228,7 @@ of the user is looked up with the following LDAP search:
)
This should return a distinguishedName that we can use to filter for group
membership. Then the following LDAP query is executed:
membership. Then the following LDAP query is executed:
.. code-block:: text

View file

@ -184,13 +184,13 @@ configuration file:
.. code-block:: yaml
schedule:
overstate:
function: state.over
orchestrate:
function: state.orchestrate
seconds: 35
minutes: 30
hours: 3
The above configuration will execute the state.over runner every 3 hours,
The above configuration will execute the state.orchestrate runner every 3 hours,
30 minutes and 35 seconds, or every 12,635 seconds.
Scheduler With Returner

View file

@ -39,7 +39,7 @@ any way with the minion that started it.
To create support for a proxied device one needs to create four things:
1. The `proxytype connection class`_ (located in salt/proxy).
1. The `proxy_connection_module`_ (located in salt/proxy).
2. The `grains support code`_ (located in salt/grains).
3. :ref:`Salt modules <all-salt.modules>` specific to the controlled
device.
@ -156,118 +156,289 @@ to control a particular device. That proxy-minion process will initiate
a connection back to the master to enable control.
.. _proxytype connection class:
.. _proxy_connection_module:
Proxytypes
##########
Proxymodules
############
A proxytype is a Python class called 'Proxyconn' that encapsulates all the code
necessary to interface with a device. Proxytypes are located inside the
salt.proxy module. At a minimum a proxytype object must implement the
following methods:
A proxy module encapsulates all the code necessary to interface with a device.
Proxymodules are located inside the salt.proxy module. At a minimum
a proxymodule object must implement the following functions:
``proxytype(self)``: Returns a string with the name of the proxy type.
``__virtual__()``: This function performs the same duty that it does for other
types of Salt modules. Logic goes here to determine if the module can be
loaded, checking for the presence of Python modules on which the proxy deepends.
Returning ``False`` will prevent the module from loading.
``proxyconn(self, **kwargs)``: Provides the primary way to connect and communicate
with the device. Some proxyconns instantiate a particular object that opens a
network connection to a device and leaves the connection open for communication.
Others simply abstract a serial connection or even implement endpoints to communicate
via REST over HTTP.
``init(opts)``: Perform any initialization that the device needs. This is
a good place to bring up a persistent connection to a device, or authenticate
to create a persistent authorization token.
``id(self, opts)``: Returns a unique, unchanging id for the controlled device. This is
``id(opts)``: Returns a unique, unchanging id for the controlled device. This is
the "name" of the device, and is used by the salt-master for targeting and key
authentication.
Optionally, the class may define a ``shutdown(self, opts)`` method if the
controlled device should be informed when the minion goes away cleanly.
``shutdown()``: Code to cleanly shut down or close a connection to
a controlled device goes here. This function must exist, but can contain only
the keyword ``pass`` if there is no shutdown logic required.
It is highly recommended that the ``test.ping`` execution module also be defined
for a proxytype. The code for ``ping`` should contact the controlled device and make
sure it is really available.
``ping()``: While not required, it is highly recommended that this function also
be defined in the proxymodule. The code for ``ping`` should contact the
controlled device and make sure it is really available.
Here is an example proxytype used to interface to Juniper Networks devices that run
the Junos operating system. Note the additional library requirements--most of the
"hard part" of talking to these devices is handled by the jnpr.junos, jnpr.junos.utils,
and jnpr.junos.cfg modules.
Here is an example proxymodule used to interface to a *very* simple REST
server. Code for the server is in the `salt-contrib GitHub repository <https://github.com/saltstack/salt-contrib/proxyminion_rest_example>`_
This proxymodule enables "service" enumration, starting, stopping, restarting,
and status; "package" installation, and a ping.
.. code-block:: python
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
the bottle-based web service contained in
https://github.com/saltstack/salt-contrib/proxyminion_rest_example
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
import salt.utils.http
import jnpr.junos
import jnpr.junos.utils
import jnpr.junos.cfg
HAS_JUNOS = True
HAS_REST_EXAMPLE = True
class Proxyconn(object):
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['rest_sample']
def __init__(self, details):
self.conn = jnpr.junos.Device(user=details['username'], host=details['host'], password=details['passwd'])
self.conn.open()
self.conn.bind(cu=jnpr.junos.cfg.Resource)
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
GRAINS_CACHE = {}
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
def proxytype(self):
return 'junos'
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
log.debug('rest_sample proxy __virtual__() called...')
return True
# Every proxy module needs an 'init', though you can
# just put a 'pass' here if it doesn't need to do anything.
def init(opts):
log.debug('rest_sample proxy init() called...')
# Save the REST URL
DETAILS['url'] = opts['proxy']['url']
# Make sure the REST URL ends with a '/'
if not DETAILS['url'].endswith('/'):
DETAILS['url'] += '/'
def id(self, opts):
return self.conn.facts['hostname']
def id(opts):
'''
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.
If it changes while the proxy is running the salt-master will get
really confused and may stop talking to this minion
'''
r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True)
return r['dict']['id'].encode('ascii', 'ignore')
def ping(self):
return self.conn.connected
def grains():
'''
Get the grains from the proxied device
'''
if not GRAINS_CACHE:
r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True)
GRAINS_CACHE = r['dict']
return GRAINS_CACHE
def shutdown(self, opts):
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
GRAINS_CACHE = {}
return grains()
print('Proxy module {} shutting down!!'.format(opts['id']))
try:
self.conn.close()
except Exception:
pass
def service_start(name):
'''
Start a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True)
return r['dict']
def service_stop(name):
'''
Stop a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True)
return r['dict']
def service_restart(name):
'''
Restart a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True)
return r['dict']
def service_list():
'''
List "services" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True)
return r['dict']
def service_status(name):
'''
Check if a service is running on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True)
return r['dict']
def package_list():
'''
List "packages" installed on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True)
return r['dict']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
cmd = DETAILS['url']+'package/install/'+name
if 'version' in kwargs:
cmd += '/'+kwargs['version']
else:
cmd += '/1.0'
r = salt.utils.http.query(cmd, decode_type='json', decode=True)
def package_remove(name):
'''
Remove a "package" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True)
return r['dict']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True)
return r['dict']
def ping():
'''
Is the REST server up?
'''
r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True)
try:
return r['dict'].get('ret', False)
except Exception:
return False
def shutdown(opts):
'''
For this proxy shutdown is a no-op
'''
log.debug('rest_sample proxy shutdown() called...')
pass
.. _grains support code:
Grains are data about minions. Most proxied devices will have a paltry amount
of data as compared to a typical Linux server. Because proxy-minions are
started by a regular minion, they inherit a sizeable number of grain settings
which can be useful, especially when targeting (PYTHONPATH, for example).
of data as compared to a typical Linux server. By default, a proxy minion will
have no grains set at all. Salt core code requires values for ``kernel``,
``os``, and ``os_family``. To add them (and others) to your proxy minion for
a particular device, create a file in salt/grains named [proxytype].py and place
inside it the different functions that need to be run to collect the data you
are interested in. Here's an example:
All proxy minions set a grain called 'proxy'. If it is present, you know the
minion is controlling another device. To add more grains to your proxy minion
for a particular device, create a file in salt/grains named [proxytype].py and
place inside it the different functions that need to be run to collect the data
you are interested in. Here's an example:
.. code: python::
# -*- coding: utf-8 -*-
'''
Generate baseline proxy minion grains
'''
__proxyenabled__ = ['rest_sample']
__virtualname__ = 'rest_sample'
def __virtual__():
if 'proxy' not in __opts__:
return False
else:
return __virtualname__
def kernel():
return {'kernel':'proxy'}
def os():
return {'os':'proxy'}
def location():
return {'location': 'In this darn virtual machine. Let me out!'}
def os_family():
return {'os_family': 'proxy'}
def os_data():
return {'os_data': 'funkyHttp release 1.0.a.4.g'}
The __proxyenabled__ directive
------------------------------
Salt states and execution modules, by, and large, cannot "automatically" work
Salt execution moduless, by, and large, cannot "automatically" work
with proxied devices. Execution modules like ``pkg`` or ``sqlite3`` have no
meaning on a network switch or a housecat. For a state/execution module to be
meaning on a network switch or a housecat. For an execution module to be
available to a proxy-minion, the ``__proxyenabled__`` variable must be defined
in the module as an array containing the names of all the proxytypes that this
module can support. The array can contain the special value ``*`` to indicate
that the module supports all proxies.
If no ``__proxyenabled__`` variable is defined, then by default, the
state/execution module is unavailable to any proxy.
execution module is unavailable to any proxy.
Here is an excerpt from a module that was modified to support proxy-minions:
.. code-block:: python
__proxyenabled__ = ['*']
[...]
def ping():
if 'proxyobject' in __opts__:
if 'proxymodule' in __opts__:
if 'ping' in __opts__['proxyobject'].__attr__():
return __opts['proxyobject'].ping()
else:
@ -275,15 +446,18 @@ Here is an excerpt from a module that was modified to support proxy-minions:
else:
return True
And then in salt.proxy.junos we find
And then in salt.proxy.rest_sample.py we find
.. code-block:: python
def ping(self):
return self.connected
def ping():
'''
Is the REST server up?
'''
r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True)
try:
return r['dict'].get('ret', False)
except Exception:
return False
The Junos API layer lacks the ability to do a traditional 'ping', so the
example simply checks the connection object field that indicates
if the ssh connection was successfully made to the device.

View file

@ -776,7 +776,7 @@ repository:
- git: master https://domain.com/pillar.git root=subdirectory
More information on the git external pillar can be found in the
:mod:`salt.pillar.get_pillar docs <salt.pillar.git_pillar>`.
:mod:`salt.pillar.git_pillar docs <salt.pillar.git_pillar>`.
.. _faq-gitfs-bug:

View file

@ -191,9 +191,13 @@ The easiest way to accept the minion key is to accept all pending keys:
.. note::
Keys should be verified! The secure thing to do before accepting a key is
to run ``salt-key -f minion-id`` to print the fingerprint of the minion's
public key. This fingerprint can then be compared against the fingerprint
Keys should be verified! Print the master key fingerprint by running ``salt-key -F master``
on the Salt master. Copy the ``master.pub`` fingerprint from the Local Keys section,
and then set this value as the :conf_minion:`master_finger` in the minion configuration
file. Restart the Salt minion.
On the minion, run ``salt-key -f minion-id`` to print the fingerprint of the
minion's public key. This fingerprint can then be compared against the fingerprint
generated on the minion.
On the master:

View file

@ -13,7 +13,6 @@ import logging
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
# Import third party libs
from jinja2 import Environment
try:
@ -111,7 +110,7 @@ class _LDAPConnection(object):
)
def _bind(username, password):
def _bind(username, password, anonymous=False):
'''
Authenticate via an LDAP bind
'''
@ -121,8 +120,10 @@ def _bind(username, password):
connargs = {}
# config params (auth.ldap.*)
params = {
'mandatory': ['uri', 'server', 'port', 'tls', 'no_verify', 'anonymous', 'accountattributename', 'activedirectory'],
'additional': ['binddn', 'bindpw', 'filter', 'groupclass'],
'mandatory': ['uri', 'server', 'port', 'tls', 'no_verify', 'anonymous',
'accountattributename', 'activedirectory'],
'additional': ['binddn', 'bindpw', 'filter', 'groupclass',
'auth_by_group_membership_only'],
}
paramvalues = {}
@ -137,6 +138,7 @@ def _bind(username, password):
#except SaltInvocationError:
# pass
paramvalues['anonymous'] = anonymous
if paramvalues['binddn']:
# the binddn can also be composited, e.g.
# - {{ username }}@domain.com
@ -204,7 +206,10 @@ def _bind(username, password):
# Update connection dictionary with the user's password
connargs['bindpw'] = password
# Attempt bind with user dn and password
log.debug('Attempting LDAP bind with user dn: {0}'.format(connargs['binddn']))
if paramvalues['anonymous']:
log.debug('Attempting anonymous LDAP bind')
else:
log.debug('Attempting LDAP bind with user dn: {0}'.format(connargs['binddn']))
try:
ldap_conn = _LDAPConnection(**connargs).ldap
except Exception:
@ -224,8 +229,8 @@ def auth(username, password):
'''
Simple LDAP auth
'''
if _bind(username, password):
if _bind(username, password, anonymous=_config('auth_by_group_membership_only', mandatory=False) and
_config('anonymous', mandatory=False)):
log.debug('LDAP authentication successful')
return True
else:
@ -250,7 +255,8 @@ def groups(username, **kwargs):
'''
group_list = []
bind = _bind(username, kwargs['password'])
bind = _bind(username, kwargs['password'],
anonymous=_config('anonymous', mandatory=False))
if bind:
log.debug('ldap bind to determine group membership succeeded!')
@ -285,15 +291,24 @@ def groups(username, **kwargs):
group_list.append(entry['cn'][0])
log.debug('User {0} is a member of groups: {1}'.format(username, group_list))
else:
search_results = bind.search_s('ou={0},{1}'.format(_config('groupou'), _config('basedn')),
if _config('groupou'):
search_base = 'ou={0},{1}'.format(_config('groupou'), _config('basedn'))
else:
search_base = '{0}'.format(_config('basedn'))
search_string = '(&({0}={1})(objectClass={2}))'.format(_config('accountattributename'),
username, _config('groupclass'))
search_results = bind.search_s(search_base,
ldap.SCOPE_SUBTREE,
'(&({0}={1})(objectClass={2}))'.format(_config('accountattributename'),
username, _config('groupclass')),
search_string,
[_config('accountattributename'), 'cn'])
for _, entry in search_results:
if username in entry[_config('accountattributename')]:
group_list.append(entry['cn'][0])
log.debug('User {0} is a member of groups: {1}'.format(username, group_list))
if not auth(username, kwargs['password']):
log.error('LDAP username and password do not match')
return []
else:
log.error('ldap bind to determine group membership FAILED!')
return group_list

View file

@ -415,8 +415,9 @@ class ProxyMinion(parsers.MinionOptionParser):
'''
If sub-classed, run any shutdown operations on this method.
'''
if 'proxy' in self.minion.opts:
self.minion.opts['proxyobject'].shutdown(self.minion.opts)
if 'proxymodule' in self.minion.opts:
proxy_fn = self.minion.opts['proxymodule'].loaded_base_name + '.shutdown'
self.minion.opts['proxymodule'][proxy_fn](self.minion.opts)
logger.info('The proxy minion is shut down')

View file

@ -202,6 +202,9 @@ class LocalClient(object):
timeout=timeout,
)
if 'jid' in pub_data:
self.event.subscribe(pub_data['jid'])
return pub_data
def _check_pub_data(self, pub_data):
@ -233,6 +236,10 @@ class LocalClient(object):
print('No minions matched the target. '
'No command was sent, no jid was assigned.')
return {}
else:
self.event.subscribe_regex('^syndic/.*/{0}'.format(pub_data['jid']))
self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))
return pub_data
@ -263,9 +270,6 @@ class LocalClient(object):
'''
arg = salt.utils.args.condition_input(arg, kwarg)
# Subscribe to all events and subscribe as early as possible
self.event.subscribe(jid)
try:
pub_data = self.pub(
tgt,
@ -797,8 +801,6 @@ class LocalClient(object):
def get_returns_no_block(
self,
jid,
event=None,
gather_errors=False,
tags_regex=None
):
'''
@ -806,48 +808,16 @@ class LocalClient(object):
Yield either the raw event data or None
Pass a list of additional regular expressions as `tags_regex` to search
the event bus for non-return data, such as minion lists returned from
syndics.
Pass a list of additional regular expressions as `tags_regex` to search
the event bus for non-return data, such as minion lists returned from
syndics.
'''
if event is None:
event = self.event
jid_tag = 'salt/job/{0}'.format(jid)
jid_tag_regex = '^salt/job/{0}'.format(jid)
tag_search = []
tag_search.append(re.compile(jid_tag_regex))
if isinstance(tags_regex, str):
tag_search.append(re.compile(tags_regex))
elif isinstance(tags_regex, list):
for tag in tags_regex:
tag_search.append(re.compile(tag))
while True:
if self.opts.get('transport') == 'zeromq':
try:
raw = event.get_event_noblock()
if gather_errors:
if (raw and
(raw.get('tag', '').startswith('_salt_error') or
any([tag.search(raw.get('tag', '')) for tag in tag_search]))):
yield raw
else:
if raw and raw.get('tag', '').startswith(jid_tag):
yield raw
else:
yield None
except zmq.ZMQError as ex:
if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
yield None
else:
raise
else:
raw = event.get_event_noblock()
if raw and raw.get('tag', '').startswith(jid_tag):
yield raw
else:
yield None
# TODO(driskell): This was previously completely nonblocking.
# Should get_event have a nonblock option?
raw = self.event.get_event(wait=0.01, tag='salt/job/{0}'.format(jid), tags_regex=tags_regex, full=True)
yield raw
def get_iter_returns(
self,
@ -857,7 +827,6 @@ class LocalClient(object):
tgt='*',
tgt_type='glob',
expect_minions=False,
gather_errors=True,
block=True,
**kwargs):
'''
@ -894,9 +863,9 @@ class LocalClient(object):
# iterator for this job's return
if self.opts['order_masters']:
# If we are a MoM, we need to gather expected minions from downstreams masters.
ret_iter = self.get_returns_no_block(jid, gather_errors=gather_errors, tags_regex='^syndic/.*/{0}'.format(jid))
ret_iter = self.get_returns_no_block(jid, tags_regex=['^syndic/.*/{0}'.format(jid)])
else:
ret_iter = self.get_returns_no_block(jid, gather_errors=gather_errors)
ret_iter = self.get_returns_no_block(jid)
# iterator for the info of this job
jinfo_iter = []
timeout_at = time.time() + timeout
@ -915,10 +884,6 @@ class LocalClient(object):
# if we got None, then there were no events
if raw is None:
break
if gather_errors:
if raw['tag'] == '_salt_error':
ret = {raw['data']['id']: raw['data']['data']}
yield ret
if 'minions' in raw.get('data', {}):
minions.update(raw['data']['minions'])
continue
@ -965,15 +930,6 @@ class LocalClient(object):
# if the jinfo has timed out and some minions are still running the job
# re-do the ping
if time.time() > timeout_at and minions_running:
# need our own event listener, so we don't clobber the class one
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=not self.opts.get('__worker', False))
# start listening for new events, before firing off the pings
event.connect_pub()
# since this is a new ping, no one has responded yet
jinfo = self.gather_job_info(jid, tgt, tgt_type)
minions_running = False
@ -982,7 +938,7 @@ class LocalClient(object):
if 'jid' not in jinfo:
jinfo_iter = []
else:
jinfo_iter = self.get_returns_no_block(jinfo['jid'], event=event)
jinfo_iter = self.get_returns_no_block(jinfo['jid'])
timeout_at = time.time() + self.opts['gather_job_timeout']
# if you are a syndic, wait a little longer
if self.opts['order_masters']:
@ -1044,8 +1000,7 @@ class LocalClient(object):
self,
jid,
minions,
timeout=None,
pending_tags=None):
timeout=None):
'''
Get the returns for the command line interface via the event system
'''

View file

@ -2104,7 +2104,7 @@ class Map(Cloud):
output[name] = self.create(
profile, local_master=local_master
)
if self.opts.get('show_deploy_args', False) is False:
if self.opts.get('show_deploy_args', False) is False and isinstance(output[name], dict):
output[name].pop('deploy_kwargs', None)
except SaltCloudException as exc:
log.error(

View file

@ -28,22 +28,24 @@ cloud configuration at
my-vmware-config:
provider: vmware
user: "DOMAIN\\user"
password: "verybadpass"
url: "vcenter01.domain.com"
user: 'DOMAIN\\user'
password: 'verybadpass'
url: '10.20.30.40'
vmware-vcenter02:
vcenter01:
provider: vmware
user: "DOMAIN\\user"
password: "verybadpass"
url: "vcenter02.domain.com"
user: 'DOMAIN\\user'
password: 'verybadpass'
url: 'vcenter01.domain.com'
protocol: 'https'
port: 443
vmware-vcenter03:
vcenter02:
provider: vmware
user: "DOMAIN\\user"
password: "verybadpass"
url: "vcenter03.domain.com"
protocol: "http"
user: 'DOMAIN\\user'
password: 'verybadpass'
url: 'vcenter02.domain.com'
protocol: 'http'
port: 80
.. note::
@ -202,11 +204,13 @@ def _get_si():
port=port
)
ssl._create_default_https_context = default_context
except:
err_msg = exc.msg if isinstance(exc, vim.fault.InvalidLogin) and hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the specified protocol or url or port'
except Exception as exc:
err_msg = exc.msg if hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the debug log for more information'
log.debug(exc)
raise SaltCloudSystemExit(err_msg)
else:
err_msg = exc.msg if isinstance(exc, vim.fault.InvalidLogin) and hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the specified protocol or url or port'
err_msg = exc.msg if hasattr(exc, 'msg') else 'Could not connect to the specified vCenter server. Please check the debug log for more information'
log.debug(exc)
raise SaltCloudSystemExit(err_msg)
atexit.register(Disconnect, si)
@ -865,10 +869,9 @@ def _format_instance_info_select(vm, selection):
if 'tools_status' in selection:
vm_select_info['tools_status'] = str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"
if ('private_ips' or 'mac_address' or 'networks') in selection:
if 'private_ips' in selection or 'networks' in selection:
network_full_info = {}
ip_addresses = []
mac_addresses = []
if "guest.net" in vm:
for net in vm["guest.net"]:
@ -878,48 +881,63 @@ def _format_instance_info_select(vm, selection):
'mac_address': net.macAddress
}
ip_addresses.extend(net.ipAddress)
mac_addresses.append(net.macAddress)
if 'private_ips' in selection:
vm_select_info['private_ips'] = ip_addresses
if 'mac_address' in selection:
vm_select_info['mac_address'] = mac_addresses
if 'networks' in selection:
vm_select_info['networks'] = network_full_info
if 'devices' in selection:
if 'devices' in selection or 'mac_addresses' in selection:
device_full_info = {}
device_mac_addresses = []
if "config.hardware.device" in vm:
for device in vm["config.hardware.device"]:
device_full_info[device.deviceInfo.label] = {
'key': device.key,
'label': device.deviceInfo.label,
'summary': device.deviceInfo.summary,
'type': type(device).__name__.rsplit(".", 1)[1],
'unitNumber': device.unitNumber
}
device_full_info[device.deviceInfo.label] = {}
if 'devices' in selection:
device_full_info[device.deviceInfo.label]['key'] = device.key,
device_full_info[device.deviceInfo.label]['label'] = device.deviceInfo.label,
device_full_info[device.deviceInfo.label]['summary'] = device.deviceInfo.summary,
device_full_info[device.deviceInfo.label]['type'] = type(device).__name__.rsplit(".", 1)[1]
if hasattr(device.backing, 'network'):
device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
if device.unitNumber:
device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber
if hasattr(device, 'connectable') and device.connectable:
device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected
device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl
device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected
device_full_info[device.deviceInfo.label]['status'] = device.connectable.status
if hasattr(device, 'controllerKey') and device.controllerKey:
device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey
if hasattr(device, 'addressType'):
device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
if hasattr(device, 'busNumber'):
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
if hasattr(device, 'device'):
device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device
if hasattr(device, 'videoRamSizeInKB'):
device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
if isinstance(device, vim.vm.device.VirtualDisk):
device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB
device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
if hasattr(device, 'macAddress'):
device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress
device_mac_addresses.append(device.macAddress)
if hasattr(device, 'busNumber'):
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
if 'devices' in selection:
vm_select_info['devices'] = device_full_info
if hasattr(device, 'device'):
device_full_info[device.deviceInfo.label]['devices'] = device.device
if hasattr(device, 'videoRamSizeInKB'):
device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
if isinstance(device, vim.vm.device.VirtualDisk):
device_full_info[device.deviceInfo.label]['capacityInKB'] = device.capacityInKB
device_full_info[device.deviceInfo.label]['diskMode'] = device.backing.diskMode
device_full_info[device.deviceInfo.label]['fileName'] = device.backing.fileName
vm_select_info['devices'] = device_full_info
if 'mac_addresses' in selection:
vm_select_info['mac_addresses'] = device_mac_addresses
if 'storage' in selection:
storage_full_info = {
@ -946,26 +964,40 @@ def _format_instance_info_select(vm, selection):
def _format_instance_info(vm):
device_full_info = {}
device_mac_addresses = []
if "config.hardware.device" in vm:
for device in vm["config.hardware.device"]:
device_full_info[device.deviceInfo.label] = {
'key': device.key,
'label': device.deviceInfo.label,
'summary': device.deviceInfo.summary,
'type': type(device).__name__.rsplit(".", 1)[1],
'unitNumber': device.unitNumber
'type': type(device).__name__.rsplit(".", 1)[1]
}
if hasattr(device.backing, 'network'):
if device.unitNumber:
device_full_info[device.deviceInfo.label]['unitNumber'] = device.unitNumber
if hasattr(device, 'connectable') and device.connectable:
device_full_info[device.deviceInfo.label]['startConnected'] = device.connectable.startConnected
device_full_info[device.deviceInfo.label]['allowGuestControl'] = device.connectable.allowGuestControl
device_full_info[device.deviceInfo.label]['connected'] = device.connectable.connected
device_full_info[device.deviceInfo.label]['status'] = device.connectable.status
if hasattr(device, 'controllerKey') and device.controllerKey:
device_full_info[device.deviceInfo.label]['controllerKey'] = device.controllerKey
if hasattr(device, 'addressType'):
device_full_info[device.deviceInfo.label]['addressType'] = device.addressType
if hasattr(device, 'macAddress'):
device_full_info[device.deviceInfo.label]['macAddress'] = device.macAddress
device_mac_addresses.append(device.macAddress)
if hasattr(device, 'busNumber'):
device_full_info[device.deviceInfo.label]['busNumber'] = device.busNumber
if hasattr(device, 'device'):
device_full_info[device.deviceInfo.label]['devices'] = device.device
device_full_info[device.deviceInfo.label]['deviceKeys'] = device.device
if hasattr(device, 'videoRamSizeInKB'):
device_full_info[device.deviceInfo.label]['videoRamSizeInKB'] = device.videoRamSizeInKB
@ -993,7 +1025,6 @@ def _format_instance_info(vm):
network_full_info = {}
ip_addresses = []
mac_addresses = []
if "guest.net" in vm:
for net in vm["guest.net"]:
network_full_info[net.network] = {
@ -1002,7 +1033,6 @@ def _format_instance_info(vm):
'mac_address': net.macAddress
}
ip_addresses.extend(net.ipAddress)
mac_addresses.append(net.macAddress)
cpu = vm["config.hardware.numCPU"] if "config.hardware.numCPU" in vm else "N/A"
ram = "{0} MB".format(vm["config.hardware.memoryMB"]) if "config.hardware.memoryMB" in vm else "N/A"
@ -1018,7 +1048,7 @@ def _format_instance_info(vm):
'files': file_full_info,
'guest_id': str(vm["config.guestId"]) if "config.guestId" in vm else "N/A",
'hostname': str(vm["object"].guest.hostName),
'mac_address': mac_addresses,
'mac_addresses': device_mac_addresses,
'networks': network_full_info,
'path': str(vm["config.files.vmPathName"]) if "config.files.vmPathName" in vm else "N/A",
'tools_status': str(vm["guest.toolsStatus"]) if "guest.toolsStatus" in vm else "N/A"
@ -1531,10 +1561,10 @@ def list_nodes_select(call=None):
if 'state' in selection:
vm_properties.append("summary.runtime.powerState")
if ('private_ips' or 'mac_address' or 'networks') in selection:
if 'private_ips' in selection or 'networks' in selection:
vm_properties.append("guest.net")
if 'devices' in selection:
if 'devices' in selection or 'mac_addresses' in selection:
vm_properties.append("config.hardware.device")
if 'storage' in selection:
@ -3205,17 +3235,17 @@ def add_host(kwargs=None, call=None):
.. code-block:: yaml
vmware-vcenter01:
vcenter01:
provider: vmware
user: "DOMAIN\\user"
password: "verybadpass"
url: "vcenter01.domain.com"
user: 'DOMAIN\\user'
password: 'verybadpass'
url: 'vcenter01.domain.com'
# Required when adding a host system
esxi_host_user: "root"
esxi_host_password: "myhostpassword"
esxi_host_user: 'root'
esxi_host_password: 'myhostpassword'
# Optional fields that can be specified when adding a host system
esxi_host_ssl_thumbprint: "12:A3:45:B6:CD:7E:F8:90:A1:BC:23:45:D6:78:9E:FA:01:2B:34:CD"
esxi_host_ssl_thumbprint: '12:A3:45:B6:CD:7E:F8:90:A1:BC:23:45:D6:78:9E:FA:01:2B:34:CD'
The SSL thumbprint of the host system can be optionally specified by setting
``esxi_host_ssl_thumbprint`` under your provider configuration. To get the SSL

View file

@ -630,10 +630,10 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
for header_name, header_value in ret['headers'].items():
name = header_name.strip()
value = header_value.strip()
if name == 'Last-Modified'.lower():
if str(name).lower() == 'last-modified':
s3_file_mtime = datetime.datetime.strptime(
value, '%a, %d %b %Y %H:%M:%S %Z')
elif name == 'Content-Length'.lower():
elif str(name).lower() == 'content-length':
s3_file_size = int(value)
if (cached_file_size == s3_file_size and
cached_file_mtime > s3_file_mtime):

View file

@ -5,10 +5,14 @@ NOTE this is a little complicated--junos can only be accessed via salt-proxy-min
Thus, it makes sense to get some grains from the minion (PYTHONPATH), but
not others (ip_interfaces)
'''
import logging
__proxyenabled__ = ['junos']
__virtualname__ = 'junos'
log = logging.getLogger(__name__)
def __virtual__():
if 'proxy' not in __opts__:
@ -17,16 +21,31 @@ def __virtual__():
return __virtualname__
def location():
    '''
    Static location grain for the junos proxy minion.
    '''
    return dict(location='dc-1-europe')
def _remove_complex_types(dictionary):
'''
Linode-python is now returning some complex types that
are not serializable by msgpack. Kill those.
'''
for k, v in dictionary.iteritems():
if isinstance(v, dict):
dictionary[k] = _remove_complex_types(v)
elif hasattr(v, 'to_eng_string'):
dictionary[k] = v.to_eng_string()
return dictionary
def defaults():
    '''
    Default grain values for the junos proxy minion.
    '''
    grain_defaults = dict(os='proxy', kernel='unknown', osrelease='proxy')
    return grain_defaults
def facts():
    '''
    Fetch the device facts through the junos proxy module and force the
    ``version_info`` entry to a fixed override value.
    '''
    log.debug('----------- Trying to get facts')
    device_facts = __opts__['proxymodule']['junos.facts']()
    device_facts['version_info'] = 'override'
    return device_facts
def os_family():
    '''
    Report the os_family grain for junos proxy minions.
    '''
    return dict(os_family='junos')
def os_data():
    '''
    Static os_data grains reported for the junos proxy.
    '''
    version = {
        'major': '12,1',
        'type': 'I',
        'minor': '20131108_srx_12q1_x46_intgr',
        'build': '0-613414',
    }
    return {'version_info': version, 'os_family': 'proxy'}

View file

@ -14,6 +14,14 @@ def __virtual__():
return __virtualname__
def kernel():
    '''
    Report the kernel grain for proxy minions.
    '''
    return dict(kernel='proxy')
def os():
    '''
    Report the os grain for proxy minions.
    '''
    return dict(os='proxy')
def location():
    '''
    Static location grain for the proxy test minion.
    '''
    return dict(location='In this darn virtual machine. Let me out!')
@ -23,4 +31,4 @@ def os_family():
def os_data():
    # NOTE(review): the second return below is unreachable dead code --
    # this function always returns the result of the proxyobject
    # grains() call.  This looks like merge/diff residue; confirm which
    # return is intended and remove the other.
    return __opts__['proxyobject'].grains()
    return {'os_data': 'funkyHttp release 1.0.a.4.g'}

View file

@ -207,7 +207,7 @@ def raw_mod(opts, name, functions, mod='modules'):
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def proxy(opts, functions, whitelist=None):
def proxy(opts, functions, whitelist=None, loaded_base_name=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
@ -216,6 +216,7 @@ def proxy(opts, functions, whitelist=None):
tag='proxy',
whitelist=whitelist,
pack={'__proxy__': functions},
loaded_base_name=loaded_base_name
)
@ -1032,14 +1033,17 @@ class LazyLoader(salt.utils.lazy.LazyDict):
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
if not hasattr(mod, 'render') and 'proxy' in self.opts:
if not hasattr(mod, '__proxyenabled__') or \
self.opts['proxy']['proxytype'] in mod.__proxyenabled__ or \
'*' in mod.__proxyenabled__:
err_string = 'not a proxy_minion enabled module'
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
#
# Render modules and state modules are OK though
if 'proxy' in self.opts:
if self.tag not in ['render', 'states']:
if not hasattr(mod, '__proxyenabled__') or \
(self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
'*' not in mod.__proxyenabled__):
err_string = 'not a proxy_minion enabled module'
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, '__load__', False) is not False:
log.info(

View file

@ -2126,26 +2126,31 @@ class ClearFuncs(object):
try:
name = self.loadauth.load_name(clear_load)
groups = self.loadauth.get_groups(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
eauth_config = self.opts['external_auth'][clear_load['eauth']]
if '*' not in eauth_config and name not in eauth_config:
found = False
for group in groups:
if "{0}%".format(group) in self.opts['external_auth'][clear_load['eauth']]:
if "{0}%".format(group) in eauth_config:
found = True
break
if not found:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
else:
clear_load['groups'] = groups
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
clear_load['groups'] = groups
return self.loadauth.mk_token(clear_load)
except Exception as exc:
import sys
import traceback
type_, value_, traceback_ = sys.exc_info()
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
log.error(traceback.format_exception(type_, value_, traceback_))
return ''
def get_token(self, clear_load):
@ -2209,43 +2214,45 @@ class ClearFuncs(object):
)
)
return ''
if not token:
# Bail if the token is empty or if the eauth type specified is not allowed
if not token or token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
found = False
for group in token['groups']:
if "{0}%".format(group) in self.opts['external_auth'][token['eauth']]:
found = True
break
if not found:
log.warning('Authentication failure of type "token" occurred.')
return ''
group_perm_keys = filter(lambda(item): item.endswith('%'), self.opts['external_auth'][token['eauth']]) # The configured auth groups
# Fetch eauth config and collect users and groups configured for access
eauth_config = self.opts['external_auth'][token['eauth']]
eauth_users = []
eauth_groups = []
for entry in eauth_config:
if entry.endswith('%'):
eauth_groups.append(entry.rstrip('%'))
else:
eauth_users.append(entry)
# First we need to know if the user is allowed to proceed via any of their group memberships.
# If there are groups in the token, check if any of them are listed in the eauth config
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
try:
for group in token['groups']:
if group == group_config:
if group in eauth_groups:
group_auth_match = True
break
except KeyError:
pass
if '*' not in eauth_users and token['name'] not in eauth_users and not group_auth_match:
log.warning('Authentication failure of type "token" occurred.')
return ''
# Compile list of authorized actions for the user
auth_list = []
if '*' in self.opts['external_auth'][token['eauth']]:
auth_list.extend(self.opts['external_auth'][token['eauth']]['*'])
if token['name'] in self.opts['external_auth'][token['eauth']]:
auth_list.extend(self.opts['external_auth'][token['eauth']][token['name']])
# Add permissions for '*' or user-specific to the auth list
for user_key in ('*', token['name']):
auth_list.extend(eauth_config.get(user_key, []))
# Add any add'l permissions allowed by group membership
if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(self.opts['external_auth'][token['eauth']], token['groups'], auth_list)
auth_list = self.ckminions.fill_auth_list_from_groups(eauth_config, token['groups'], auth_list)
log.trace("compiled auth_list: {0}".format(auth_list))
log.trace("Compiled auth_list: {0}".format(auth_list))
good = self.ckminions.auth_check(
auth_list,
@ -2269,18 +2276,13 @@ class ClearFuncs(object):
)
return ''
try:
# The username with which we are attempting to auth
name = self.loadauth.load_name(extra)
# The groups to which this user belongs
groups = self.loadauth.get_groups(extra)
# The configured auth groups
group_perm_keys = [
item for item in self.opts['external_auth'][extra['eauth']]
if item.endswith('%')
]
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
if groups is None:
groups = []
group_perm_keys = filter(lambda(item): item.endswith('%'), self.opts['external_auth'][extra['eauth']]) # The configured auth groups
# First we need to know if the user is allowed to proceed via
# any of their group memberships.
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
@ -2320,9 +2322,15 @@ class ClearFuncs(object):
return ''
except Exception as exc:
import sys
import traceback
type_, value_, traceback_ = sys.exc_info()
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
log.error(traceback.format_exception(
type_, value_, traceback_))
return ''
# auth_list = self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*']

View file

@ -82,6 +82,7 @@ import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.six import string_types
@ -334,7 +335,10 @@ class SMinion(object):
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts,
include_errors=True)
self.proxy = salt.loader.proxy(self.opts, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.returners = salt.loader.returners(self.opts, self.functions)
@ -736,22 +740,21 @@ class Minion(MinionBase):
# store your hexid to subscribe to zmq, hash since zmq filters are prefix
# matches this way we can avoid collisions
self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
if 'proxy' in self.opts['pillar']:
log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy']))
log.info('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy'].keys()))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
log.info('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
continue
else:
proxyminion = salt.ProxyMinion()
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
log.info('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
# __init__() from MinionBase is called in Minion.eval_master()
@ -823,6 +826,13 @@ class Minion(MinionBase):
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
@ -2897,12 +2907,14 @@ class ProxyMinion(Minion):
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
@ -2922,11 +2934,19 @@ class ProxyMinion(Minion):
)
# Late setup the of the opts grains, so we can log from the grains
# module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts['master'] = self.eval_master(opts,
timeout,
safe)
fq_proxyname = opts['proxy']['proxytype']
# Need to match the function signature of the other loader fns
# which is def proxy(opts, functions, whitelist=None, loaded_base_name=None)
# 'functions' for other loaders is a LazyLoader object
# but since we are not needing to merge functions into another fn dictionary
# we will pass 'None' in
self.proxymodule = salt.loader.proxy(opts, None, loaded_base_name=fq_proxyname)
opts['proxymodule'] = self.proxymodule
opts['grains'] = salt.loader.grains(opts)
opts['id'] = opts['proxymodule'][fq_proxyname+'.id'](opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.authenticate(timeout, safe)
@ -2937,6 +2957,7 @@ class ProxyMinion(Minion):
opts['environment'],
pillarenv=opts.get('pillarenv'),
).compile_pillar()
opts['proxymodule'][fq_proxyname+'.init'](opts)
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
@ -2947,7 +2968,26 @@ class ProxyMinion(Minion):
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
})
self.grains_cache = self.opts['grains']
# store your hexid to subscribe to zmq, hash since zmq filters are prefix
# matches this way we can avoid collisions
self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
# self._running = True
def _prep_mod_opts(self):

View file

@ -103,7 +103,9 @@ def tar(options, tarfile, sources=None, dest=None,
cmd.extend(['-C', '{0}'.format(dest)])
cmd.extend(['-{0}'.format(options), '{0}'.format(tarfile)])
cmd.extend(sources)
if sources:
cmd.extend(sources)
return __salt__['cmd.run'](cmd,
cwd=cwd,

View file

@ -3555,10 +3555,10 @@ def manage_file(name,
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file {0} not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if _urlparse(source).scheme != 'salt':
ret, 'Source file {0!r} not found'.format(source))
# If the downloaded file came from a non salt server or local source
# verify that it matches the intended sum value
if _urlparse(source).scheme not in ('salt', ''):
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = ('File sum set for file {0} of {1} does '

View file

@ -11,7 +11,7 @@ from __future__ import absolute_import
import logging
# Juniper interface libraries
# https://github.com/jeremyschulman/py-junos-eznc
# https://github.com/Juniper/py-junos-eznc
try:
@ -28,6 +28,7 @@ except ImportError:
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'junos'
@ -37,7 +38,7 @@ __proxyenabled__ = ['junos']
def __virtual__():
'''
We need the Junos adapter libraries for this
module to work. We also need a proxyobject object
module to work. We also need a proxymodule entry in __opts__
in the opts dictionary
'''
if HAS_JUNOS and 'proxy' in __opts__:
@ -52,13 +53,17 @@ def facts_refresh():
if the device configuration is changed by some other actor.
'''
return __opts__['proxyobject'].refresh
return __opts__['proxymodule']['junos.refresh']()
def call_rpc():
    '''
    Invoke the junos proxy module's rpc function and return its result.
    '''
    rpc_fn = __opts__['proxymodule']['junos.rpc']
    return rpc_fn()
def set_hostname(hostname=None, commit_change=True):
conn = __opts__['proxymodule']['junos.conn']()
ret = dict()
conn = __opts__['proxyobject']
if hostname is None:
ret['out'] = False
return ret
@ -79,8 +84,7 @@ def set_hostname(hostname=None, commit_change=True):
def commit():
conn = __opts__['proxyobject']
conn = __opts__['proxymodule']['junos.conn']()
ret = {}
commit_ok = conn.cu.commit_check()
if commit_ok:
@ -99,8 +103,8 @@ def commit():
def rollback():
conn = __opts__['proxyobject']
ret = dict()
conn = __opts__['proxymodule']['junos.conn']()
ret['out'] = conn.cu.rollback(0)
@ -114,8 +118,8 @@ def rollback():
def diff():
conn = __opts__['proxymodule']['junos.conn']()
ret = dict()
conn = __opts__['proxyobject']
ret['out'] = True
ret['message'] = conn.cu.diff()
@ -124,7 +128,7 @@ def diff():
def ping():
conn = __opts__['proxymodule']['junos.conn']()
ret = dict()
conn = __opts__['proxyobject']
ret['message'] = conn.cli('show system uptime')
ret['message'] = conn.probe()
ret['out'] = True

View file

@ -840,7 +840,7 @@ def _network_conf(conf_tuples=None, **kwargs):
# (lxc.network.ipv4.gateway: auto)
if (
distutils.version.LooseVersion(version()) <= '1.0.7' and
True not in ['ipv4.gateway' in a for a in ret]
True not in ['lxc.network.ipv4.gateway' in a for a in ret]
):
ret.append({'lxc.network.ipv4.gateway': 'auto'})
return ret

View file

@ -884,8 +884,17 @@ def remove(name=None,
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs(jail=jail, chroot=chroot)
targets = [x for x in pkg_params if x in old]
targets = []
old = list_pkgs(jail=jail, chroot=chroot, with_origin=True)
for pkg in pkg_params.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
if pkg[0].find("/") > 0:
origin = pkg[0]
pkg = [k for k, v in old.iteritems() if v['origin'] == origin][0]
if pkg[0] in old:
targets.append(pkg[0])
if not targets:
return {}
@ -915,7 +924,7 @@ def remove(name=None,
__salt__['cmd.run'](cmd, python_shell=False, output_loglevel='trace')
__context__.pop(_contextkey(jail, chroot), None)
__context__.pop(_contextkey(jail, chroot, prefix='pkg.origin'), None)
new = list_pkgs(jail=jail, chroot=chroot)
new = list_pkgs(jail=jail, chroot=chroot, with_origin=True)
return salt.utils.compare_dicts(old, new)
# Support pkg.delete to remove packages, since this is the CLI usage

View file

@ -17,11 +17,11 @@ __virtualname__ = 'pkg'
def __virtual__():
'''
Only work on RestExampleOS
Only work on proxy
'''
# Enable on these platforms only.
enable = set((
'RestExampleOS',
'proxy',
))
if __grains__['os'] in enable:
return __virtualname__
@ -29,16 +29,16 @@ def __virtual__():
def list_pkgs(versions_as_list=False, **kwargs):
return __opts__['proxyobject'].package_list()
return __opts__['proxymodule']['rest_sample.package_list']()
def install(name=None, refresh=False, fromrepo=None,
pkgs=None, sources=None, **kwargs):
return __opts__['proxyobject'].package_install(name, **kwargs)
return __opts__['proxymodule']['rest_sample.package_install'](name, **kwargs)
def remove(name=None, pkgs=None, **kwargs):
return __opts__['proxyobject'].package_remove(name)
return __opts__['proxymodule']['rest_sample.package_remove'](name)
def version(*names, **kwargs):
@ -55,7 +55,7 @@ def version(*names, **kwargs):
salt '*' pkg.version <package1> <package2> <package3> ...
'''
if len(names) == 1:
return str(__opts__['proxyobject'].package_status(names))
return str(__opts__['proxymodule']['rest_sample.package_status'](names))
def installed(
@ -68,7 +68,7 @@ def installed(
sources=None,
**kwargs):
p = __opts__['proxyobject'].package_status(name)
p = __opts__['proxymodule']['rest_sample.package_status'](name)
if version is None:
if 'ret' in p:
return str(p['ret'])

View file

@ -241,13 +241,25 @@ def template(tem, queue=False, **kwargs):
salt '*' state.template '<Path to template on the minion>'
'''
if 'env' in kwargs:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
saltenv = kwargs['env']
elif 'saltenv' in kwargs:
saltenv = kwargs['saltenv']
else:
saltenv = ''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
st_ = salt.state.HighState(__opts__)
if not tem.endswith('.sls'):
tem = '{sls}.sls'.format(sls=tem)
high_state, errors = st_.render_state(tem, None, '', None, local=True)
high_state, errors = st_.render_state(tem, saltenv, '', None, local=True)
if errors:
__context__['retcode'] = 1
return errors

View file

@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
'''
Provide the service module for the proxy-minion REST sample
'''
# Import python libs
from __future__ import absolute_import
import logging
__proxyenabled__ = ['rest_sample']
log = logging.getLogger(__name__)
__func_alias__ = {
'reload_': 'reload'
}
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
    '''
    Only load on proxy minions (the kernel grain reports 'proxy').
    '''
    is_proxy = __grains__['kernel'] == 'proxy'
    return __virtualname__ if is_proxy else False
def get_all():
    '''
    Return a list of all available services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Delegate to the rest_sample proxy module's service_list function.
    return __opts__['proxymodule']['rest_sample.service_list']()
def start(name):
    '''
    Start the specified service on the rest_sample

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    # Delegate to the rest_sample proxy module's service_start function.
    return __opts__['proxymodule']['rest_sample.service_start'](name)
def stop(name):
    '''
    Stop the specified service on the rest_sample

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    # Delegate to the rest_sample proxy module's service_stop function.
    return __opts__['proxymodule']['rest_sample.service_stop'](name)
def restart(name):
    '''
    Restart the specified service with rest_sample

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    # Hand the request off to the proxymodule backing this proxy minion.
    return __opts__['proxymodule']['rest_sample.service_restart'](name)
def status(name, sig):
    '''
    Return the status for a service via rest_sample.  Returns a dict mapping
    the service name to a boolean indicating whether the service is running.

    name
        The name of the service to check.

    sig
        Unused here; accepted for interface compatibility with the other
        service modules, whose ``status`` functions take a signature argument.

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    proxy_fn = 'rest_sample.service_status'
    resp = __opts__['proxymodule'][proxy_fn](name)
    if resp['comment'] == 'stopped':
        return {name: False}
    if resp['comment'] == 'running':
        return {name: True}
    # Previously an unrecognized status comment fell off the end of the
    # function and implicitly returned None.  Treat anything other than
    # 'running' as not running so callers always receive a dict.
    return {name: False}

View file

@ -105,8 +105,9 @@ def ping():
salt '*' test.ping
'''
if 'proxyobject' in __opts__:
return __opts__['proxyobject'].ping()
if 'proxymodule' in __opts__:
ping_cmd = __opts__['proxymodule'].loaded_base_name + '.ping'
return __opts__['proxymodule'][ping_cmd]()
else:
return True

View file

@ -1432,19 +1432,18 @@ class Login(LowDataAdapter):
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
perms = []
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
perms = perms or None
else:
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
if not perms:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "

View file

@ -261,8 +261,6 @@ class EventListener(object):
opts=opts,
)
self.event.subscribe() # start listening for events immediately
# tag -> list of futures
self.tag_map = defaultdict(list)

View file

@ -7,58 +7,79 @@ Interface with a Junos device via proxy-minion.
from __future__ import print_function
from __future__ import absolute_import
import logging
# Import 3rd-party libs
import jnpr.junos
import jnpr.junos.utils
import jnpr.junos.cfg
# import jnpr.junos
# import jnpr.junos.utils
# import jnpr.junos.utils.config
import json
HAS_JUNOS = True
__proxyenabled__ = ['junos']
thisproxy = {}
class Proxyconn(object):
log = logging.getLogger(__name__)
# def __init__(opts):
# '''
# Open the connection to the Junos device, login, and bind to the
# Resource class
# '''
# log.debug('Opening connection to junos')
# thisproxy['conn'] = jnpr.junos.Device(user=opts['proxy']['username'],
# host=opts['proxy']['host'],
# password=opts['proxy']['passwd'])
# thisproxy['conn'].open()
# thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config)
def conn():
return thisproxy['conn']
def facts():
return thisproxy['conn'].facts
def refresh():
return thisproxy['conn'].facts_refresh()
def proxytype():
'''
This class provides the persistent connection to the device that is being
controlled.
Returns the name of this proxy
'''
return 'junos'
def id(opts):
'''
Returns a unique ID for this proxy minion
'''
return thisproxy['conn'].facts['hostname']
def ping():
'''
Ping? Pong!
'''
return thisproxy['conn'].connected
def shutdown(opts):
'''
This is called when the proxy-minion is exiting to make sure the
connection to the device is closed cleanly.
'''
def __init__(self, details):
'''
Open the connection to the Junos device, login, and bind to the
Resource class
'''
self.conn = jnpr.junos.Device(user=details['username'],
host=details['host'],
password=details['passwd'])
self.conn.open()
self.conn.bind(cu=jnpr.junos.cfg.Resource)
log.debug('Proxy module {0} shutting down!!'.format(opts['id']))
try:
thisproxy['conn'].close()
except Exception:
pass
def proxytype(self):
'''
Returns the name of this proxy
'''
return 'junos'
def id(self, opts):
'''
Returns a unique ID for this proxy minion
'''
return self.conn.facts['hostname']
def ping(self):
'''
Ping? Pong!
'''
return self.conn.connected
def shutdown(self, opts):
'''
This is called when the proxy-minion is exiting to make sure the
connection to the device is closed cleanly.
'''
print('Proxy module {0} shutting down!!'.format(opts['id']))
try:
self.conn.close()
except Exception:
pass
def rpc():
return json.dumps(thisproxy['conn'].rpc.get_software_information())

View file

@ -1,147 +1,173 @@
# -*- coding: utf-8 -*-
'''
This is a simple proxy-minion designed to connect to and communicate with
the bottle-based web service contained in salt/tests/rest.py.
Note this example needs the 'requests' library.
Requests is not a hard dependency for Salt
the bottle-based web service contained in https://github.com/salt-contrib/proxyminion_rest_example
'''
from __future__ import absolute_import
# Import python libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
import logging
import salt.utils.http
HAS_REST_EXAMPLE = True
# This must be present or the Salt loader won't load this module
__proxyenabled__ = ['rest_sample']
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
GRAINS_CACHE = {}
DETAILS = {}
# Want logging!
log = logging.getLogger(__file__)
# This does nothing, it's here just as an example and to provide a log
# entry when the module is loaded.
def __virtual__():
'''
Only return if all the modules are available
'''
if not HAS_REQUESTS:
log.debug('rest_sample proxy __virtual__() called...')
return True
def init(opts):
'''
Every proxy module needs an 'init', though you can
just put a 'pass' here if it doesn't need to do anything.
'''
log.debug('rest_sample proxy init() called...')
# Save the REST URL
DETAILS['url'] = opts['proxy']['url']
# Make sure the REST URL ends with a '/'
if not DETAILS['url'].endswith('/'):
DETAILS['url'] += '/'
def id(opts):
'''
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.
If it changes while the proxy is running the salt-master will get
really confused and may stop talking to this minion
'''
r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True)
return r['dict']['id'].encode('ascii', 'ignore')
def grains():
'''
Get the grains from the proxied device
'''
if not GRAINS_CACHE:
r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True)
GRAINS_CACHE = r['dict']
return GRAINS_CACHE
def grains_refresh():
'''
Refresh the grains from the proxied device
'''
GRAINS_CACHE = {}
return grains()
def service_start(name):
'''
Start a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True)
return r['dict']
def service_stop(name):
'''
Stop a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True)
return r['dict']
def service_restart(name):
'''
Restart a "service" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True)
return r['dict']
def service_list():
'''
List "services" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True)
return r['dict']
def service_status(name):
'''
Check if a service is running on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True)
return r['dict']
def package_list():
'''
List "packages" installed on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True)
return r['dict']
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
cmd = DETAILS['url']+'package/install/'+name
if 'version' in kwargs:
cmd += '/'+kwargs['version']
else:
cmd += '/1.0'
r = salt.utils.http.query(cmd, decode_type='json', decode=True)
return r['dict']
def package_remove(name):
'''
Remove a "package" on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True)
return r['dict']
def package_status(name):
'''
Check the installation status of a package on the REST server
'''
r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True)
return r['dict']
def ping():
'''
Is the REST server up?
'''
r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True)
try:
return r['dict'].get('ret', False)
except Exception:
return False
class Proxyconn(object):
def shutdown(opts):
'''
Interface with the REST sample web service (rest.py at
https://github.com/cro/salt-proxy-rest)
For this proxy shutdown is a no-op
'''
def __init__(self, details):
self.url = details['url']
self.grains_cache = {}
def id(self, opts):
'''
Return a unique ID for this proxy minion
'''
r = requests.get(self.url+'id')
return r.text.encode('ascii', 'ignore')
def grains(self):
'''
Get the grains from the proxied device
'''
if not self.grains_cache:
r = requests.get(self.url+'info')
self.grains_cache = r.json()
return self.grains_cache
def grains_refresh(self):
'''
Refresh the grains from the proxied device
'''
self.grains_cache = {}
return self.grains()
def service_start(self, name):
'''
Start a "service" on the REST server
'''
r = requests.get(self.url+'service/start/'+name)
return r.json()
def service_stop(self, name):
'''
Stop a "service" on the REST server
'''
r = requests.get(self.url+'service/stop/'+name)
return r.json()
def service_restart(self, name):
'''
Restart a "service" on the REST server
'''
r = requests.get(self.url+'service/restart/'+name)
return r.json()
def service_list(self):
'''
List "services" on the REST server
'''
r = requests.get(self.url+'service/list')
return r.json()
def service_status(self, name):
'''
Check if a service is running on the REST server
'''
r = requests.get(self.url+'service/status/'+name)
return r.json()
def package_list(self):
'''
List "packages" installed on the REST server
'''
r = requests.get(self.url+'package/list')
return r.json()
def package_install(self, name, **kwargs):
'''
Install a "package" on the REST server
'''
cmd = self.url+'package/install/'+name
if 'version' in kwargs:
cmd += '/'+kwargs['version']
else:
cmd += '/1.0'
r = requests.get(cmd)
def package_remove(self, name):
'''
Remove a "package" on the REST server
'''
r = requests.get(self.url+'package/remove/'+name)
return r.json()
def package_status(self, name):
'''
Check the installation status of a package on the REST server
'''
r = requests.get(self.url+'package/status/'+name)
return r.json()
def ping(self):
'''
Is the REST server up?
'''
r = requests.get(self.url+'ping')
try:
if r.status_code == 200:
return True
else:
return False
except Exception:
return False
def shutdown(self, opts):
'''
For this proxy shutdown is a no-op
'''
pass
log.debug('rest_sample proxy shutdown() called...')

View file

@ -2591,6 +2591,8 @@ class BaseHighState(object):
inc_sls = '.'.join(p_comps[:-1]) + inc_sls
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
resolved_envs = [env_key]

View file

@ -76,7 +76,7 @@ Available Functions
- container: mysuperdocker
- image: corp/mysuperdocker_img
- port_bindings:
"5000/tcp":
- "5000/tcp":
HostIp: ""
HostPort: "5000"

View file

@ -129,6 +129,8 @@ def _find_remove_targets(name=None,
Inspect the arguments to pkg.removed and discover what packages need to
be removed. Return a dict of packages to remove.
'''
if __grains__['os'] == 'FreeBSD':
kwargs['with_origin'] = True
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if pkgs:
to_remove = _repack_pkgs(pkgs)
@ -149,8 +151,15 @@ def _find_remove_targets(name=None,
# Check current versions against specified versions
targets = []
problems = []
for pkgname, pkgver in to_remove.items():
cver = cur_pkgs.get(pkgname, [])
for pkgname, pkgver in six.iteritems(to_remove):
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search('/', pkgname))
if __grains__['os'] == 'FreeBSD' and origin:
cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]
else:
cver = cur_pkgs.get(pkgname, [])
# Package not installed, no need to remove
if not cver:
continue
@ -228,6 +237,9 @@ def _find_install_targets(name=None,
else:
ignore_types = []
if __grains__['os'] == 'FreeBSD':
kwargs['with_origin'] = True
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if any((pkgs, sources)):
if pkgs:
@ -264,7 +276,14 @@ def _find_install_targets(name=None,
to_unpurge = _find_unpurge_targets(desired)
cver = cur_pkgs.get(name, [])
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search('/', name))
if __grains__['os'] == 'FreeBSD' and origin:
cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name]
else:
cver = cur_pkgs.get(name, [])
if name not in to_unpurge:
if version and version in cver and not pkg_verify:
# The package is installed and is the correct version
@ -405,7 +424,14 @@ def _verify_install(desired, new_pkgs):
ok = []
failed = []
for pkgname, pkgver in desired.items():
cver = new_pkgs.get(pkgname)
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search('/', pkgname))
if __grains__['os'] == 'FreeBSD' and origin:
cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
else:
cver = new_pkgs.get(pkgname)
if not cver:
failed.append(pkgname)
continue
@ -1021,6 +1047,8 @@ def installed(
not_modified = [x for x in desired if x not in targets and x not in to_reinstall]
failed = [x for x in targets if x not in modified]
else:
if __grains__['os'] == 'FreeBSD':
kwargs['with_origin'] = True
ok, failed = \
_verify_install(
desired, __salt__['pkg.list_pkgs'](

View file

@ -10,7 +10,7 @@ This state is useful for sending messages to Slack during state runs.
.. code-block:: yaml
slack-message:
slack.send_message:
slack.post_message:
- channel: '#general'
- from_name: SuperAdmin
- message: 'This state was executed successfully.'
@ -44,7 +44,7 @@ def post_message(name,
.. code-block:: yaml
slack-message:
slack.send_message:
slack.post_message:
- channel: '#general'
- from_name: SuperAdmin
- message: 'This state was executed successfully.'

View file

@ -162,7 +162,12 @@ def managed(name,
use_vt=use_vt,
)
ret['result'] = _ret['retcode'] == 0
if _ret['retcode'] != 0:
ret['result'] = False
ret['comment'] = _ret['stdout'] + _ret['stderr']
return ret
ret['result'] = True
ret['changes']['new'] = __salt__['cmd.run_stderr'](
'{0} -V'.format(venv_py)).strip('\n')

View file

@ -59,6 +59,7 @@ import logging
import time
import datetime
import multiprocessing
import re
from collections import MutableMapping
# Import third party libs
@ -169,10 +170,12 @@ class SaltEvent(object):
if sock_dir is None:
sock_dir = opts.get('sock_dir', None)
self.puburi, self.pulluri = self.__load_uri(sock_dir, node)
self.subscribe()
self.pending_tags = []
self.pending_rtags = []
self.pending_events = []
# since ZMQ connect() has no guarantees about the socket actually being
# connected this is a hack to attempt to do so.
self.connect_pub()
self.fire_event({}, tagify('event/new_client'), 0)
self.get_event(wait=1)
@ -223,17 +226,54 @@ class SaltEvent(object):
)
return puburi, pulluri
def subscribe(self, tag=None):
def subscribe(self, tag):
'''
Subscribe to events matching the passed tag.
'''
if not self.cpub:
self.connect_pub()
def unsubscribe(self, tag=None):
If you do not subscribe to a tag, events will be discarded by calls to
get_event that request a different tag. In contexts where many different
jobs are outstanding it is important to subscribe to prevent one call
to get_event from discarding a response required by a subsequent call
to get_event.
'''
self.pending_tags.append(tag)
return
def subscribe_regex(self, tag_regex):
'''
Subscribe to events matching the passed tag expression.
If you do not subscribe to a tag, events will be discarded by calls to
get_event that request a different tag. In contexts where many different
jobs are outstanding it is important to subscribe to prevent one call
to get_event from discarding a response required by a subsequent call
to get_event.
'''
self.pending_rtags.append(re.compile(tag_regex))
return
def unsubscribe(self, tag):
'''
Un-subscribe to events matching the passed tag.
'''
self.pending_tags.remove(tag)
return
def unsubscribe_regex(self, tag_regex):
'''
Un-subscribe to events matching the passed tag.
'''
self.pending_rtags.remove(tag_regex)
old_events = self.pending_events
self.pending_events = []
for evt in old_events:
if any(evt['tag'].startswith(ptag) for ptag in self.pending_tags) or any(rtag.search(evt['tag']) for rtag in self.pending_rtags):
self.pending_events.append(evt)
return
def connect_pub(self):
@ -276,29 +316,32 @@ class SaltEvent(object):
data = serial.loads(mdata)
return mtag, data
def _check_pending(self, tag, pending_tags):
def _check_pending(self, tag, tags_regex):
"""Check the pending_events list for events that match the tag
:param tag: The tag to search for
:type tag: str
:param pending_tags: List of tags to preserve
:type pending_tags: list[str]
:param tags_regex: List of re expressions to search for also
:type tags_regex: list[re.compile()]
:return:
"""
old_events = self.pending_events
self.pending_events = []
ret = None
for evt in old_events:
if evt['tag'].startswith(tag):
if evt['tag'].startswith(tag) or any(rtag.search(evt['tag']) for rtag in tags_regex):
if ret is None:
ret = evt
log.trace('get_event() returning cached event = {0}'.format(ret))
else:
self.pending_events.append(evt)
elif any(evt['tag'].startswith(ptag) for ptag in pending_tags):
elif any(evt['tag'].startswith(ptag) for ptag in self.pending_tags) or any(rtag.search(evt['tag']) for rtag in self.pending_rtags):
self.pending_events.append(evt)
else:
log.trace('get_event() discarding cached event that no longer has any subscriptions = {0}'.format(evt))
return ret
def _get_event(self, wait, tag, pending_tags):
def _get_event(self, wait, tag, tags_regex):
start = time.time()
timeout_at = start + wait
while not wait or time.time() <= timeout_at:
@ -316,8 +359,10 @@ class SaltEvent(object):
else:
raise
if not ret['tag'].startswith(tag): # tag not match
if any(ret['tag'].startswith(ptag) for ptag in pending_tags):
if not ret['tag'].startswith(tag) and not any(rtag.search(ret['tag']) for rtag in tags_regex):
# tag not match
if any(ret['tag'].startswith(ptag) for ptag in self.pending_tags) or any(rtag.search(ret['tag']) for rtag in self.pending_rtags):
log.trace('get_event() caching unwanted event = {0}'.format(ret))
self.pending_events.append(ret)
if wait: # only update the wait timeout if we had one
wait = timeout_at - time.time()
@ -328,7 +373,7 @@ class SaltEvent(object):
return None
def get_event(self, wait=5, tag='', full=False, use_pending=False, pending_tags=None):
def get_event(self, wait=5, tag='', tags_regex=None, full=False):
'''
Get a single publication.
IF no publication available THEN block for up to wait seconds
@ -336,29 +381,32 @@ class SaltEvent(object):
IF wait is 0 then block forever.
New in Boron always checks the list of pending events
A tag specification can be given to only return publications with a tag
STARTING WITH a given string (tag) OR MATCHING one or more string
regular expressions (tags_regex list). If tag is not specified or given
as an empty string, all events are considered.
use_pending
Defines whether to keep all unconsumed events in a pending_events
list, or to discard events that don't match the requested tag. If
set to True, MAY CAUSE MEMORY LEAKS.
Searches cached publications first. If no cached publications are found
that match the given tag specification, new publications are received
and checked.
pending_tags
Add any events matching the listed tags to the pending queue.
Still MAY CAUSE MEMORY LEAKS but less likely than use_pending
assuming you later get_event for the tags you've listed here
If a publication is received that does not match the tag specification,
it is DISCARDED unless it is subscribed to via subscribe() and
subscribe_regex() which will cause it to be cached.
New in Boron
If a caller is not going to call get_event immediately after sending a
request, it MUST subscribe the result to ensure the response is not lost
should other regions of code call get_event for other purposes.
'''
if pending_tags is None:
pending_tags = []
if use_pending:
pending_tags = ['']
if tags_regex is None:
tags_regex = []
else:
tags_regex = [re.compile(rtag) for rtag in tags_regex]
ret = self._check_pending(tag, pending_tags)
ret = self._check_pending(tag, tags_regex)
if ret is None:
ret = self._get_event(wait, tag, pending_tags)
ret = self._get_event(wait, tag, tags_regex)
if ret is None or full:
return ret

View file

@ -201,10 +201,14 @@ def query(url,
continue
header_dict[comps[0].strip()] = comps[1].strip()
if username and password:
auth = (username, password)
if not auth:
if username and password:
auth = (username, password)
else:
auth = None
else:
auth = None
if not username and not password and isinstance(auth, tuple):
(username, password) = auth # pylint: disable=W0633
if requests_lib is True:
sess = requests.Session()
@ -214,6 +218,11 @@ def query(url,
sess_cookies = sess.cookies
sess.verify = verify_ssl
else:
if auth:
password_mgr = urllib_request.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, url, username, password)
else:
password_mgr = None
sess_cookies = None
if cookies is not None:
@ -275,6 +284,8 @@ def query(url,
urllib_request.HTTPHandler,
urllib_request.HTTPCookieProcessor(sess_cookies)
]
if password_mgr:
handlers.append(urllib_request.HTTPBasicAuthHandler(password_mgr))
if url.startswith('https') or port == 443:
if not HAS_MATCHHOSTNAME:

View file

@ -121,6 +121,6 @@ class WheelClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
>>> wheel.cmd('key.finger', ['jerry'])
{'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
'''
return self.low(fun, kwarg)
Wheel = WheelClient # for backward-compat

View file

@ -1,78 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import rest_package
# Globals
rest_package.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RestPkgTestCase(TestCase):
    '''
    Test cases for salt.modules.rest_package
    '''
    def test_list_pkgs(self):
        '''
        Test for list pkgs
        '''
        # A bare MagicMock as the proxyobject makes every proxied call
        # return a (truthy) MagicMock, so list_pkgs() must be truthy.
        with patch.dict(rest_package.__opts__, {'proxyobject': MagicMock()}):
            self.assertTrue(rest_package.list_pkgs())

    def test_install(self):
        '''
        Test for install
        '''
        # Same pattern: the mocked proxyobject returns a truthy value.
        with patch.dict(rest_package.__opts__, {'proxyobject': MagicMock()}):
            self.assertTrue(rest_package.install())

    def test_remove(self):
        '''
        Test for remove
        '''
        with patch.dict(rest_package.__opts__, {'proxyobject': MagicMock()}):
            self.assertTrue(rest_package.remove())

    def test_version(self):
        '''
        Test to return a string representing the package version or
        an empty string if not installed.
        '''
        with patch.dict(rest_package.__opts__, {'proxyobject': MagicMock()}):
            self.assertTrue(rest_package.version('A'))

    def test_installed(self):
        '''
        Test for installed
        '''
        with patch.dict(rest_package.__opts__, {'proxyobject': MagicMock()}):
            # package_status is pinned to {'ret': 'ret'} so the expected
            # return value of installed() is deterministic.
            with patch.object(rest_package.__opts__['proxyobject'],
                              'package_status',
                              MagicMock(return_value={'ret': 'ret'})):
                self.assertEqual(rest_package.installed('name'), 'ret')

                self.assertTrue(rest_package.installed('name'))

                # A version mismatch against the mocked status must report
                # the package as not installed.
                self.assertFalse(rest_package.installed('name', version='v'))
if __name__ == '__main__':
from integration import run_tests
run_tests(RestPkgTestCase, needs_daemon=False)

View file

@ -1,81 +0,0 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import rest_service
# Globals
rest_service.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RestSvcTestCase(TestCase):
    '''
    Test cases for salt.modules.rest_service
    '''
    def test_start(self):
        '''
        Test to start the specified service
        '''
        # Patch the proxyobject into __opts__, then pin service_start to
        # True so the module function's return value is deterministic.
        with patch.dict(rest_service.__opts__, {'proxyobject': MagicMock()}):
            with patch.object(rest_service.__opts__['proxyobject'],
                              'service_start', MagicMock(return_value=True)):
                self.assertTrue(rest_service.start('name'))

    def test_stop(self):
        '''
        Test to stop the specified service
        '''
        with patch.dict(rest_service.__opts__, {'proxyobject': MagicMock()}):
            with patch.object(rest_service.__opts__['proxyobject'],
                              'service_stop', MagicMock(return_value=True)):
                self.assertTrue(rest_service.stop('name'))

    def test_restart(self):
        '''
        Test to restart the named service
        '''
        with patch.dict(rest_service.__opts__, {'proxyobject': MagicMock()}):
            with patch.object(rest_service.__opts__['proxyobject'],
                              'service_restart', MagicMock(return_value=True)):
                self.assertTrue(rest_service.restart('name'))

    def test_status(self):
        '''
        Test to return the status for a service, returns a bool whether
        the service is running.
        '''
        with patch.dict(rest_service.__opts__, {'proxyobject': MagicMock()}):
            with patch.object(rest_service.__opts__['proxyobject'],
                              'service_status', MagicMock(return_value=True)):
                self.assertTrue(rest_service.status('name'))

    def test_list_(self):
        '''
        Test for list services.
        '''
        with patch.dict(rest_service.__opts__, {'proxyobject': MagicMock()}):
            with patch.object(rest_service.__opts__['proxyobject'],
                              'service_list_', MagicMock(return_value=True)):
                self.assertTrue(rest_service.list_())
if __name__ == '__main__':
from integration import run_tests
run_tests(RestSvcTestCase, needs_daemon=False)

View file

@ -150,11 +150,10 @@ class TestSaltEvent(TestCase):
)
)
def test_event_subscription(self):
def test_event_single(self):
'''Test a single event is received'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt1, {'data': 'foo1'})
@ -163,7 +162,6 @@ class TestSaltEvent(TestCase):
'''Test no event is received if the timeout is reached'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt1, {'data': 'foo1'})
@ -174,89 +172,79 @@ class TestSaltEvent(TestCase):
'''Test no wait timeout, we should block forever, until we get one '''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
evt = me.get_event(tag='evt2', wait=0)
with eventsender_process({'data': 'foo2'}, 'evt2', 5):
evt = me.get_event(tag='evt2', wait=0)
self.assertGotEvent(evt, {'data': 'foo2'})
def test_event_subscription_matching(self):
'''Test a subscription startswith matching'''
def test_event_matching(self):
'''Test a startswith match'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me.get_event(tag='evt1')
evt1 = me.get_event(tag='ev')
self.assertGotEvent(evt1, {'data': 'foo1'})
def test_event_subscription_matching_all(self):
'''Test a subscription matching'''
def test_event_matching_regex(self):
'''Test a regex match'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me.get_event(tag='not', tags_regex=['^ev'])
self.assertGotEvent(evt1, {'data': 'foo1'})
def test_event_matching_all(self):
'''Test an all match'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me.get_event(tag='')
self.assertGotEvent(evt1, {'data': 'foo1'})
def test_event_not_subscribed(self):
'''Test get event ignores non-subscribed events'''
'''Test get_event drops non-subscribed events'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
with eventsender_process({'data': 'foo1'}, 'evt1', 5):
me.fire_event({'data': 'foo1'}, 'evt2')
evt1 = me.get_event(tag='evt1', wait=10)
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
evt2 = me.get_event(tag='evt2')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt2, {'data': 'foo2'})
self.assertIsNone(evt1)
def test_event_subscription_cache(self):
'''Test subscriptions cache a message until requested'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe('evt1')
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
evt2 = me.get_event(tag='evt2')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt2, {'data': 'foo2'})
self.assertGotEvent(evt1, {'data': 'foo1'})
def test_event_multiple_subscriptions(self):
'''Test multiple subscriptions do not interfere'''
def test_event_subscriptions_cache_regex(self):
'''Test regex subscriptions cache a message until requested'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
with eventsender_process({'data': 'foo1'}, 'evt1', 5):
me.fire_event({'data': 'foo1'}, 'evt2')
evt1 = me.get_event(tag='evt1', wait=10)
me.subscribe_regex('1$')
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
evt2 = me.get_event(tag='evt2')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt2, {'data': 'foo2'})
self.assertGotEvent(evt1, {'data': 'foo1'})
def test_event_multiple_clients(self):
'''Test event is received by multiple clients'''
with eventpublisher_process():
me1 = event.MasterEvent(SOCK_DIR)
me1.subscribe()
me2 = event.MasterEvent(SOCK_DIR)
me2.subscribe()
me1.fire_event({'data': 'foo1'}, 'evt1')
evt1 = me1.get_event(tag='evt1')
self.assertGotEvent(evt1, {'data': 'foo1'})
# Can't replicate this failure in the wild, need to fix the
# test system bug here
#evt2 = me2.get_event(tag='evt1')
#self.assertGotEvent(evt2, {'data': 'foo1'})
def test_event_nested_subs(self):
'''Test nested event subscriptions do not drop events, issue #8580'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
# Since we now drop unrelated events to avoid memory leaks, see http://goo.gl/2n3L09 commit bcbc5340ef, the
# calls below will return None and will drop the unrelated events
evt2 = me.get_event(tag='evt2')
evt1 = me.get_event(tag='evt1')
self.assertGotEvent(evt2, {'data': 'foo2'})
# This one will be None because we're dropping unrelated events
self.assertIsNone(evt1)
# Fire events again
me.fire_event({'data': 'foo3'}, 'evt3')
me.fire_event({'data': 'foo4'}, 'evt4')
# We now force unrelated pending events not to be dropped, so both of the events below work and are not
# None
evt2 = me.get_event(tag='evt4', use_pending=True)
evt1 = me.get_event(tag='evt3', use_pending=True)
self.assertGotEvent(evt2, {'data': 'foo4'})
self.assertGotEvent(evt1, {'data': 'foo3'})
evt2 = me2.get_event(tag='evt1')
self.assertGotEvent(evt2, {'data': 'foo1'})
@expectedFailure
def test_event_nested_sub_all(self):
@ -264,7 +252,6 @@ class TestSaltEvent(TestCase):
# Show why not to call get_event(tag='')
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
me.fire_event({'data': 'foo1'}, 'evt1')
me.fire_event({'data': 'foo2'}, 'evt2')
evt2 = me.get_event(tag='')
@ -276,7 +263,6 @@ class TestSaltEvent(TestCase):
'''Test a large number of events, one at a time'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
for i in xrange(500):
me.fire_event({'data': '{0}'.format(i)}, 'testevents')
evt = me.get_event(tag='testevents')
@ -286,7 +272,6 @@ class TestSaltEvent(TestCase):
'''Test a large number of events, send all then recv all'''
with eventpublisher_process():
me = event.MasterEvent(SOCK_DIR)
me.subscribe()
# Must not exceed zmq HWM
for i in xrange(500):
me.fire_event({'data': '{0}'.format(i)}, 'testevents')