Merge branch 'develop' into issue36942

This commit is contained in:
Corvin Mcpherson 2017-10-01 18:56:58 -04:00
commit c67db52c73
146 changed files with 15080 additions and 1575 deletions

View file

@ -606,11 +606,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line. If set to 'mixed', the output
# will be terse unless a state failed, in which case that output will be full.
# If set to 'changes', the output will be full unless the state didn't change.
# The state_output setting controls which results will be output in full multi-line format
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full
# The state_output_diff setting changes whether or not the output from

View file

@ -635,9 +635,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line.
# The state_output setting controls which results will be output in full multi-line format
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full
# The state_output_diff setting changes whether or not the output from
@ -689,6 +692,12 @@
# for a full explanation.
#multiprocessing: True
# Limit the maximum number of processes or threads created by salt-minion.
# This is useful to avoid resource exhaustion in case the minion receives more
# publications than it is able to handle, as it limits the number of spawned
# processes or threads. -1 is the default and disables the limit.
#process_count_max: -1
##### Logging settings #####
##########################################

View file

@ -498,9 +498,12 @@
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line.
# The state_output setting controls which results will be output in full multi-line format
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full
# The state_output_diff setting changes whether or not the output from

View file

@ -577,11 +577,12 @@ syndic_user: salt
# all data that has a result of True and no changes will be suppressed.
#state_verbose: True
# The state_output setting changes if the output is the full multi line
# output for each changed state if set to 'full', but if set to 'terse'
# the output will be shortened to a single line. If set to 'mixed', the output
# will be terse unless a state failed, in which case that output will be full.
# If set to 'changes', the output will be full unless the state didn't change.
# The state_output setting controls which results will be output in full multi-line format
# full, terse - each state will be full/terse
# mixed - only states with errors will be full
# changes - states with changes and errors will be full
# full_id, mixed_id, changes_id and terse_id are also allowed;
# when set, the state ID will be used as name in the output
#state_output: full
# The state_output_diff setting changes whether or not the output from

View file

@ -2123,11 +2123,14 @@ output for states that failed or states that have changes.
Default: ``full``
The state_output setting changes if the output is the full multi line
output for each changed state if set to 'full', but if set to 'terse'
the output will be shortened to a single line. If set to 'mixed', the output
will be terse unless a state failed, in which case that output will be full.
If set to 'changes', the output will be full unless the state didn't change.
The state_output setting controls which results will be output in full multi-line format:
* ``full``, ``terse`` - each state will be full/terse
* ``mixed`` - only states with errors will be full
* ``changes`` - states with changes and errors will be full
``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed;
when set, the state ID will be used as name in the output.
.. code-block:: yaml
@ -4287,7 +4290,9 @@ information.
.. code-block:: yaml
reactor: []
reactor:
- 'salt/minion/*/start':
- salt://reactor/startup_tasks.sls
.. conf_master:: reactor_refresh_interval

View file

@ -1664,15 +1664,19 @@ output for states that failed or states that have changes.
Default: ``full``
The state_output setting changes if the output is the full multi line
output for each changed state if set to 'full', but if set to 'terse'
the output will be shortened to a single line.
The state_output setting controls which results will be output in full multi-line format:
* ``full``, ``terse`` - each state will be full/terse
* ``mixed`` - only states with errors will be full
* ``changes`` - states with changes and errors will be full
``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed;
when set, the state ID will be used as name in the output.
.. code-block:: yaml
state_output: full
.. conf_minion:: state_output_diff
``state_output_diff``
@ -2419,6 +2423,23 @@ executed in a thread.
multiprocessing: True
.. conf_minion:: process_count_max
``process_count_max``
---------------------
.. versionadded:: Oxygen
Default: ``-1``
Limit the maximum number of processes or threads created by ``salt-minion``.
This is useful to avoid resource exhaustion in case the minion receives more
publications than it is able to handle, as it limits the number of spawned
processes or threads. ``-1`` is the default and disables the limit.
.. code-block:: yaml
process_count_max: -1
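As an illustrative sketch, capping the minion at four worker processes or threads (the value here is hypothetical) would look like:
.. code-block:: yaml
    process_count_max: 4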
.. _minion-logging-settings:

View file

@ -25,6 +25,9 @@ configuration:
- web*:
- test.*
- pkg.*
# Allow managers to use saltutil module functions
manager_.*:
- saltutil.*
Permission Issues
-----------------

View file

@ -1,5 +1,5 @@
salt.runners.auth module
========================
salt.runners.auth
=================
.. automodule:: salt.runners.auth
:members:

View file

@ -1,5 +1,5 @@
salt.runners.digicertapi module
===============================
salt.runners.digicertapi
========================
.. automodule:: salt.runners.digicertapi
:members:

View file

@ -1,5 +1,5 @@
salt.runners.event module
=========================
salt.runners.event
==================
.. automodule:: salt.runners.event
:members:

View file

@ -1,5 +1,11 @@
salt.runners.mattermost module
==============================
salt.runners.mattermost
=======================
**Note for 2017.7 releases!**
Because the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module is not available in this release series, it must be imported from the develop branch to make this module work.
Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 <https://github.com/saltstack/salt/issues/43479>`_
.. automodule:: salt.runners.mattermost
:members:

View file

@ -1,5 +1,5 @@
salt.runners.smartos_vmadm module
=================================
salt.runners.smartos_vmadm
==========================
.. automodule:: salt.runners.smartos_vmadm
:members:

View file

@ -1,5 +1,5 @@
salt.runners.vault module
=========================
salt.runners.vault
==================
.. automodule:: salt.runners.vault
:members:

View file

@ -1,5 +1,5 @@
salt.runners.venafiapi module
=============================
salt.runners.venafiapi
======================
.. automodule:: salt.runners.venafiapi
:members:

View file

@ -1,5 +1,5 @@
salt.runners.vistara module
===========================
salt.runners.vistara
====================
.. automodule:: salt.runners.vistara
:members:

View file

@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``:
.. note::
You can have only one top level ``reactor`` section, so if one already
exists, add this code to the existing section. See :ref:`Understanding the
Structure of Reactor Formulas <reactor-structure>` to learn more about
reactor SLS syntax.
exists, add this code to the existing section. See :ref:`here
<reactor-sls>` to learn more about reactor SLS syntax.
Start the Salt Master in Debug Mode

View file

@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
port: 5959
proto: tcp
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
.. code-block:: yaml
engines_dirs:
- /home/bob/engines
Writing an Engine
=================

View file

@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and
other systems about operations.
The event system fires events with a very specific set of criteria. Every event has a
:strong:`tag`. Event tags allow for fast top level filtering of events. In
addition to the tag, each event has a data structure. This data structure is a
dict, which contains information about the event.
**tag**. Event tags allow for fast top-level filtering of events. In addition
to the tag, each event has a data structure. This data structure is a
dictionary, which contains information about the event.
.. _reactor-mapping-events:
@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run.
the :ref:`querystring syntax <querystring-syntax>` (e.g.
``salt://reactor/mycustom.sls?saltenv=reactor``).
Reactor sls files are similar to state and pillar sls files. They are
by default yaml + Jinja templates and are passed familiar context variables.
Reactor SLS files are similar to State and Pillar SLS files. They are by
default YAML + Jinja templates and are passed familiar context variables.
Click :ref:`here <reactor-jinja-context>` for more detailed information on the
variables available in Jinja templating.
They differ because of the addition of the ``tag`` and ``data`` variables.
- The ``tag`` variable is just the tag in the fired event.
- The ``data`` variable is the event's data dict.
Here is a simple reactor sls:
Here is the SLS for a simple reaction:
.. code-block:: jinja
@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor
system. The only difference is that the data is matched up to the salt command
API and the runner system. In this example, a command is published to the
``mysql1`` minion with a function of :py:func:`state.apply
<salt.modules.state.apply_>`. Similarly, a runner can be called:
<salt.modules.state.apply_>`, which performs a :ref:`highstate
<running-highstate>`. Similarly, a runner can be called:
.. code-block:: jinja
{% if data['data']['custom_var'] == 'runit' %}
call_runit_orch:
runner.state.orchestrate:
- mods: _orch.runit
- args:
- mods: orchestrate.runit
{% endif %}
This example will execute the state.orchestrate runner and initiate an execution
of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using
``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate"
as this is most likely to cause confusion.
of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
Writing SLS Files
-----------------
Types of Reactions
==================
Reactor SLS files are stored in the same location as State SLS files. This means
that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are
available to the reactor and orchestrator.
============================== ==================================================================================
Name Description
============================== ==================================================================================
:ref:`local <reactor-local>` Runs a :ref:`remote-execution function <all-salt.modules>` on targeted minions
:ref:`runner <reactor-runner>` Executes a :ref:`runner function <all-salt.runners>`
:ref:`wheel <reactor-wheel>` Executes a :ref:`wheel function <all-salt.wheel>` on the master
:ref:`caller <reactor-caller>` Runs a :ref:`remote-execution function <all-salt.modules>` on a masterless minion
============================== ==================================================================================
It is recommended to keep reactor and orchestrator SLS files in their own uniquely
named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``,
``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to
read through these files a few years down the road.
.. note::
The ``local`` and ``caller`` reaction types will be renamed for the Oxygen
release. These reaction types were named after Salt's internal client
interfaces, and are not intuitively named. Both ``local`` and ``caller``
will continue to work in Reactor SLS files, but for the Oxygen release the
documentation will be updated to reflect the new preferred naming.
The Goal of Writing Reactor SLS Files
=====================================
Where to Put Reactor SLS Files
==============================
Reactor SLS files share the familiar syntax from Salt States but there are
important differences. The goal of a Reactor file is to process a Salt event as
quickly as possible and then to optionally start a **new** process in response.
Reactor SLS files can come both from files local to the master, and from any of
backends enabled via the :conf_master:`fileserver_backend` config option. Files
placed in the Salt fileserver can be referenced using a ``salt://`` URL, just
like they can in State SLS files.
1. The Salt Reactor watches Salt's event bus for new events.
2. The event tag is matched against the list of event tags under the
``reactor`` section in the Salt Master config.
3. The SLS files for any matches are rendered into a data structure that
represents one or more function calls.
4. That data structure is given to a pool of worker threads for execution.
It is recommended to place reactor and orchestrator SLS files in their own
uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``,
``reactor/``, etc., to keep them organized.
.. _reactor-sls:
Writing Reactor SLS
===================
The different reaction types were developed separately and have historically
had different methods for passing arguments. For the 2017.7.2 release a new,
unified configuration schema has been introduced, which applies to all reaction
types.
The old config schema will continue to be supported, and there is no plan to
deprecate it at this time.
.. _reactor-local:
Local Reactions
---------------
A ``local`` reaction runs a :ref:`remote-execution function <all-salt.modules>`
on the targeted minions.
The old config schema required the positional and keyword arguments to be
manually separated by the user under ``arg`` and ``kwarg`` parameters. However,
this is not very user-friendly, as it forces the user to distinguish which type
of argument is which, and make sure that positional arguments are ordered
properly. Therefore, the new config schema is recommended if the master is
running a supported release.
The below two examples are equivalent:
+---------------------------------+-----------------------------+
| Supported in 2017.7.2 and later | Supported in all releases |
+=================================+=============================+
| :: | :: |
| | |
| install_zsh: | install_zsh: |
| local.state.single: | local.state.single: |
| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' |
| - tgt_type: grain | - tgt_type: grain |
| - args: | - arg: |
| - fun: pkg.installed | - pkg.installed |
| - name: zsh | - zsh |
| - fromrepo: updates | - kwarg: |
| | fromrepo: updates |
+---------------------------------+-----------------------------+
This reaction would be equivalent to running the following Salt command:
.. code-block:: bash
salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates
.. note::
Any other parameters in the :py:meth:`LocalClient().cmd_async()
<salt.client.LocalClient.cmd_async>` method can be passed at the same
indentation level as ``tgt``.
.. note::
``tgt_type`` is only required when the target expression defined in ``tgt``
uses a :ref:`target type <targeting>` other than a minion ID glob.
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
2017.7.0.
.. _reactor-runner:
Runner Reactions
----------------
Runner reactions execute :ref:`runner functions <all-salt.runners>` locally on
the master.
The old config schema called for passing arguments to the reaction directly
under the name of the runner function. However, this can cause unpredictable
interactions with the Reactor system's internal arguments. It is also possible
to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above
in :ref:`local reactions <reactor-local>`, but as noted above this is not very
user-friendly. Therefore, the new config schema is recommended if the master
is running a supported release.
The below two examples are equivalent:
+-------------------------------------------------+-------------------------------------------------+
| Supported in 2017.7.2 and later | Supported in all releases |
+=================================================+=================================================+
| :: | :: |
| | |
| deploy_app: | deploy_app: |
| runner.state.orchestrate: | runner.state.orchestrate: |
| - args: | - mods: orchestrate.deploy_app |
| - mods: orchestrate.deploy_app | - kwarg: |
| - pillar: | pillar: |
| event_tag: {{ tag }} | event_tag: {{ tag }} |
| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} |
+-------------------------------------------------+-------------------------------------------------+
Assuming that the event tag is ``foo``, and the data passed to the event is
``{'bar': 'baz'}``, then this reaction is equivalent to running the following
Salt command:
.. code-block:: bash
salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}'
.. _reactor-wheel:
Wheel Reactions
---------------
Wheel reactions run :ref:`wheel functions <all-salt.wheel>` locally on the
master.
Like :ref:`runner reactions <reactor-runner>`, the old config schema called for
wheel reactions to have arguments passed directly under the name of the
:ref:`wheel function <all-salt.wheel>` (or in ``arg`` or ``kwarg`` parameters).
The below two examples are equivalent:
+-----------------------------------+---------------------------------+
| Supported in 2017.7.2 and later | Supported in all releases |
+===================================+=================================+
| :: | :: |
| | |
| remove_key: | remove_key: |
| wheel.key.delete: | wheel.key.delete: |
| - args: | - match: {{ data['id'] }} |
| - match: {{ data['id'] }} | |
+-----------------------------------+---------------------------------+
.. _reactor-caller:
Caller Reactions
----------------
Caller reactions run :ref:`remote-execution functions <all-salt.modules>` on a
minion daemon's Reactor system. To run a Reactor on the minion, it is necessary
to configure the :mod:`Reactor Engine <salt.engines.reactor>` in the minion
config file, and then set up your watched events in a ``reactor`` section in the
minion config file as well.
.. note:: Masterless Minions use this Reactor
This is the only way to run the Reactor if you use masterless minions.
Both the old and new config schemas involve passing arguments under an ``args``
parameter. However, the old config schema only supports positional arguments.
Therefore, the new config schema is recommended if the masterless minion is
running a supported release.
The below two examples are equivalent:
+---------------------------------+---------------------------+
| Supported in 2017.7.2 and later | Supported in all releases |
+=================================+===========================+
| :: | :: |
| | |
| touch_file: | touch_file: |
| caller.file.touch: | caller.file.touch: |
| - args: | - args: |
| - name: /tmp/foo | - /tmp/foo |
+---------------------------------+---------------------------+
This reaction is equivalent to running the following Salt command:
.. code-block:: bash
salt-call file.touch name=/tmp/foo
Best Practices for Writing Reactor SLS Files
============================================
The Reactor works as follows:
1. The Salt Reactor watches Salt's event bus for new events.
2. Each event's tag is matched against the list of event tags configured under
the :conf_master:`reactor` section in the Salt Master config.
3. The SLS files for any matches are rendered into a data structure that
represents one or more function calls.
4. That data structure is given to a pool of worker threads for execution.
Matching and rendering Reactor SLS files is done sequentially in a single
process. Complex Jinja that calls out to slow Execution or Runner modules slows
down the rendering and causes other reactions to pile up behind the current
one. The worker pool is designed to handle complex and long-running processes
such as Salt Orchestrate.
process. For that reason, reactor SLS files should contain few individual
reactions (one, if at all possible). Also, keep in mind that reactions are
fired asynchronously (with the exception of :ref:`caller <reactor-caller>`) and
do *not* support :ref:`requisites <requisites>`.
tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process
started by the worker threads can be long-running. Using the reactor to fire
an orchestrate runner would be ideal.
Complex Jinja templating that calls out to slow :ref:`remote-execution
<all-salt.modules>` or :ref:`runner <all-salt.runners>` functions slows down
the rendering and causes other reactions to pile up behind the current one. The
worker pool is designed to handle complex and long-running processes like
:ref:`orchestration <orchestrate-runner>` jobs.
Therefore, when complex tasks are in order, :ref:`orchestration
<orchestrate-runner>` is a natural fit. Orchestration SLS files can be more
complex, and use requisites. Performing a complex task using orchestration lets
the Reactor system fire off the orchestration job and proceed with processing
other reactions.
.. _reactor-jinja-context:
Jinja Context
-------------
=============
Reactor files only have access to a minimal Jinja context. ``grains`` and
``pillar`` are not available. The ``salt`` object is available for calling
Runner and Execution modules but it should be used sparingly and only for quick
tasks for the reasons mentioned above.
Reactor SLS files only have access to a minimal Jinja context. ``grains`` and
``pillar`` are *not* available. The ``salt`` object is available for calling
:ref:`remote-execution <all-salt.modules>` or :ref:`runner <all-salt.runners>`
functions, but it should be used sparingly and only for quick tasks for the
reasons mentioned above.
In addition to the ``salt`` object, the following variables are available in
the Jinja context:
- ``tag`` - the tag from the event that triggered execution of the Reactor SLS
file
- ``data`` - the event's data dictionary
The ``data`` dict will contain an ``id`` key containing the minion ID, if the
event was fired from a minion, and a ``data`` key containing the data passed to
the event.
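As a minimal sketch (the minion ID, state ID, and log path below are hypothetical), a reactor SLS using these variables might look like:
.. code-block:: jinja
    {# react only to events fired from the hypothetical minion web1 #}
    {% if data.get('id') == 'web1' %}
    note_event:
      local.cmd.run:
        - tgt: {{ data['id'] }}
        - args:
          - cmd: echo 'saw event {{ tag }}' >> /tmp/reactor-events.log
    {% endif %}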
Advanced State System Capabilities
----------------------------------
==================================
Reactor SLS files, by design, do not support Requisites, ordering,
``onlyif``/``unless`` conditionals and most other powerful constructs from
Salt's State system.
Reactor SLS files, by design, do not support :ref:`requisites <requisites>`,
ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs
from Salt's State system.
Complex Master-side operations are best performed by Salt's Orchestrate system
so using the Reactor to kick off an Orchestrate run is a very common pairing.
@ -166,7 +370,7 @@ For example:
# /etc/salt/master.d/reactor.conf
# A custom event containing: {"foo": "Foo!", "bar": "bar*", "baz": "Baz!"}
reactor:
- myco/custom/event:
- my/custom/event:
- /srv/reactor/some_event.sls
.. code-block:: jinja
@ -174,15 +378,15 @@ For example:
# /srv/reactor/some_event.sls
invoke_orchestrate_file:
runner.state.orchestrate:
- mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls
- kwarg:
pillar:
event_tag: {{ tag }}
event_data: {{ data|json() }}
- args:
- mods: orchestrate.do_complex_thing
- pillar:
event_tag: {{ tag }}
event_data: {{ data|json }}
.. code-block:: jinja
# /srv/salt/_orch/do_complex_thing.sls
# /srv/salt/orchestrate/do_complex_thing.sls
{% set tag = salt.pillar.get('event_tag') %}
{% set data = salt.pillar.get('event_data') %}
@ -209,7 +413,7 @@ For example:
.. _beacons-and-reactors:
Beacons and Reactors
--------------------
====================
An event initiated by a beacon, when it arrives at the master, will be wrapped
inside a second event, such that the data object containing the beacon
@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file,
you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{
data['id'] }}`` as for events initiated directly on the event bus.
Similarly, the data dictionary attached to the event would be located in
``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``.
See the :ref:`beacon documentation <beacon-example>` for examples.
Fire an event
=============
Manually Firing an Event
========================
To fire an event from a minion call ``event.send``
From the Master
---------------
Use the :py:func:`event.send <salt.runners.event.send>` runner:
.. code-block:: bash
salt-call event.send 'foo' '{orchestrate: refresh}'
salt-run event.send foo '{orchestrate: refresh}'
After this is called, any reactor SLS files matching the event tag ``foo`` will
execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``.
From the Minion
---------------
See :py:mod:`salt.modules.event` for more information.
To fire an event to the master from a minion, call :py:func:`event.send
<salt.modules.event.send>`:
Knowing what event is being fired
=================================
.. code-block:: bash
The best way to see exactly what events are fired and what data is available in
each event is to use the :py:func:`state.event runner
salt-call event.send foo '{orchestrate: refresh}'
To fire an event to the minion's local event bus, call :py:func:`event.fire
<salt.modules.event.fire>`:
.. code-block:: bash
salt-call event.fire '{orchestrate: refresh}' foo
Referencing Data Passed in Events
---------------------------------
Assuming any of the above examples, any reactor SLS files triggered by watching
the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}``
equal to ``'refresh'``.
Getting Information About Events
================================
The best way to see exactly what events have been fired and what data is
available in each event is to use the :py:func:`state.event runner
<salt.runners.state.event>`.
.. seealso:: :ref:`Common Salt Events <event-master_events>`
@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file).
view the result of referencing Jinja variables. If the result is empty then
Jinja produced an empty result and the Reactor will ignore it.
.. _reactor-structure:
Passing Event Data to Minions or Orchestration as Pillar
--------------------------------------------------------
Understanding the Structure of Reactor Formulas
===============================================
**I.e., when to use `arg` and `kwarg` and when to specify the function
arguments directly.**
While the reactor system uses the same basic data structure as the state
system, the functions that will be called using that data structure are
different functions than are called via Salt's state system. The Reactor can
call Runner modules using the `runner` prefix, Wheel modules using the `wheel`
prefix, and can also cause minions to run Execution modules using the `local`
prefix.
.. versionchanged:: 2014.7.0
The ``cmd`` prefix was renamed to ``local`` for consistency with other
parts of Salt. A backward-compatible alias was added for ``cmd``.
The Reactor runs on the master and calls functions that exist on the master. In
the case of Runner and Wheel functions the Reactor can just call those
functions directly since they exist on the master and are run on the master.
In the case of functions that exist on minions and are run on minions, the
Reactor still needs to call a function on the master in order to send the
necessary data to the minion so the minion can execute that function.
The Reactor calls functions exposed in :ref:`Salt's Python API documentation
<client-apis>`, and thus the structure of Reactor files very transparently
reflects the function signatures of those functions.
Calling Execution modules on Minions
------------------------------------
The Reactor sends commands down to minions in the exact same way Salt's CLI
interface does. It calls a function locally on the master that sends the name
of the function as well as a list of any arguments and a dictionary of any
keyword arguments that the minion should use to execute that function.
Specifically, the Reactor calls the async version of :py:meth:`this function
<salt.client.LocalClient.cmd>`. You can see that function has 'arg' and 'kwarg'
parameters which are both values that are sent down to the minion.
Executing remote commands maps to the :strong:`LocalClient` interface which is
used by the :strong:`salt` command. This interface more specifically maps to
the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This
means that the arguments passed are being passed to the :strong:`cmd_async`
method, not the remote method. A field starts with :strong:`local` to use the
:strong:`LocalClient` subsystem. The result is, to execute a remote command,
a reactor formula would look like this:
.. code-block:: yaml
clean_tmp:
local.cmd.run:
- tgt: '*'
- arg:
- rm -rf /tmp/*
The ``arg`` option takes a list of arguments as they would be presented on the
command line, so the above declaration is the same as running this salt
command:
.. code-block:: bash
salt '*' cmd.run 'rm -rf /tmp/*'
Use the ``tgt_type`` argument to specify a matcher:
.. code-block:: yaml
clean_tmp:
local.cmd.run:
- tgt: 'os:Ubuntu'
- tgt_type: grain
- arg:
- rm -rf /tmp/*
clean_tmp:
local.cmd.run:
- tgt: 'G@roles:hbase_master'
- tgt_type: compound
- arg:
- rm -rf /tmp/*
.. note::
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
2017.7.0 (2016.11.x and earlier).
Any other parameters in the :py:meth:`LocalClient().cmd()
<salt.client.LocalClient.cmd>` method can be specified as well.
Executing Reactors from the Minion
----------------------------------
The minion can be set up to use the Reactor via a reactor engine. This just
sets up and listens to the minion's event bus, instead of to the master's.
The biggest difference is that you have to use the caller method on the
Reactor, which is the equivalent of salt-call, to run your commands.
:mod:`Reactor Engine setup <salt.engines.reactor>`
.. code-block:: yaml
clean_tmp:
caller.cmd.run:
- arg:
- rm -rf /tmp/*
.. note:: Masterless Minions use this Reactor
This is the only way to run the Reactor if you use masterless minions.
Calling Runner modules and Wheel modules
----------------------------------------
Calling Runner modules and Wheel modules from the Reactor uses a more direct
syntax since the function is being executed locally instead of sending a
command to a remote system to be executed there. There are no 'arg' or 'kwarg'
parameters (unless the Runner function or Wheel function accepts a parameter
with either of those names.)
For example:
.. code-block:: yaml
clear_the_grains_cache_for_all_minions:
runner.cache.clear_grains
If the :py:func:`the runner takes arguments <salt.runners.cloud.profile>` then
they must be specified as keyword arguments.
.. code-block:: yaml
spin_up_more_web_machines:
runner.cloud.profile:
- prof: centos_6
- instances:
- web11 # These VM names would be generated via Jinja in a
- web12 # real-world example.
To determine the proper names for the arguments, check the documentation
or source code for the runner function you wish to call.
Passing event data to Minions or Orchestrate as Pillar
------------------------------------------------------
An interesting trick to pass data from the Reactor script to
An interesting trick to pass data from the Reactor SLS file to
:py:func:`state.apply <salt.modules.state.apply_>` is to pass it as inline
Pillar data since both functions take a keyword argument named ``pillar``.
@ -484,10 +567,9 @@ from the event to the state file via inline Pillar.
add_new_minion_to_pool:
local.state.apply:
- tgt: 'haproxy*'
- arg:
- haproxy.refresh_pool
- kwarg:
pillar:
- args:
- mods: haproxy.refresh_pool
- pillar:
new_minion: {{ data['id'] }}
{% endif %}
@ -503,17 +585,16 @@ This works with Orchestrate files as well:
call_some_orchestrate_file:
runner.state.orchestrate:
- mods: _orch.some_orchestrate_file
- pillar:
stuff: things
- args:
- mods: orchestrate.some_orchestrate_file
- pillar:
stuff: things
Which is equivalent to the following command at the CLI:
.. code-block:: bash
salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}'
This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls.
salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}'
Finally, that data is available in the state file using the normal Pillar
lookup syntax. The following example is grabbing web server names and IP
@ -564,7 +645,7 @@ includes the minion id, which we can use for matching.
- 'salt/minion/ink*/start':
- /srv/reactor/auth-complete.sls
In this sls file, we say that if the key was rejected we will delete the key on
In this SLS file, we say that if the key was rejected we will delete the key on
the master and then also tell the master to ssh in to the minion and tell it to
restart the minion, since a minion process will die if the key is rejected.
@ -580,19 +661,21 @@ authentication every ten seconds by default.
{% if not data['result'] and data['id'].startswith('ink') %}
minion_remove:
wheel.key.delete:
- match: {{ data['id'] }}
- args:
- match: {{ data['id'] }}
minion_rejoin:
local.cmd.run:
- tgt: salt-master.domain.tld
- arg:
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
- args:
- cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
{% endif %}
{# Ink server is sending new key -- accept this key #}
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %}
minion_add:
wheel.key.accept:
- match: {{ data['id'] }}
- args:
- match: {{ data['id'] }}
{% endif %}
No if statements are needed here because we already limited this action to just

File diff suppressed because it is too large

View file

@ -8,17 +8,17 @@ Comparison Operators in Package Installation
--------------------------------------------
Salt now supports using comparison operators (e.g. ``>=1.2.3``) when installing
packages on minions which use :mod:`yum/dnf <salt.modules.yumpkg>` or :mod:`apt
<salt.modules.aptpkg>`. This is supported both in the :py:func:`pkg.installed
<salt.states.pkg.installed>` state and in the ``pkg.install`` remote execution
function.
packages on minions which use :mod:`yum/dnf <salt.modules.yumpkg>` or
:mod:`apt <salt.modules.aptpkg>`. This is supported both in the
:py:func:`pkg.installed <salt.states.pkg.installed>` state and in the ``pkg.install``
remote execution function.
:ref:`Master Tops <master-tops-system>` Changes
-----------------------------------------------
When both :ref:`Master Tops <master-tops-system>` and a :ref:`Top File
<states-top>` produce SLS matches for a given minion, the matches were being
merged in an unpredictable manner which did not preserve ordering. This has
When both :ref:`Master Tops <master-tops-system>` and a
:ref:`Top File <states-top>` produce SLS matches for a given minion, the matches
were being merged in an unpredictable manner which did not preserve ordering. This has
been changed. The top file matches now execute in the expected order, followed
by any master tops matches that are not matched via a top file.
@ -46,6 +46,21 @@ noon PST so the Stormpath external authentication module has been removed.
https://stormpath.com/oktaplusstormpath
New Grains
----------
New core grains have been added to expose any storage initiator setting.
The new grains added are:
* ``fc_wwn``: Show all fibre channel world wide port names for a host
* ``iscsi_iqn``: Show the iSCSI IQN name for a host
New Modules
-----------
- :mod:`salt.modules.purefa <salt.modules.purefa>`
New NaCl Renderer
-----------------
@ -55,14 +70,14 @@ New support for Cisco UCS Chassis
---------------------------------
The salt proxy minion now allows for control of Cisco UCS chassis. See
the `cimc` modules for details.
the ``cimc`` modules for details.
New salt-ssh roster
-------------------
A new roster has been added that allows users to pull in a list of hosts
for salt-ssh targeting from a ~/.ssh configuration. For full details,
please see the `sshconfig` roster.
for salt-ssh targeting from a ``~/.ssh`` configuration. For full details,
please see the ``sshconfig`` roster.
New GitFS Features
------------------
@ -88,6 +103,13 @@ environments (i.e. ``saltenvs``) have been added:
ignore all tags and use branches only, and also to keep SHAs from being made
available as saltenvs.
Additional output modes
-----------------------
The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``terse_id``.
Just like ``mixed_id``, these use the state ID as name in the highstate output.
For more information on these output modes, see the docs for the :mod:`Highstate Outputter <salt.output.highstate>`.
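For example, a minimal sketch selecting terse output keyed by state ID:
.. code-block:: yaml
    state_output: terse_id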
Salt Cloud Features
-------------------
@ -110,6 +132,194 @@ file. For example:
These commands will run in sequence **before** the bootstrap script is executed.
New pillar/master_tops module called saltclass
----------------------------------------------
This module clones the behaviour of reclass (http://reclass.pantsfullofunix.net/), without the need for an external app, and adds several features to improve flexibility.
Saltclass lets you define your nodes from simple ``yaml`` files (``.yml``) through hierarchical class inheritance with the possibility to override pillars down the tree.
**Features**
- Define your nodes through hierarchical class inheritance
- Reuse your reclass data with minimal modifications
- applications => states
- parameters => pillars
- Use Jinja templating in your yaml definitions
- Access to the following Salt objects in Jinja
- ``__opts__``
- ``__salt__``
- ``__grains__``
- ``__pillars__``
- ``minion_id``
- Choose how to merge or override your lists using the ``^`` character (see examples)
- Expand variables ``${}`` with the possibility to escape them if needed with ``\${}`` (see examples)
- Ignore missing nodes/classes (they are logged) and simply return empty without breaking the pillar module
An example subset of data is available here: http://git.mauras.ch/salt/saltclass/src/master/examples
========================== ===========
Terms usable in yaml files Description
========================== ===========
classes A list of classes that will be processed in order
states A list of states that will be returned by master_tops function
pillars A yaml dictionary that will be returned by the ext_pillar function
environment Node saltenv that will be used by master_tops
========================== ===========
A class consists of:
- zero or more parent classes
- zero or more states
- any number of pillars
A child class can override pillars from a parent class.
A node definition is a class in itself with an added ``environment`` parameter for ``saltenv`` definition.
**class names**
Class names mimic the Salt way of defining states and pillar files.
This means that ``default.users`` class name will correspond to one of these:
- ``<saltclass_path>/classes/default/users.yml``
- ``<saltclass_path>/classes/default/users/init.yml``
**Saltclass tree**
A saltclass tree would look like this:
.. code-block:: text
<saltclass_path>
├── classes
│ ├── app
│ │ ├── borgbackup.yml
│ │ └── ssh
│ │ └── server.yml
│ ├── default
│ │ ├── init.yml
│ │ ├── motd.yml
│ │ └── users.yml
│ ├── roles
│ │ ├── app.yml
│ │ └── nginx
│ │ ├── init.yml
│ │ └── server.yml
│ └── subsidiaries
│ ├── gnv.yml
│ ├── qls.yml
│ └── zrh.yml
└── nodes
├── geneva
│ └── gnv.node1.yml
├── lausanne
│ ├── qls.node1.yml
│ └── qls.node2.yml
├── node127.yml
└── zurich
├── zrh.node1.yml
├── zrh.node2.yml
└── zrh.node3.yml
**Examples**
``<saltclass_path>/nodes/lausanne/qls.node1.yml``
.. code-block:: yaml
environment: base
classes:
{% for class in ['default'] %}
- {{ class }}
{% endfor %}
- subsidiaries.{{ __grains__['id'].split('.')[0] }}
``<saltclass_path>/classes/default/init.yml``
.. code-block:: yaml
classes:
- default.users
- default.motd
states:
- openssh
pillars:
default:
network:
dns:
srv1: 192.168.0.1
srv2: 192.168.0.2
domain: example.com
ntp:
srv1: 192.168.10.10
srv2: 192.168.10.20
``<saltclass_path>/classes/subsidiaries/gnv.yml``
.. code-block:: yaml
pillars:
default:
network:
sub: Geneva
dns:
srv1: 10.20.0.1
srv2: 10.20.0.2
srv3: 192.168.1.1
domain: gnv.example.com
users:
adm1:
uid: 1210
gid: 1210
gecos: 'Super user admin1'
homedir: /srv/app/adm1
adm3:
uid: 1203
gid: 1203
gecos: 'Super user admin3'
Variable expansions:
Escaped variables are rendered as is - ``${test}``
Missing variables are rendered as is - ``${net:dns:srv2}``
.. code-block:: yaml
pillars:
app:
config:
dns:
srv1: ${default:network:dns:srv1}
srv2: ${net:dns:srv2}
uri: https://application.domain/call?\${test}
prod_parameters:
- p1
- p2
- p3
pkg:
- app-core
- app-backend
List override:
Not using ``^`` as the first entry will simply merge the lists
.. code-block:: yaml
pillars:
app:
pkg:
- ^
- app-frontend
**Known limitation**
Currently you can't have both a variable and an escaped variable in the same string, as the escaped one will not be rendered correctly: ``\${xx}`` will stay as-is instead of being rendered as ``${xx}``.
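A minimal sketch of the unsupported combination (the pillar names are illustrative):
.. code-block:: yaml
    pillars:
      app:
        config:
          # the expanded variable renders, but \${test} stays escaped as-is
          uri: https://${default:network:dns:srv1}/call?\${test}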
Newer PyWinRM Versions
----------------------
@ -149,185 +359,200 @@ check the configuration for the correct format and only load if the validation p
- ``avahi_announce`` beacon
Old behavior:
```
beacons:
avahi_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
.. code-block:: yaml
beacons:
avahi_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
New behavior:
```
beacons:
avahi_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
.. code-block:: yaml
beacons:
avahi_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
- ``bonjour_announce`` beacon
Old behavior:
```
beacons:
bonjour_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
.. code-block:: yaml
beacons:
bonjour_announce:
run_once: True
servicetype: _demo._tcp
port: 1234
txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
New behavior:
```
beacons:
bonjour_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
```
.. code-block:: yaml
beacons:
bonjour_announce:
- run_once: True
- servicetype: _demo._tcp
- port: 1234
- txt:
ProdName: grains.productname
SerialNo: grains.serialnumber
Comments: 'this is a test'
- ``btmp`` beacon
Old behavior:
```
beacons:
btmp: {}
```
.. code-block:: yaml
beacons:
btmp: {}
New behavior:
```
beacons:
btmp: []
```
.. code-block:: yaml
beacons:
btmp: []
- ``glxinfo`` beacon
Old behavior:
```
beacons:
glxinfo:
user: frank
screen_event: True
```
.. code-block:: yaml
beacons:
glxinfo:
user: frank
screen_event: True
New behavior:
```
beacons:
glxinfo:
- user: frank
- screen_event: True
```
.. code-block:: yaml
beacons:
glxinfo:
- user: frank
- screen_event: True
- ``haproxy`` beacon
Old behavior:
```
beacons:
haproxy:
- www-backend:
threshold: 45
servers:
.. code-block:: yaml
beacons:
haproxy:
- www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
New behavior:
.. code-block:: yaml
beacons:
haproxy:
- backends:
www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
```
New behavior:
```
beacons:
haproxy:
- backends:
www-backend:
threshold: 45
servers:
- web1
- web2
- interval: 120
```
- ``inotify`` beacon
Old behavior:
```
beacons:
inotify:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
coalesce: True
```
.. code-block:: yaml
beacons:
inotify:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
coalesce: True
New behavior:
```
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
```
.. code-block:: yaml
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
- ``journald`` beacon
Old behavior:
```
beacons:
journald:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
```
New behavior:
```
beacons:
journald:
- services:
.. code-block:: yaml
beacons:
journald:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
```
New behavior:
.. code-block:: yaml
beacons:
journald:
- services:
sshd:
SYSLOG_IDENTIFIER: sshd
PRIORITY: 6
- ``load`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
load:
1m:
@ -341,51 +566,55 @@ check the configuration for the correct format and only load if the validation p
- 1.0
emitatstartup: True
onchangeonly: False
```
New behavior:
```
beacons:
load:
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- emitatstartup: True
- onchangeonly: False
```
.. code-block:: yaml
beacons:
load:
- averages:
1m:
- 0.0
- 2.0
5m:
- 0.0
- 1.5
15m:
- 0.1
- 1.0
- emitatstartup: True
- onchangeonly: False
- ``log`` beacon
Old behavior:
```
beacons:
log:
file: <path>
<tag>:
regex: <pattern>
```
New behavior:
```
beacons:
log:
- file: <path>
- tags:
.. code-block:: yaml
beacons:
log:
file: <path>
<tag>:
regex: <pattern>
```
New behavior:
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
- ``network_info`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
network_info:
- eth0:
@ -398,10 +627,11 @@ check the configuration for the correct format and only load if the validation p
errout: 100
dropin: 100
dropout: 100
```
New behavior:
```
.. code-block:: yaml
beacons:
network_info:
- interfaces:
@ -415,12 +645,13 @@ check the configuration for the correct format and only load if the validation p
errout: 100
dropin: 100
dropout: 100
```
- ``network_settings`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
network_settings:
eth0:
@ -429,10 +660,11 @@ check the configuration for the correct format and only load if the validation p
onvalue: 1
eth1:
linkmode:
```
New behavior:
```
.. code-block:: yaml
beacons:
network_settings:
- interfaces:
@ -442,12 +674,13 @@ check the configuration for the correct format and only load if the validation p
onvalue: 1
- eth1:
linkmode:
```
- ``proxy_example`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
proxy_example:
endpoint: beacon
@ -458,60 +691,66 @@ check the configuration for the correct format and only load if the validation p
beacons:
proxy_example:
- endpoint: beacon
```
- ``ps`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
ps:
- salt-master: running
- mysql: stopped
```
New behavior:
```
.. code-block:: yaml
beacons:
ps:
- processes:
salt-master: running
mysql: stopped
```
- ``salt_proxy`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
salt_proxy:
- p8000: {}
- p8001: {}
```
New behavior:
```
.. code-block:: yaml
beacons:
salt_proxy:
- proxies:
p8000: {}
p8001: {}
```
- ``sensehat`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
sensehat:
humidity: 70%
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
```
New behavior:
```
.. code-block:: yaml
beacons:
sensehat:
- sensors:
@ -519,21 +758,22 @@ check the configuration for the correct format and only load if the validation p
temperature: [20, 40]
temperature_from_pressure: 40
pressure: 1500
```
- ``service`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
service:
salt-master:
mysql:
```
New behavior:
```
.. code-block:: yaml
beacons:
service:
- services:
@ -541,93 +781,102 @@ check the configuration for the correct format and only load if the validation p
onchangeonly: True
delay: 30
uncleanshutdown: /run/nginx.pid
```
- ``sh`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
sh: {}
```
New behavior:
```
.. code-block:: yaml
beacons:
sh: []
```
- ``status`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
status: {}
```
New behavior:
```
.. code-block:: yaml
beacons:
status: []
```
- ``telegram_bot_msg`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
telegram_bot_msg:
token: "<bot access token>"
accept_from:
- "<valid username>"
interval: 10
```
New behavior:
```
.. code-block:: yaml
beacons:
telegram_bot_msg:
- token: "<bot access token>"
- accept_from:
- "<valid username>"
- interval: 10
```
- ``twilio_txt_msg`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
twilio_txt_msg:
account_sid: "<account sid>"
auth_token: "<auth token>"
twilio_number: "+15555555555"
interval: 10
```
New behavior:
```
.. code-block:: yaml
beacons:
twilio_txt_msg:
- account_sid: "<account sid>"
- auth_token: "<auth token>"
- twilio_number: "+15555555555"
- interval: 10
```
- ``wtmp`` beacon
Old behavior:
```
.. code-block:: yaml
beacons:
wtmp: {}
```
New behavior:
```
.. code-block:: yaml
beacons:
wtmp: []
```
Deprecations
------------

View file

@ -132,7 +132,7 @@ fi
###############################################################################
# Remove the salt from the paths.d
###############################################################################
if [ ! -f "/etc/paths.d/salt" ]; then
if [ -f "/etc/paths.d/salt" ]; then
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
rm "/etc/paths.d/salt"
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"

View file

@ -35,8 +35,9 @@ _salt_get_keys(){
}
_salt(){
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:-"$HOME/.cache/salt-comp-cache_functions"}
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:-"last hour"}
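# use $SALT_COMP_CACHE_FUNCTIONS for the cache path when set, otherwise fall back to the default below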
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then
mkdir -p "$(dirname ${_salt_cache_functions})"

View file

@ -369,46 +369,13 @@ class LoadAuth(object):
eauth_config = self.opts['external_auth'][eauth]
if not groups:
groups = []
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
if group_config.rstrip('%') in groups:
group_auth_match = True
break
# If a group_auth_match is set it means only that we have a
# user which matches at least one or more of the groups defined
# in the configuration file.
external_auth_in_db = False
for entry in eauth_config:
if entry.startswith('^'):
external_auth_in_db = True
break
# If neither a catchall, a named membership or a group
# membership is found, there is no need to continue. Simply
# deny the user access.
if not ((name in eauth_config) |
('*' in eauth_config) |
group_auth_match | external_auth_in_db):
# Auth successful, but no matching user found in config
log.warning('Authorization failure occurred.')
return None
# We now have an authenticated session and it is time to determine
# what the user has access to.
auth_list = []
if name in eauth_config:
auth_list = eauth_config[name]
elif '*' in eauth_config:
auth_list = eauth_config['*']
if group_auth_match:
auth_list = self.ckminions.fill_auth_list_from_groups(
eauth_config,
groups,
auth_list)
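# Build the auth list from both the user's name and group memberships
# in a single consolidated helper call.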
auth_list = self.ckminions.fill_auth_list(
eauth_config,
name,
groups)
auth_list = self.__process_acl(load, auth_list)

View file

@ -10,14 +10,19 @@ Beacon to fire events at failed login of users
# Import python libs
from __future__ import absolute_import
import logging
import os
import struct
import time
# Import Salt Libs
import salt.utils.files
# Import 3rd-party libs
from salt.ext import six
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'btmp'
BTMP = '/var/log/btmp'
@ -37,6 +42,15 @@ FIELDS = [
SIZE = struct.calcsize(FMT)
LOC_KEY = 'btmp.loc'
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
import dateutil.parser as dateutil_parser
_TIME_SUPPORTED = True
except ImportError:
_TIME_SUPPORTED = False
def __virtual__():
if os.path.isfile(BTMP):
@ -44,6 +58,20 @@ def __virtual__():
return False
def _check_time_range(time_range, now):
'''
Check whether the given timestamp ('now') falls within the configured time range
'''
if _TIME_SUPPORTED:
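# convert the configured start/end strings to epoch timestamps for comparison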
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
return bool(_start <= now <= _end)
else:
log.error('Dateutil is required.')
return False
def _get_loc():
'''
return the active file location
@ -60,6 +88,45 @@ def validate(config):
if not isinstance(config, list):
return False, ('Configuration for btmp beacon must '
'be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'users' in _config:
if not isinstance(_config['users'], dict):
return False, ('User configuration for btmp beacon must '
'be a dictionary.')
else:
for user in _config['users']:
if _config['users'][user] and \
'time_range' in _config['users'][user]:
_time_range = _config['users'][user]['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
if 'defaults' in _config:
if not isinstance(_config['defaults'], dict):
return False, ('Defaults configuration for btmp beacon must '
'be a dictionary.')
else:
if 'time_range' in _config['defaults']:
_time_range = _config['defaults']['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'btmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'btmp beacon must contain '
'start & end options.')
return True, 'Valid beacon configuration'
@ -72,8 +139,40 @@ def beacon(config):
beacons:
btmp: []
beacons:
btmp:
- users:
gareth:
- defaults:
time_range:
start: '8am'
end: '4pm'
beacons:
btmp:
- users:
gareth:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
'''
ret = []
users = None
defaults = None
for config_item in config:
if 'users' in config_item:
users = config_item['users']
if 'defaults' in config_item:
defaults = config_item['defaults']
with salt.utils.files.fopen(BTMP, 'rb') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
@ -83,6 +182,7 @@ def beacon(config):
else:
fp_.seek(loc)
while True:
now = int(time.time())
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret
@ -91,7 +191,30 @@ def beacon(config):
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], six.string_types):
event[field] = event[field].strip('\x00')
ret.append(event)
if isinstance(event[field], salt.ext.six.string_types):
if isinstance(event[field], bytes):
event[field] = event[field].decode()
event[field] = event[field].strip('b\x00')
else:
event[field] = event[field].strip('\x00')
if users:
if event['user'] in users:
_user = users[event['user']]
if isinstance(_user, dict) and 'time_range' in _user:
if _check_time_range(_user['time_range'], now):
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'],
now):
ret.append(event)
else:
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'], now):
ret.append(event)
else:
ret.append(event)
return ret
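The time-range gate above is plain epoch arithmetic. A minimal standalone sketch of the same check, assuming python-dateutil is installed ('8am'/'4pm' are illustrative values):
import time
import dateutil.parser

def in_time_range(start, end, now=None):
    # dateutil resolves '8am' to today's date at 08:00; compare as epochs
    now = int(time.time()) if now is None else now
    _start = int(time.mktime(dateutil.parser.parse(start).timetuple()))
    _end = int(time.mktime(dateutil.parser.parse(end).timetuple()))
    return _start <= now <= _end

in_time_range('8am', '4pm')  # True only between 08:00 and 16:00 today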

View file

@ -23,7 +23,9 @@ import re
# Import salt libs
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
# Import third party libs
try:

View file

@ -10,14 +10,19 @@ Beacon to fire events at login of users as registered in the wtmp file
# Import Python libs
from __future__ import absolute_import
import logging
import os
import struct
import time
# Import salt libs
import salt.utils.files
# Import 3rd-party libs
from salt.ext import six
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'wtmp'
WTMP = '/var/log/wtmp'
@ -37,9 +42,15 @@ FIELDS = [
SIZE = struct.calcsize(FMT)
LOC_KEY = 'wtmp.loc'
import logging
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
import dateutil.parser as dateutil_parser
_TIME_SUPPORTED = True
except ImportError:
_TIME_SUPPORTED = False
def __virtual__():
if os.path.isfile(WTMP):
@ -47,6 +58,20 @@ def __virtual__():
return False
def _check_time_range(time_range, now):
'''
Check time range
'''
if _TIME_SUPPORTED:
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
return bool(_start <= now <= _end)
else:
log.error('Dateutil is required.')
return False
def _get_loc():
'''
return the active file location
@ -62,6 +87,44 @@ def validate(config):
# Configuration for wtmp beacon should be a list of dicts
if not isinstance(config, list):
return False, ('Configuration for wtmp beacon must be a list.')
else:
_config = {}
list(map(_config.update, config))
if 'users' in _config:
if not isinstance(_config['users'], dict):
return False, ('User configuration for wtmp beacon must '
'be a dictionary.')
else:
for user in _config['users']:
if _config['users'][user] and \
'time_range' in _config['users'][user]:
_time_range = _config['users'][user]['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
if 'defaults' in _config:
if not isinstance(_config['defaults'], dict):
return False, ('Defaults configuration for wtmp beacon must '
'be a dictionary.')
else:
if 'time_range' in _config['defaults']:
_time_range = _config['defaults']['time_range']
if not isinstance(_time_range, dict):
return False, ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
else:
if not all(k in _time_range for k in ('start', 'end')):
return False, ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return True, 'Valid beacon configuration'
@ -74,8 +137,40 @@ def beacon(config):
beacons:
wtmp: []
'''
beacons:
wtmp:
- users:
gareth:
- defaults:
time_range:
start: '8am'
end: '4pm'
beacons:
wtmp:
- users:
gareth:
time_range:
start: '8am'
end: '4pm'
- defaults:
time_range:
start: '8am'
end: '4pm'
'''
ret = []
users = None
defaults = None
for config_item in config:
if 'users' in config_item:
users = config_item['users']
if 'defaults' in config_item:
defaults = config_item['defaults']
with salt.utils.files.fopen(WTMP, 'rb') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
@ -85,6 +180,7 @@ def beacon(config):
else:
fp_.seek(loc)
while True:
now = int(time.time())
raw = fp_.read(SIZE)
if len(raw) != SIZE:
return ret
@ -93,7 +189,30 @@ def beacon(config):
event = {}
for ind, field in enumerate(FIELDS):
event[field] = pack[ind]
if isinstance(event[field], six.string_types):
event[field] = event[field].strip('\x00')
ret.append(event)
if isinstance(event[field], salt.ext.six.string_types):
if isinstance(event[field], bytes):
event[field] = event[field].decode()
event[field] = event[field].strip('b\x00')
else:
event[field] = event[field].strip('\x00')
if users:
if event['user'] in users:
_user = users[event['user']]
if isinstance(_user, dict) and 'time_range' in _user:
if _check_time_range(_user['time_range'], now):
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'],
now):
ret.append(event)
else:
ret.append(event)
else:
if defaults and 'time_range' in defaults:
if _check_time_range(defaults['time_range'], now):
ret.append(event)
else:
ret.append(event)
return ret

View file

@ -73,7 +73,7 @@ class Cache(object):
self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
else:
self.cachedir = cachedir
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
self.serial = Serial(opts)
self._modules = None
self._kwargs = kwargs
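The one-line fix above matters because dict.get() falls back to its second argument wholesale. A quick illustration with made-up option values:
defaults = {'cache': 'localfs', 'worker_threads': 5}
opts = {}
opts.get('cache', defaults)           # old code: returns the whole defaults dict
opts.get('cache', defaults['cache'])  # new code: returns 'localfs', a valid driver name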

View file

@ -481,18 +481,17 @@ def list_(bank):
Lists entries stored in the specified bank.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
banks = redis_server.smembers(bank_redis_key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
if not banks:
return []
return list(bank_keys)
return list(banks)
def contains(bank, key):
@ -500,15 +499,11 @@ def contains(bank, key):
Checks if the specified bank contains the specified key.
'''
redis_server = _get_redis_server()
bank_keys_redis_key = _get_bank_keys_redis_key(bank)
bank_keys = None
bank_redis_key = _get_bank_redis_key(bank)
try:
bank_keys = redis_server.smembers(bank_keys_redis_key)
return redis_server.sismember(bank_redis_key, key)
except (RedisConnectionError, RedisResponseError) as rerr:
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
rerr=rerr)
log.error(mesg)
raise SaltCacheError(mesg)
if not bank_keys:
return False
return key in bank_keys
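The rewritten contains() asks Redis for membership directly instead of pulling the whole set down and testing in Python. A sketch with a redis-py client (key and member names are hypothetical):
import redis

r = redis.StrictRedis()
r.sadd('$BANK', 'minion1')
r.smembers('$BANK')               # old approach: fetch all members, test client-side
r.sismember('$BANK', 'minion1')   # new approach: server-side test, returns True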

View file

@ -844,6 +844,10 @@ class LocalClient(object):
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
Normally :py:meth:`cmd_iter` does not yield results for minions that
are not connected. If you want it to return results for disconnected
minions, set `expect_minions=True` in `kwargs`.
:return: A generator yielding the individual minion returns
.. code-block:: python
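A hedged sketch of such a call (target and function are illustrative):
import salt.client

local = salt.client.LocalClient()
# With expect_minions=True, disconnected minions matching '*' yield entries too
for ret in local.cmd_iter('*', 'test.ping', expect_minions=True):
    print(ret)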

View file

@ -364,29 +364,19 @@ class SyncClientMixin(object):
# packed into the top level object. The plan is to move away from
# that since the caller knows what is an arg vs a kwarg, but while
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in
f_call = None
if u'arg' not in low:
# there are no kwargs in the low object passed in.
if u'arg' in low and u'kwarg' in low:
args = low[u'arg']
kwargs = low[u'kwarg']
else:
f_call = salt.utils.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get(u'args', ())
else:
args = low[u'arg']
if u'kwarg' not in low:
log.critical(
u'kwargs must be passed inside the low data within the '
u'\'kwarg\' key. See usage of '
u'salt.utils.args.parse_input() and '
u'salt.minion.load_args_and_kwargs() elsewhere in the '
u'codebase.'
)
kwargs = {}
else:
kwargs = low[u'kwarg']
kwargs = f_call.get(u'kwargs', {})
# Update the event data with loaded args and kwargs
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])

View file

@ -3543,16 +3543,15 @@ def list_nodes_min(location=None, call=None):
for instance in instances:
if isinstance(instance['instancesSet']['item'], list):
for item in instance['instancesSet']['item']:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
items = instance['instancesSet']['item']
else:
item = instance['instancesSet']['item']
items = [instance['instancesSet']['item']]
for item in items:
state = item['instanceState']['name']
name = _extract_name_tag(item)
id = item['instanceId']
ret[name] = {'state': state, 'id': id}
ret[name] = {'state': state, 'id': id}
return ret

View file

@ -101,7 +101,7 @@ __virtualname__ = 'libvirt'
log = logging.getLogger(__name__)
def libvirt_error_handler(ctx, error):
def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument
'''
Redirect stderr prints from libvirt to salt logging.
'''

View file

@ -7,6 +7,7 @@ XenServer Cloud Driver
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
@ -157,13 +158,27 @@ def _get_session():
default=False,
search_global=False
)
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug('url: {} user: {} password: {}, originator: {}'.format(
url,
user,
'XXX-pw-redacted-XXX',
originator))
session.xenapi.login_with_password(user, password, api_version, originator)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug('url: {} user: {} password: {}, originator: {}'.format(
url,
user,
'XXX-pw-redacted-XXX',
originator))
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = str(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug('session is -> url: {} user: {} password: {}, originator:{}'.format(
new_url,
user,
'XXX-pw-redacted-XXX',
originator))
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
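The fallback assumes the XenAPI failure carries the pool master's address as its second detail field (typically a HOST_IS_SLAVE error). An illustrative trace of the URL rewrite:
# Assumption: ex.details == ['HOST_IS_SLAVE', '192.0.2.10']
url = 'https://xen-slave.example.com'
'/'.join(url.split('/')[:2]) + '/' + '192.0.2.10'   # -> 'https://192.0.2.10'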
@ -182,14 +197,19 @@ def list_nodes():
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
ret[record['name_label']] = {
'id': record['uuid'],
'image': record['other_config']['base_template_name'],
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {} does not have the base_template_name attribute'.format(
record['name_label']))
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
@ -296,10 +316,17 @@ def list_nodes_full(session=None):
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {} does not have the base_template_name attribute'.format(
record['name_label']))
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = record['other_config']['base_template_name']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
@ -455,8 +482,14 @@ def show_instance(name, session=None, call=None):
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {} does not have the base_template_name attribute'.format(
record['name_label']))
ret = {'id': record['uuid'],
'image': record['other_config']['base_template_name'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
@ -716,7 +749,7 @@ def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is faster and should be used if source and target are
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference

View file

@ -337,6 +337,9 @@ VALID_OPTS = {
# Whether or not processes should be forked when needed. The alternative is to use threading.
'multiprocessing': bool,
# Maximum number of concurrently active processes at any given point in time
'process_count_max': int,
# Whether or not the salt minion should run scheduled mine updates
'mine_enabled': bool,
@ -746,6 +749,10 @@ VALID_OPTS = {
'fileserver_limit_traversal': bool,
'fileserver_verify_config': bool,
# Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
# applied only if the user wasn't matched by other matchers.
'permissive_acl': bool,
# Optionally enables keeping the calculated user's auth list in the token file.
'keep_acl_in_token': bool,
@ -1264,6 +1271,7 @@ DEFAULT_MINION_OPTS = {
'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': True,
'process_count_max': -1,
'mine_enabled': True,
'mine_return_job': False,
'mine_interval': 60,
@ -1532,6 +1540,7 @@ DEFAULT_MASTER_OPTS = {
'external_auth': {},
'token_expire': 43200,
'token_expire_user_override': False,
'permissive_acl': False,
'keep_acl_in_token': False,
'eauth_acl_module': '',
'eauth_tokens': 'localfs',

219
salt/config/schemas/esxi.py Normal file
View file

@ -0,0 +1,219 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
salt.config.schemas.esxi
~~~~~~~~~~~~~~~~~~~~~~~~
ESXi host configuration schemas
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt libs
from salt.utils.schema import (DefinitionsSchema,
Schema,
ComplexSchemaItem,
ArrayItem,
IntegerItem,
BooleanItem,
StringItem,
OneOfItem)
class VMwareScsiAddressItem(StringItem):
pattern = r'vmhba\d+:C\d+:T\d+:L\d+'
class DiskGroupDiskScsiAddressItem(ComplexSchemaItem):
'''
Schema item of an ESXi host disk group containing disk SCSI addresses
'''
title = 'Diskgroup Disk Scsi Address Item'
description = 'ESXi host diskgroup item containing disk SCSI addresses'
cache_scsi_addr = VMwareScsiAddressItem(
title='Cache Disk Scsi Address',
description='Specifies the SCSI address of the cache disk',
required=True)
capacity_scsi_addrs = ArrayItem(
title='Capacity Scsi Addresses',
description='Array with the SCSI addresses of the capacity disks',
items=VMwareScsiAddressItem(),
min_items=1)
class DiskGroupDiskIdItem(ComplexSchemaItem):
'''
Schema item of an ESXi host disk group containing disk ids
'''
title = 'Diskgroup Disk Id Item'
description = 'ESXi host diskgroup item containing disk ids'
cache_id = StringItem(
title='Cache Disk Id',
description='Specifies the id of the cache disk',
pattern=r'[^\s]+')
capacity_ids = ArrayItem(
title='Capacity Disk Ids',
description='Array with the ids of the capacity disks',
items=StringItem(pattern=r'[^\s]+'),
min_items=1)
class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema):
'''
Schema of ESXi host diskgroups containing disk SCSI addresses
'''
title = 'Diskgroups Disk Scsi Address Schema'
description = 'ESXi host diskgroup schema containing disk SCSI addresses'
diskgroups = ArrayItem(
title='Diskgroups',
description='List of diskgroups in an ESXi host',
min_items=1,
items=DiskGroupDiskScsiAddressItem(),
required=True)
erase_disks = BooleanItem(
title='Erase Diskgroup Disks',
required=True)
class DiskGroupsDiskIdSchema(DefinitionsSchema):
'''
Schema of ESXi host diskgroups containing disk ids
'''
title = 'Diskgroups Disk Id Schema'
description = 'ESXi host diskgroup schema containing disk ids'
diskgroups = ArrayItem(
title='DiskGroups',
description='List of disk groups in an ESXi host',
min_items=1,
items=DiskGroupDiskIdItem(),
required=True)
class VmfsDatastoreDiskIdItem(ComplexSchemaItem):
'''
Schema item of a VMFS datastore referencing a backing disk id
'''
title = 'VMFS Datastore Disk Id Item'
description = 'VMFS datastore item referencing a backing disk id'
name = StringItem(
title='Name',
description='Specifies the name of the VMFS datastore',
required=True)
backing_disk_id = StringItem(
title='Backing Disk Id',
description=('Specifies the id of the disk backing the VMFS '
'datastore'),
pattern=r'[^\s]+',
required=True)
vmfs_version = IntegerItem(
title='VMFS Version',
description='VMFS version',
enum=[1, 2, 3, 5])
class VmfsDatastoreDiskScsiAddressItem(ComplexSchemaItem):
'''
Schema item of a VMFS datastore referencing a backing disk SCSI address
'''
title = 'VMFS Datastore Disk Scsi Address Item'
description = 'VMFS datastore item referencing a backing disk SCSI address'
name = StringItem(
title='Name',
description='Specifies the name of the VMFS datastore',
required=True)
backing_disk_scsi_addr = VMwareScsiAddressItem(
title='Backing Disk Scsi Address',
description=('Specifies the SCSI address of the disk backing the VMFS '
'datastore'),
required=True)
vmfs_version = IntegerItem(
title='VMFS Version',
description='VMFS version',
enum=[1, 2, 3, 5])
class VmfsDatastoreSchema(DefinitionsSchema):
'''
Schema of a VMFS datastore
'''
title = 'VMFS Datastore Schema'
description = 'Schema of a VMFS datastore'
datastore = OneOfItem(
items=[VmfsDatastoreDiskScsiAddressItem(),
VmfsDatastoreDiskIdItem()],
required=True)
class HostCacheSchema(DefinitionsSchema):
'''
Schema of ESXi host cache
'''
title = 'Host Cache Schema'
description = 'Schema of the ESXi host cache'
enabled = BooleanItem(
title='Enabled',
required=True)
datastore = VmfsDatastoreDiskScsiAddressItem(required=True)
swap_size = StringItem(
title='Host cache swap size (in GB or %)',
pattern=r'(\d+GiB)|(([0-9]|([1-9][0-9])|100)%)',
required=True)
erase_backing_disk = BooleanItem(
title='Erase Backing Disk',
required=True)
class SimpleHostCacheSchema(Schema):
'''
Simplified Schema of ESXi host cache
'''
title = 'Simple Host Cache Schema'
description = 'Simplified schema of the ESXi host cache'
enabled = BooleanItem(
title='Enabled',
required=True)
datastore_name = StringItem(title='Datastore Name',
required=True)
swap_size_MiB = IntegerItem(title='Host cache swap size in MiB',
minimum=1)
class EsxiProxySchema(Schema):
'''
Schema of the esxi proxy input
'''
title = 'Esxi Proxy Schema'
description = 'Esxi proxy schema'
additional_properties = False
proxytype = StringItem(required=True,
enum=['esxi'])
host = StringItem(pattern=r'[^\s]+') # Used when connecting directly
vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter
esxi_host = StringItem()
username = StringItem()
passwords = ArrayItem(min_items=1,
items=StringItem(),
unique_items=True)
mechanism = StringItem(enum=['userpass', 'sspi'])
# TODO Should be changed when anyOf is supported for schemas
domain = StringItem()
principal = StringItem()
protocol = StringItem()
port = IntegerItem(minimum=1)
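For context, an illustrative pillar snippet that DiskGroupsDiskIdSchema above would accept (disk ids are made up):
diskgroups:
  - cache_id: 'naa.600508b1001c5c4a'
    capacity_ids:
      - 'naa.600508b1001c5c4b'
      - 'naa.600508b1001c5c4c'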

View file

@ -14,6 +14,8 @@ from __future__ import absolute_import
# Import Salt libs
from salt.utils.schema import (Schema,
ArrayItem,
IntegerItem,
StringItem)
@ -31,3 +33,25 @@ class VCenterEntitySchema(Schema):
vcenter = StringItem(title='vCenter',
description='Specifies the vcenter hostname',
required=True)
class VCenterProxySchema(Schema):
'''
Schema for the proxy configuration used to connect to a VCenter.
'''
title = 'VCenter Proxy Connection Schema'
description = 'Schema that describes the connection to a VCenter'
additional_properties = False
proxytype = StringItem(required=True,
enum=['vcenter'])
vcenter = StringItem(required=True, pattern=r'[^\s]+')
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
username = StringItem()
passwords = ArrayItem(min_items=1,
items=StringItem(),
unique_items=True)
domain = StringItem()
principal = StringItem(default='host')
protocol = StringItem(default='https')
port = IntegerItem(minimum=1)

View file

@ -170,6 +170,14 @@ def clean_old_jobs(opts):
def mk_key(opts, user):
if HAS_PWD:
uid = None
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError:
# User doesn't exist in the system
if opts['client_acl_verify']:
return None
if salt.utils.platform.is_windows():
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
@ -197,9 +205,9 @@ def mk_key(opts, user):
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
if HAS_PWD and uid is not None:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
os.chown(keyfile, uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
@ -214,27 +222,26 @@ def access_keys(opts):
'''
# TODO: Need a way to get all available users for systems not supported by pwd module.
# For now users pattern matching will not work for publisher_acl.
users = []
keys = {}
publisher_acl = opts['publisher_acl']
acl_users = set(publisher_acl.keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
key = mk_key(opts, user)
if key is not None:
keys[user] = key
# Check other users matching ACL patterns
if opts['client_acl_verify'] and HAS_PWD:
log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
for user in pwd.getpwall():
users.append(user.pw_name)
log.profile('End pwd.getpwall() call in masterapi access_keys function')
for user in acl_users:
log.info('Preparing the %s key for local communication', user)
keys[user] = mk_key(opts, user)
# Check other users matching ACL patterns
if HAS_PWD:
for user in users:
user = user.pw_name
if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users):
keys[user] = mk_key(opts, user)
log.profile('End pwd.getpwall() call in masterapi access_keys function')
return keys

View file

@ -266,6 +266,12 @@ class SaltCacheError(SaltException):
'''
class TimeoutError(SaltException):
'''
Thrown when an operation cannot be completed within a given time limit.
'''
class SaltReqTimeoutError(SaltException):
'''
Thrown when a salt master request call fails to return within the timeout
@ -436,6 +442,18 @@ class VMwareObjectRetrievalError(VMwareSaltError):
'''
class VMwareObjectExistsError(VMwareSaltError):
'''
Used when a VMware object exists
'''
class VMwareObjectNotFoundError(VMwareSaltError):
'''
Used when a VMware object was not found
'''
class VMwareApiError(VMwareSaltError):
'''
Used when representing a generic VMware API error

View file

@ -16,6 +16,7 @@ import os
import json
import socket
import sys
import glob
import re
import platform
import logging
@ -65,6 +66,7 @@ __salt__ = {
'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
'cmd.run_ps': salt.modules.cmdmod.powershell,
}
log = logging.getLogger(__name__)
@ -1205,6 +1207,10 @@ _OS_FAMILY_MAP = {
'Raspbian': 'Debian',
'Devuan': 'Debian',
'antiX': 'Debian',
'Kali': 'Debian',
'neon': 'Debian',
'Cumulus': 'Debian',
'Deepin': 'Debian',
'NILinuxRT': 'NILinuxRT',
'NILinuxRT-XFCE': 'NILinuxRT',
'KDE neon': 'Debian',
@ -2468,3 +2474,124 @@ def default_gateway():
except Exception as exc:
pass
return grains
def fc_wwn():
'''
Return list of fiber channel HBA WWNs
'''
grains = {}
grains['fc_wwn'] = False
if salt.utils.platform.is_linux():
grains['fc_wwn'] = _linux_wwns()
elif salt.utils.platform.is_windows():
grains['fc_wwn'] = _windows_wwns()
return grains
def iscsi_iqn():
'''
Return iSCSI IQN
'''
grains = {}
grains['iscsi_iqn'] = False
if salt.utils.platform.is_linux():
grains['iscsi_iqn'] = _linux_iqn()
elif salt.utils.platform.is_windows():
grains['iscsi_iqn'] = _windows_iqn()
elif salt.utils.platform.is_aix():
grains['iscsi_iqn'] = _aix_iqn()
return grains
def _linux_iqn():
'''
Return iSCSI IQN from a Linux host.
'''
ret = []
initiator = '/etc/iscsi/initiatorname.iscsi'
if os.path.isfile(initiator):
with salt.utils.files.fopen(initiator, 'r') as _iscsi:
for line in _iscsi:
if line.find('InitiatorName') != -1:
iqn = line.split('=')
final_iqn = iqn[1].rstrip()
ret.extend([final_iqn])
return ret
def _aix_iqn():
'''
Return iSCSI IQN from an AIX host.
'''
ret = []
aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name'
aixret = __salt__['cmd.run'](aixcmd)
if aixret[0].isalpha():
iqn = aixret.split()
final_iqn = iqn[1].rstrip()
ret.extend([final_iqn])
return ret
def _linux_wwns():
'''
Return Fibre Channel port WWNs from a Linux host.
'''
ret = []
for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
with salt.utils.files.fopen(fcfile, 'r') as _wwn:
for line in _wwn:
line = line.rstrip()
ret.extend([line[2:]])
return ret
def _windows_iqn():
'''
Return iSCSI IQN from a Windows host.
'''
ret = []
wmic = salt.utils.path.which('wmic')
if not wmic:
return ret
namespace = r'\\root\WMI'
mspath = 'MSiSCSIInitiator_MethodClass'
get = 'iSCSINodeName'
cmdret = __salt__['cmd.run_all'](
'{0} /namespace:{1} path {2} get {3} /format:table'.format(
wmic, namespace, mspath, get))
for line in cmdret['stdout'].splitlines():
if line[0].isalpha():
continue
line = line.rstrip()
ret.extend([line])
return ret
def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []
cmdret = __salt__['cmd.run_ps'](ps_cmd)
for line in cmdret:
line = line.rstrip()
ret.append(line)
return ret
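Once synced, the new grains can be queried like any others. An illustrative CLI call and output:
salt '*' grains.get fc_wwn
# minion1:
#     - 500143802426baf4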

View file

@ -501,7 +501,13 @@ class Key(object):
if os.path.isdir(m_cache):
for minion in os.listdir(m_cache):
if minion not in minions and minion not in preserve_minions:
shutil.rmtree(os.path.join(m_cache, minion))
try:
shutil.rmtree(os.path.join(m_cache, minion))
except (OSError, IOError) as ex:
log.warning('Key: Delete cache for %s got OSError/IOError: %s \n',
minion,
ex)
continue
cache = salt.cache.factory(self.opts)
clist = cache.list(self.ACC)
if clist:
@ -979,7 +985,13 @@ class RaetKey(Key):
if os.path.isdir(m_cache):
for minion in os.listdir(m_cache):
if minion not in minions and minion not in preserve_minions:
shutil.rmtree(os.path.join(m_cache, minion))
try:
shutil.rmtree(os.path.join(m_cache, minion))
except (OSError, IOError) as ex:
log.warning('RaetKey: Delete cache for %s got OSError/IOError: %s \n',
minion,
ex)
continue
cache = salt.cache.factory(self.opts)
clist = cache.list(self.ACC)
if clist:

View file

@ -11,7 +11,18 @@
Fluent Logging Handler
----------------------
In the salt configuration file:
In the `fluent` configuration file:
.. code-block:: text
<source>
type forward
bind localhost
port 24224
</source>
Then, to send logs via fluent in Logstash format, add the
following to the salt (master and/or minion) configuration file:
.. code-block:: yaml
@ -19,14 +30,32 @@
host: localhost
port: 24224
In the `fluent`_ configuration file:
To send logs via fluent in the Graylog raw json format, add the
following to the salt (master and/or minion) configuration file:
.. code-block:: text
.. code-block:: yaml
<source>
type forward
port 24224
</source>
fluent_handler:
host: localhost
port: 24224
payload_type: graylog
tags:
- salt_master.SALT
The above also illustrates the `tags` option, which allows
one to set descriptive (or useful) tags on records being
sent. If not provided, this defaults to the single tag:
'salt'. Also note that, via Graylog "magic", the 'facility'
of the logged message is set to 'SALT' (the portion of the
tag after the first period), while the tag itself will be
set to simply 'salt_master'. This is a feature, not a bug :)
Note:
There is a third emitter, for the GELF format, but it is
largely untested, and I don't currently have a setup supporting
this config, so while it runs cleanly and outputs what LOOKS to
be valid GELF, any real-world feedback on its usefulness and
correctness will be appreciated.
Log Level
.........
@ -53,7 +82,7 @@ import time
import datetime
import socket
import threading
import types
# Import salt libs
from salt.log.setup import LOG_LEVELS
@ -91,6 +120,19 @@ __virtualname__ = 'fluent'
_global_sender = None
# Python logger's idea of "level" is wildly at variance with
# Graylog's (and, incidentally, the rest of the civilized world).
syslog_levels = {
'EMERG': 0,
'ALERT': 2,
'CRIT': 2,
'ERR': 3,
'WARNING': 4,
'NOTICE': 5,
'INFO': 6,
'DEBUG': 7
}
def setup(tag, **kwargs):
host = kwargs.get('host', 'localhost')
@ -116,55 +158,133 @@ def __virtual__():
def setup_handlers():
host = port = address = None
host = port = None
if 'fluent_handler' in __opts__:
host = __opts__['fluent_handler'].get('host', None)
port = __opts__['fluent_handler'].get('port', None)
version = __opts__['fluent_handler'].get('version', 1)
payload_type = __opts__['fluent_handler'].get('payload_type', None)
# in general, you want the value of tag to ALSO be a member of tags
tags = __opts__['fluent_handler'].get('tags', ['salt'])
tag = tags[0] if len(tags) else 'salt'
if payload_type == 'graylog':
version = 0
elif payload_type == 'gelf':
# We only support version 1.1 (the latest) of GELF...
version = 1.1
else:
# Default to logstash for backwards compat
payload_type = 'logstash'
version = __opts__['fluent_handler'].get('version', 1)
if host is None and port is None:
log.debug(
'The required \'fluent_handler\' configuration keys, '
'\'host\' and/or \'port\', are not properly configured. Not '
'configuring the fluent logging handler.'
'enabling the fluent logging handler.'
)
else:
logstash_formatter = LogstashFormatter(version=version)
fluent_handler = FluentHandler('salt', host=host, port=port)
fluent_handler.setFormatter(logstash_formatter)
formatter = MessageFormatter(payload_type=payload_type, version=version, tags=tags)
fluent_handler = FluentHandler(tag, host=host, port=port)
fluent_handler.setFormatter(formatter)
fluent_handler.setLevel(
LOG_LEVELS[
__opts__['fluent_handler'].get(
'log_level',
# Not set? Get the main salt log_level setting on the
# configuration file
__opts__.get(
'log_level',
# Also not set?! Default to 'error'
'error'
)
)
]
LOG_LEVELS[__opts__['fluent_handler'].get('log_level', __opts__.get('log_level', 'error'))]
)
yield fluent_handler
if host is None and port is None and address is None:
if host is None and port is None:
yield False
class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, msg_type='logstash', msg_path='logstash', version=1):
self.msg_path = msg_path
self.msg_type = msg_type
class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None):
self.payload_type = payload_type
self.version = version
self.format = getattr(self, 'format_v{0}'.format(version))
super(LogstashFormatter, self).__init__(fmt=None, datefmt=None)
self.tag = tags[0] if len(tags) else 'salt' # 'salt' for backwards compat
self.tags = tags
self.msg_path = msg_path if msg_path else payload_type
self.msg_type = msg_type if msg_type else payload_type
format_func = 'format_{0}_v{1}'.format(payload_type, version).replace('.', '_')
self.format = getattr(self, format_func)
super(MessageFormatter, self).__init__(fmt=None, datefmt=None)
def formatTime(self, record, datefmt=None):
if self.payload_type == 'gelf': # GELF uses epoch times
return record.created
return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'
def format_v0(self, record):
def format_graylog_v0(self, record):
'''
Graylog 'raw' format is essentially the raw record, minimally munged to provide
the bare minimum that td-agent requires to accept and route the event. This is
well suited to a config where the client td-agents log directly to Graylog.
'''
message_dict = {
'message': record.getMessage(),
'timestamp': self.formatTime(record),
# Graylog uses syslog levels, not whatever it is Python does...
'level': syslog_levels.get(record.levelname, 'ALERT'),
'tag': self.tag
}
if record.exc_info:
exc_info = self.formatException(record.exc_info)
message_dict.update({'full_message': exc_info})
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
'message', 'msg', 'relativeCreated', 'version'):
# These are already handled above or explicitly pruned.
continue
if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
val = value
else:
val = repr(value)
message_dict.update({'{0}'.format(key): val})
return message_dict
def format_gelf_v1_1(self, record):
'''
If your agent is (or can be) configured to forward pre-formed GELF to Graylog
with ZERO fluent processing, this function is for YOU, pal...
'''
message_dict = {
'version': self.version,
'host': salt.utils.network.get_fqhostname(),
'short_message': record.getMessage(),
'timestamp': self.formatTime(record),
'level': syslog_levels.get(record.levelname, 'ALERT'),
"_tag": self.tag
}
if record.exc_info:
exc_info = self.formatException(record.exc_info)
message_dict.update({'full_message': exc_info})
# Add any extra attributes to the message field
for key, value in six.iteritems(record.__dict__):
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
'message', 'msg', 'relativeCreated', 'version'):
# These are already handled above or explicitly avoided.
continue
if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
val = value
else:
val = repr(value)
# GELF spec require "non-standard" fields to be prefixed with '_' (underscore).
message_dict.update({'_{0}'.format(key): val})
return message_dict
def format_logstash_v0(self, record):
'''
Messages are formatted in logstash's expected format.
'''
host = salt.utils.network.get_fqhostname()
message_dict = {
'@timestamp': self.formatTime(record),
@ -186,7 +306,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
),
'@source_host': host,
'@source_path': self.msg_path,
'@tags': ['salt'],
'@tags': self.tags,
'@type': self.msg_type,
}
@ -216,7 +336,10 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
message_dict['@fields'][key] = repr(value)
return message_dict
def format_v1(self, record):
def format_logstash_v1(self, record):
'''
Messages are formatted in logstash's expected format.
'''
message_dict = {
'@version': 1,
'@timestamp': self.formatTime(record),
@ -230,7 +353,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
'funcName': record.funcName,
'processName': record.processName,
'message': record.getMessage(),
'tags': ['salt'],
'tags': self.tags,
'type': self.msg_type
}

View file

@ -1333,6 +1333,7 @@ class Minion(MinionBase):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
@ -1365,6 +1366,15 @@ class Minion(MinionBase):
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
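The throttle is driven purely by minion configuration. A minimal example (the value is illustrative; the default of -1 leaves it disabled):
# /etc/salt/minion
process_count_max: 10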
@ -1643,13 +1653,24 @@ class Minion(MinionBase):
minion side execution.
'''
salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid']))
ret = {
u'return': {},
u'retcode': {},
u'success': {}
}
for ind in range(0, len(data[u'fun'])):
ret[u'success'][data[u'fun'][ind]] = False
multifunc_ordered = opts.get(u'multifunc_ordered', False)
num_funcs = len(data[u'fun'])
if multifunc_ordered:
ret = {
u'return': [None] * num_funcs,
u'retcode': [None] * num_funcs,
u'success': [False] * num_funcs
}
else:
ret = {
u'return': {},
u'retcode': {},
u'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret[u'success'][data[u'fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
@ -1673,16 +1694,27 @@ class Minion(MinionBase):
data[u'arg'][ind],
data)
minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][data[u'fun'][ind]] = True
if multifunc_ordered:
ret[u'return'][ind] = func(*args, **kwargs)
ret[u'retcode'][ind] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][ind] = True
else:
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
u'retcode',
0
)
ret[u'success'][data[u'fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(u'The minion function caused an exception: %s', exc)
ret[u'return'][data[u'fun'][ind]] = trb
if multifunc_ordered:
ret[u'return'][ind] = trb
else:
ret[u'return'][data[u'fun'][ind]] = trb
ret[u'jid'] = data[u'jid']
ret[u'fun'] = data[u'fun']
ret[u'fun_args'] = data[u'arg']
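To make the two return shapes concrete, a sketch for a hypothetical two-function job:
# data[u'fun'] == ['test.ping', 'test.version']   (illustrative)
# multifunc_ordered=False: ret[u'return'] == {'test.ping': True, 'test.version': '2017.7.2'}
# multifunc_ordered=True:  ret[u'return'] == [True, '2017.7.2']   # index-aligned with data[u'fun']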
@ -2674,6 +2706,8 @@ class SyndicManager(MinionBase):
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
@ -2684,15 +2718,15 @@ class SyndicManager(MinionBase):
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
successful = True
except SaltClientError:
log.error(
u'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
continue
log.critical(u'Unable to call %s on any masters!', func)
if not successful:
log.critical(u'Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''

View file

@ -97,11 +97,15 @@ __virtualname__ = 'pkg'
def __virtual__():
'''
Confirm this module is on a Debian based system
Confirm this module is on a Debian-based system
'''
if __grains__.get('os_family') in ('Kali', 'Debian', 'neon', 'Deepin'):
return __virtualname__
elif __grains__.get('os_family', False) == 'Cumulus':
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
if __grains__.get('os_family') == 'Debian':
return __virtualname__
return (False, 'The pkg module could not be loaded: unsupported OS family')

View file

@ -42,6 +42,7 @@ from __future__ import absolute_import
import logging
import json
import yaml
import time
# Import salt libs
from salt.ext import six
@ -2148,6 +2149,7 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None,
salt myminion boto_iam.list_entities_for_policy mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
retries = 30
params = {}
for arg in ('path_prefix', 'entity_filter'):
@ -2155,21 +2157,26 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None,
params[arg] = locals()[arg]
policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile)
try:
allret = {
'policy_groups': [],
'policy_users': [],
'policy_roles': [],
}
for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params):
for k, v in six.iteritems(allret):
v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k))
return allret
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to list {0} policy entities.'
log.error(msg.format(policy_name))
return {}
while retries:
try:
allret = {
'policy_groups': [],
'policy_users': [],
'policy_roles': [],
}
for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params):
for k, v in six.iteritems(allret):
v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k))
return allret
except boto.exception.BotoServerError as e:
if e.error_code == 'Throttling':
log.debug("Throttled by AWS API, will retry in 5 seconds...")
time.sleep(5)
retries -= 1
continue
log.error('Failed to list {0} policy entities: {1}'.format(policy_name, e.message))
return {}
return {}
def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None,

View file

@ -505,10 +505,17 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot",
param_list = []
for key, value in six.iteritems(parameters):
item = (key, value, apply_method)
item = odict.OrderedDict()
item.update({'ParameterName': key})
item.update({'ApplyMethod': apply_method})
if type(value) is bool:
item.update({'ParameterValue': 'on' if value else 'off'})
else:
item.update({'ParameterValue': str(value)})
param_list.append(item)
if not len(param_list):
return {'results': False}
if not len(param_list):
return {'results': False}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
@ -843,6 +850,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
'message': 'Could not establish a connection to RDS'}
kwargs = {}
kwargs.update({'DBParameterGroupName': name})
for key in ('Marker', 'Source'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
@ -850,26 +858,23 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None,
if locals()['MaxRecords'] is not None:
kwargs['MaxRecords'] = int(locals()['MaxRecords'])
r = conn.describe_db_parameters(DBParameterGroupName=name, **kwargs)
pag = conn.get_paginator('describe_db_parameters')
pit = pag.paginate(**kwargs)
if not r:
return {'result': False,
'message': 'Failed to get RDS parameters for group {0}.'
.format(name)}
results = r['Parameters']
keys = ['ParameterName', 'ParameterValue', 'Description',
'Source', 'ApplyType', 'DataType', 'AllowedValues',
'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod']
parameters = odict.OrderedDict()
ret = {'result': True}
for result in results:
data = odict.OrderedDict()
for k in keys:
data[k] = result.get(k)
parameters[result.get('ParameterName')] = data
for p in pit:
for result in p['Parameters']:
data = odict.OrderedDict()
for k in keys:
data[k] = result.get(k)
parameters[result.get('ParameterName')] = data
ret['parameters'] = parameters
return ret

View file

@ -599,9 +599,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
try:
vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
region=region, key=key, keyid=keyid, profile=profile)
return {'exists': bool(vpc_ids)}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return False.
return {'exists': False}
return {'error': boto_err}
return {'exists': bool(vpc_ids)}
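The branch keys off the dict produced by salt.utils.boto.get_error(); an illustrative shape of what it inspects:
# boto_err == {'aws': {'code': 'InvalidVpcID.NotFound', 'message': '...'}, ...}
# boto_err.get('aws', {}).get('code')  # -> 'InvalidVpcID.NotFound'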
def create(cidr_block, instance_tenancy=None, vpc_name=None,
@ -723,27 +728,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
if not vpc_id:
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
# VPC was not found: handle the error and return None.
return {'vpc': None}
return {'error': boto_err}
filter_parameters = {'vpc_ids': vpc_id}
if not vpc_id:
return {'vpc': None}
filter_parameters = {'vpc_ids': vpc_id}
try:
vpcs = conn.get_all_vpcs(**filter_parameters)
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: {0}'.format(vpc.id))
if vpcs:
vpc = vpcs[0] # Found!
log.debug('Found VPC: {0}'.format(vpc.id))
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r}
else:
return {'vpc': None}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
'dhcp_options_id', 'instance_tenancy')
_r = dict([(k, getattr(vpc, k)) for k in keys])
_r.update({'region': getattr(vpc, 'region').name})
return {'vpc': _r}
else:
return {'vpc': None}
def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
@ -809,7 +821,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
Given subnet properties, find and return matching subnet ids
'''
if not any(subnet_name, tags, cidr):
if not any([subnet_name, tags, cidr]):
raise SaltInvocationError('At least one of the following must be '
'specified: subnet_name, cidr or tags.')
@ -927,34 +939,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
filter_parameters = {'filters': {}}
except BotoServerError as err:
return {'error': salt.utils.boto.get_error(err)}
if subnet_id:
filter_parameters['subnet_ids'] = [subnet_id]
if subnet_name:
filter_parameters['filters']['tag:Name'] = subnet_name
if cidr:
filter_parameters['filters']['cidr'] = cidr
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
if zones:
filter_parameters['filters']['availability_zone'] = zones
filter_parameters = {'filters': {}}
if subnet_id:
filter_parameters['subnet_ids'] = [subnet_id]
if subnet_name:
filter_parameters['filters']['tag:Name'] = subnet_name
if cidr:
filter_parameters['filters']['cidr'] = cidr
if tags:
for tag_name, tag_value in six.iteritems(tags):
filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
if zones:
filter_parameters['filters']['availability_zone'] = zones
try:
subnets = conn.get_all_subnets(**filter_parameters)
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
if subnets:
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
return {'exists': True}
else:
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
except BotoServerError as err:
boto_err = salt.utils.boto.get_error(err)
if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
# Subnet was not found: handle the error and return False.
return {'exists': False}
except BotoServerError as e:
return {'error': salt.utils.boto.get_error(e)}
return {'error': boto_err}
log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
if subnets:
log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
return {'exists': True}
else:
log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
return {'exists': False}
def get_subnet_association(subnets, region=None, key=None, keyid=None,
@ -2456,11 +2472,10 @@ def describe_route_table(route_table_id=None, route_table_name=None,
salt myminion boto_vpc.describe_route_table route_table_id='rtb-1f382e7d'
'''
salt.utils.versions.warn_until(
'Oxygen',
'The \'describe_route_table\' method has been deprecated and '
'replaced by \'describe_route_tables\'.'
'Neon',
'The \'describe_route_table\' method has been deprecated and '
'replaced by \'describe_route_tables\'.'
)
if not any((route_table_id, route_table_name, tags)):
raise SaltInvocationError('At least one of the following must be specified: '

View file

@ -56,3 +56,7 @@ def cmd(command, *args, **kwargs):
proxy_cmd = proxy_prefix + '.ch_config'
return __proxy__[proxy_cmd](command, *args, **kwargs)
def get_details():
return __proxy__['esxi.get_details']()

View file

@ -2318,14 +2318,14 @@ def replace(path,
if not_found_content is None:
not_found_content = repl
if prepend_if_not_found:
new_file.insert(0, not_found_content + b'\n')
new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep))
else:
# append_if_not_found
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith(b'\n'):
new_file[-1] += b'\n'
new_file.append(not_found_content + b'\n')
if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)):
new_file[-1] += salt.utils.to_bytes(os.linesep)
new_file.append(not_found_content + salt.utils.to_bytes(os.linesep))
has_changes = True
if not dry_run:
try:
@ -2336,9 +2336,9 @@ def replace(path,
raise CommandExecutionError("Exception: {0}".format(exc))
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(salt.utils.stringutils.to_str(line))
fh_.write(salt.utils.stringutils.to_bytes(line))
finally:
fh_.close()
@ -2508,9 +2508,10 @@ def blockreplace(path,
try:
fi_file = fileinput.input(path,
inplace=False, backup=False,
bufsize=1, mode='r')
bufsize=1, mode='rb')
for line in fi_file:
line = salt.utils.to_str(line)
result = line
if marker_start in line:
@ -2523,14 +2524,24 @@ def blockreplace(path,
# end of block detected
in_block = False
# Check for multi-line '\n' terminated content as split will
# introduce an unwanted additional new line.
if content and content[-1] == '\n':
content = content[:-1]
# Handle situations where there may be multiple types
# of line endings in the same file. Separate the content
# into lines. Account for Windows-style line endings
# using os.linesep, then by linux-style line endings
# using '\n'
split_content = []
for linesep_line in content.split(os.linesep):
for content_line in linesep_line.split('\n'):
split_content.append(content_line)
# Trim any trailing new lines to avoid unwanted
# additional new lines
while not split_content[-1]:
split_content.pop()
# push new block content in file
for cline in content.split('\n'):
new_file.append(cline + '\n')
for content_line in split_content:
new_file.append(content_line + os.linesep)
done = True
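A worked example of the mixed-line-ending split above, assuming Windows (os.linesep == '\r\n'):
content = 'one\r\ntwo\nthree\r\n'
split_content = []
for linesep_line in content.split('\r\n'):
    for content_line in linesep_line.split('\n'):
        split_content.append(content_line)
# split_content == ['one', 'two', 'three', ''] and the trailing '' is popped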
@ -2558,25 +2569,25 @@ def blockreplace(path,
if not done:
if prepend_if_not_found:
# add the markers and content at the beginning of file
new_file.insert(0, marker_end + '\n')
new_file.insert(0, marker_end + os.linesep)
if append_newline is True:
new_file.insert(0, content + '\n')
new_file.insert(0, content + os.linesep)
else:
new_file.insert(0, content)
new_file.insert(0, marker_start + '\n')
new_file.insert(0, marker_start + os.linesep)
done = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith('\n'):
new_file[-1] += '\n'
if not new_file[-1].endswith(os.linesep):
new_file[-1] += os.linesep
# add the markers and content at the end of file
new_file.append(marker_start + '\n')
new_file.append(marker_start + os.linesep)
if append_newline is True:
new_file.append(content + '\n')
new_file.append(content + os.linesep)
else:
new_file.append(content)
new_file.append(marker_end + '\n')
new_file.append(marker_end + os.linesep)
done = True
else:
raise CommandExecutionError(
@ -2607,9 +2618,9 @@ def blockreplace(path,
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
for line in new_file:
fh_.write(line)
fh_.write(salt.utils.to_bytes(line))
finally:
fh_.close()
@ -3749,6 +3760,14 @@ def source_list(source, source_hash, saltenv):
single_src = next(iter(single))
single_hash = single[single_src] if single[single_src] else source_hash
urlparsed_single_src = _urlparse(single_src)
# Fix this for Windows
if salt.utils.is_windows():
# urlparse doesn't handle a local Windows path without the
# protocol indicator (file://). The scheme will be the
# drive letter instead of the protocol. So, we'll add the
# protocol and re-parse
if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
urlparsed_single_src = _urlparse('file://' + single_src)
proto = urlparsed_single_src.scheme
if proto == 'salt':
path, senv = salt.utils.url.parse(single_src)
@ -3760,10 +3779,15 @@ def source_list(source, source_hash, saltenv):
elif proto.startswith('http') or proto == 'ftp':
ret = (single_src, single_hash)
break
elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
elif proto == 'file' and (
os.path.exists(urlparsed_single_src.netloc) or
os.path.exists(urlparsed_single_src.path) or
os.path.exists(os.path.join(
urlparsed_single_src.netloc,
urlparsed_single_src.path))):
ret = (single_src, single_hash)
break
elif single_src.startswith('/') and os.path.exists(single_src):
elif single_src.startswith(os.sep) and os.path.exists(single_src):
ret = (single_src, single_hash)
break
elif isinstance(single, six.string_types):
@ -3774,14 +3798,26 @@ def source_list(source, source_hash, saltenv):
ret = (single, source_hash)
break
urlparsed_src = _urlparse(single)
if salt.utils.is_windows():
# urlparse doesn't handle a local Windows path without the
# protocol indicator (file://). The scheme will be the
# drive letter instead of the protocol. So, we'll add the
# protocol and re-parse
if urlparsed_src.scheme.lower() in string.ascii_lowercase:
urlparsed_src = _urlparse('file://' + single)
proto = urlparsed_src.scheme
if proto == 'file' and os.path.exists(urlparsed_src.path):
if proto == 'file' and (
os.path.exists(urlparsed_src.netloc) or
os.path.exists(urlparsed_src.path) or
os.path.exists(os.path.join(
urlparsed_src.netloc,
urlparsed_src.path))):
ret = (single, source_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single, source_hash)
break
elif single.startswith('/') and os.path.exists(single):
elif single.startswith(os.sep) and os.path.exists(single):
ret = (single, source_hash)
break
if ret is None:
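The drive-letter quirk is easy to reproduce with urlparse directly (Python 3 import shown; the path is made up):
from urllib.parse import urlparse

urlparse(r'c:\temp\foo.txt').scheme     # -> 'c': the drive letter parses as a scheme
u = urlparse(r'file://c:\temp\foo.txt')
(u.scheme, u.netloc, u.path)            # -> ('file', 'c:\\temp\\foo.txt', '')
# hence the added os.path.exists() checks on netloc and netloc + path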
@ -4281,7 +4317,8 @@ def extract_hash(hash_fn,
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
'''
Check the permissions on files, modify attributes and chown if needed
Check the permissions on files, modify attributes and chown if needed. File
attributes are only verified if lsattr(1) is installed.
CLI Example:
@ -4293,6 +4330,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
lsattr_cmd = salt.utils.path.which('lsattr')
if not ret:
ret = {'name': name,
@ -4318,7 +4356,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
is_dir = os.path.isdir(name)
if not salt.utils.platform.is_windows() and not is_dir:
if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
# List attributes on file
perms['lattrs'] = ''.join(lsattr(name)[name])
# Remove attributes on file so changes can be enforced.
@ -4429,7 +4467,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
if __opts__['test'] is True and ret['changes']:
ret['result'] = None
if not salt.utils.platform.is_windows() and not is_dir:
if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
# Replace attributes on file if it had been removed
if perms['lattrs']:
chattr(name, operator='add', attributes=perms['lattrs'])

View file

@ -101,8 +101,6 @@ def _construct_yaml_str(self, node):
Construct for yaml
'''
return self.construct_scalar(node)
YamlLoader.add_constructor(u'tag:yaml.org,2002:str',
_construct_yaml_str)
YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp',
_construct_yaml_str)

View file

@ -40,11 +40,16 @@ import base64
import logging
import yaml
import tempfile
import signal
from time import sleep
from contextlib import contextmanager
from salt.exceptions import CommandExecutionError
from salt.ext.six import iteritems
import salt.utils.files
import salt.utils.templates
from salt.exceptions import TimeoutError
from salt.ext.six.moves import range # pylint: disable=import-error
try:
import kubernetes # pylint: disable=import-self
@ -78,6 +83,21 @@ def __virtual__():
return False, 'python kubernetes library not found'
if not salt.utils.platform.is_windows():
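# signal.alarm is POSIX-only and works in the main thread only, so the
# timeout helper below is defined solely for non-Windows platforms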
@contextmanager
def _time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutError
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
POLLING_TIME_LIMIT = 30
# pylint: disable=no-member
def _setup_conn(**kwargs):
'''
@ -692,7 +712,30 @@ def delete_deployment(name, namespace='default', **kwargs):
name=name,
namespace=namespace,
body=body)
return api_response.to_dict()
mutable_api_response = api_response.to_dict()
if not salt.utils.platform.is_windows():
try:
with _time_limit(POLLING_TIME_LIMIT):
while show_deployment(name, namespace) is not None:
sleep(1)
else: # pylint: disable=useless-else-on-loop
mutable_api_response['code'] = 200
except TimeoutError:
pass
else:
# Windows has no signal.alarm implementation, so we are just falling
# back to loop-counting.
for i in range(60):
if show_deployment(name, namespace) is None:
mutable_api_response['code'] = 200
break
else:
sleep(1)
if mutable_api_response['code'] != 200:
log.warning('Reached polling time limit. Deployment is not yet '
'deleted, but we are backing off. Sorry, but you\'ll '
'have to check manually.')
return mutable_api_response
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None

View file

@ -1,6 +1,9 @@
# -*- coding: utf-8 -*-
'''
Support for Linux File Access Control Lists
The Linux ACL module requires the `getfacl` and `setfacl` binaries.
'''
from __future__ import absolute_import

View file

@ -356,3 +356,40 @@ def assemble(name,
return cmd
elif test_mode is False:
return __salt__['cmd.run'](cmd, python_shell=False)
def examine(device):
'''
Show detail for a specified RAID component device
CLI Example:
.. code-block:: bash
salt '*' raid.examine '/dev/sda1'
'''
res = __salt__['cmd.run_stdout']('mdadm -Y -E {0}'.format(device), output_loglevel='trace', python_shell=False)
ret = {}
for line in res.splitlines():
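# partition('=') yields (key, '=', value); the [::2] slice keeps
# (key, value)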
name, var = line.partition("=")[::2]
ret[name] = var
return ret
def add(name, device):
'''
Add new device to RAID array.
CLI Example:
.. code-block:: bash
salt '*' raid.add /dev/md0 /dev/sda1
'''
cmd = 'mdadm --manage {0} --add {1}'.format(name, device)
if __salt__['cmd.retcode'](cmd) == 0:
return True
return False

View file

@ -688,11 +688,20 @@ def file_query(database, file_name, **connection_args):
.. versionadded:: 2017.7.0
database
database to run script inside
file_name
File name of the script. This can be on the minion, or a file that is reachable by the fileserver
CLI Example:
.. code-block:: bash
salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql
Return data:
@ -701,6 +710,9 @@ def file_query(database, file_name, **connection_args):
{'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
'''
if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')):
file_name = __salt__['cp.cache_file'](file_name)
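# cp.cache_file downloads the remote script to the minion cache and
# returns the cached local path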
if os.path.exists(file_name):
with salt.utils.files.fopen(file_name, 'r') as ifile:
contents = ifile.read()
@ -709,7 +721,7 @@ def file_query(database, file_name, **connection_args):
return False
query_string = ""
ret = {'rows returned': 0, 'columns': 0, 'results': 0, 'rows affected': 0, 'query time': {'raw': 0}}
ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
for line in contents.splitlines():
if re.match(r'--', line): # ignore sql comments
continue
@ -729,16 +741,16 @@ def file_query(database, file_name, **connection_args):
if 'rows returned' in query_result:
ret['rows returned'] += query_result['rows returned']
if 'columns' in query_result:
ret['columns'] += query_result['columns']
ret['columns'].append(query_result['columns'])
if 'results' in query_result:
ret['results'] += query_result['results']
ret['results'].append(query_result['results'])
if 'rows affected' in query_result:
ret['rows affected'] += query_result['rows affected']
ret['query time']['human'] = str(round(float(ret['query time']['raw']), 2)) + 's'
ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)
# Remove empty keys in ret
ret = dict((k, v) for k, v in six.iteritems(ret) if v)
ret = {k: v for k, v in six.iteritems(ret) if v}
return ret

View file

@ -26,7 +26,7 @@ _XCCDF_MAP = {
'cmd_pattern': (
"oscap xccdf eval "
"--oval-results --results results.xml --report report.html "
"--profile {0} {1} {2}"
"--profile {0} {1}"
)
}
}
@ -73,7 +73,6 @@ def xccdf(params):
'''
params = shlex.split(params)
policy = params[-1]
del params[-1]
success = True
error = None
@ -90,7 +89,7 @@ def xccdf(params):
error = str(err)
if success:
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, " ".join(argv), policy)
cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
tempdir = tempfile.mkdtemp()
proc = Popen(
shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)

View file

@ -7,9 +7,11 @@ Module to provide Palo Alto compatibility to Salt.
:depends: none
:platform: unix
.. versionadded:: Oxygen
Configuration
=============
This module accepts connection configuration details either as
parameters, or as configuration settings in pillar as a Salt proxy.
Options passed into opts will be ignored if options are passed into pillar.
@ -19,6 +21,7 @@ Options passed into opts will be ignored if options are passed into pillar.
About
=====
This execution module was designed to handle connections to a Palo Alto based
firewall. This module adds support to send connections directly to the device
through the XML API or through a brokered connection to Panorama.
@ -31,8 +34,9 @@ import logging
import time
# Import Salt Libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
import salt.proxy.panos
import salt.utils.platform
log = logging.getLogger(__name__)
@ -55,11 +59,11 @@ def __virtual__():
def _get_job_results(query=None):
'''
Executes a query that requires a job for completion. This funciton will wait for the job to complete
Executes a query that requires a job for completion. This function will wait for the job to complete
and return the results.
'''
if not query:
raise salt.exception.CommandExecutionError("Query parameters cannot be empty.")
raise CommandExecutionError("Query parameters cannot be empty.")
response = __proxy__['panos.call'](query)
@ -241,10 +245,10 @@ def download_software_file(filename=None, synch=False):
'''
if not filename:
raise salt.exception.CommandExecutionError("Filename option must not be none.")
raise CommandExecutionError("Filename option must not be none.")
if not isinstance(synch, bool):
raise salt.exception.CommandExecutionError("Synch option must be boolean..")
raise CommandExecutionError("Synch option must be boolean..")
if synch is True:
query = {'type': 'op',
@ -276,10 +280,10 @@ def download_software_version(version=None, synch=False):
'''
if not version:
raise salt.exception.CommandExecutionError("Version option must not be none.")
raise CommandExecutionError("Version option must not be none.")
if not isinstance(synch, bool):
raise salt.exception.CommandExecutionError("Synch option must be boolean..")
raise CommandExecutionError("Synch option must be boolean..")
if synch is True:
query = {'type': 'op',
@ -644,7 +648,7 @@ def get_job(jid=None):
'''
if not jid:
raise salt.exception.CommandExecutionError("ID option must not be none.")
raise CommandExecutionError("ID option must not be none.")
query = {'type': 'op', 'cmd': '<show><jobs><id>{0}</id></jobs></show>'.format(jid)}
@ -675,7 +679,7 @@ def get_jobs(state='all'):
elif state.lower() == 'processed':
query = {'type': 'op', 'cmd': '<show><jobs><processed></processed></jobs></show>'}
else:
raise salt.exception.CommandExecutionError("The state parameter must be all, pending, or processed.")
raise CommandExecutionError("The state parameter must be all, pending, or processed.")
return __proxy__['panos.call'](query)
@ -1163,7 +1167,7 @@ def install_antivirus(version=None, latest=False, synch=False, skip_commit=False
'''
if not version and latest is False:
raise salt.exception.CommandExecutionError("Version option must not be none.")
raise CommandExecutionError("Version option must not be none.")
if synch is True:
s = "yes"
@ -1220,7 +1224,7 @@ def install_software(version=None):
'''
if not version:
raise salt.exception.CommandExecutionError("Version option must not be none.")
raise CommandExecutionError("Version option must not be none.")
query = {'type': 'op',
'cmd': '<request><system><software><install>'
@ -1261,7 +1265,7 @@ def refresh_fqdn_cache(force=False):
'''
if not isinstance(force, bool):
raise salt.exception.CommandExecutionError("Force option must be boolean.")
raise CommandExecutionError("Force option must be boolean.")
if force:
query = {'type': 'op',
@ -1312,7 +1316,7 @@ def resolve_address(address=None, vsys=None):
return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)
if not address:
raise salt.exception.CommandExecutionError("FQDN to resolve must be provided as address.")
raise CommandExecutionError("FQDN to resolve must be provided as address.")
if not vsys:
query = {'type': 'op',
@ -1340,7 +1344,7 @@ def save_device_config(filename=None):
'''
if not filename:
raise salt.exception.CommandExecutionError("Filename must not be empty.")
raise CommandExecutionError("Filename must not be empty.")
query = {'type': 'op', 'cmd': '<save><config><to>{0}</to></config></save>'.format(filename)}
@ -1382,7 +1386,7 @@ def set_authentication_profile(profile=None, deploy=False):
'''
if not profile:
salt.exception.CommandExecutionError("Profile name option must not be none.")
CommandExecutionError("Profile name option must not be none.")
ret = {}
@ -1419,7 +1423,7 @@ def set_hostname(hostname=None, deploy=False):
'''
if not hostname:
salt.exception.CommandExecutionError("Hostname option must not be none.")
CommandExecutionError("Hostname option must not be none.")
ret = {}
@ -1459,7 +1463,7 @@ def set_management_icmp(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1499,7 +1503,7 @@ def set_management_http(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1539,7 +1543,7 @@ def set_management_https(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1579,7 +1583,7 @@ def set_management_ocsp(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1619,7 +1623,7 @@ def set_management_snmp(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1659,7 +1663,7 @@ def set_management_ssh(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1699,7 +1703,7 @@ def set_management_telnet(enabled=True, deploy=False):
elif enabled is False:
value = "yes"
else:
salt.exception.CommandExecutionError("Invalid option provided for service enabled option.")
CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
@ -1892,7 +1896,7 @@ def set_permitted_ip(address=None, deploy=False):
'''
if not address:
salt.exception.CommandExecutionError("Address option must not be empty.")
CommandExecutionError("Address option must not be empty.")
ret = {}
@ -1928,7 +1932,7 @@ def set_timezone(tz=None, deploy=False):
'''
if not tz:
salt.exception.CommandExecutionError("Timezone name option must not be none.")
CommandExecutionError("Timezone name option must not be none.")
ret = {}
@ -1976,7 +1980,7 @@ def unlock_admin(username=None):
'''
if not username:
raise salt.exception.CommandExecutionError("Username option must not be none.")
raise CommandExecutionError("Username option must not be none.")
query = {'type': 'op',
'cmd': '<set><management-server><unlock><admin>{0}</admin></unlock></management-server>'

View file

@ -68,9 +68,7 @@ class _Puppet(object):
self.vardir = 'C:\\ProgramData\\PuppetLabs\\puppet\\var'
self.rundir = 'C:\\ProgramData\\PuppetLabs\\puppet\\run'
self.confdir = 'C:\\ProgramData\\PuppetLabs\\puppet\\etc'
self.useshell = True
else:
self.useshell = False
self.puppet_version = __salt__['cmd.run']('puppet --version')
if 'Enterprise' in self.puppet_version:
self.vardir = '/var/opt/lib/pe-puppet'
@ -106,7 +104,10 @@ class _Puppet(object):
' --{0} {1}'.format(k, v) for k, v in six.iteritems(self.kwargs)]
)
return '{0} {1}'.format(cmd, args)
# Ensure that the puppet call will return 0 in case of exit code 2
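# (in puppet's exit-code convention, 2 means 'changes were applied
# successfully', hence the remapping to 0)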
if salt.utils.platform.is_windows():
return 'cmd /V:ON /c {0} {1} ^& if !ERRORLEVEL! EQU 2 (EXIT 0) ELSE (EXIT /B)'.format(cmd, args)
return '({0} {1}) || test $? -eq 2'.format(cmd, args)
def arguments(self, args=None):
'''
@ -169,12 +170,7 @@ def run(*args, **kwargs):
puppet.kwargs.update(salt.utils.args.clean_kwargs(**kwargs))
ret = __salt__['cmd.run_all'](repr(puppet), python_shell=puppet.useshell)
if ret['retcode'] in [0, 2]:
ret['retcode'] = 0
else:
ret['retcode'] = 1
ret = __salt__['cmd.run_all'](repr(puppet), python_shell=True)
return ret

View file

@ -27,6 +27,21 @@ Installation Prerequisites
pip install purestorage
- Configure Pure Storage FlashArray authentication. Use one of the following
three methods.
1) From the minion config
.. code-block:: yaml
pure_tags:
fa:
san_ip: management vip or hostname for the FlashArray
api_token: A valid api token for the FlashArray being managed
2) From environment (PUREFA_IP and PUREFA_API)
3) From the pillar (PUREFA_IP and PUREFA_API)
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purestorage
@ -195,7 +210,7 @@ def snap_create(name, suffix=None):
Will return False if the volume selected to snap does not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume to snapshot
@ -231,7 +246,7 @@ def snap_delete(name, suffix=None, eradicate=False):
Will return False if selected snapshot does not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -273,7 +288,7 @@ def snap_eradicate(name, suffix=None):
Will return False if the snapshot is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -306,7 +321,7 @@ def volume_create(name, size=None):
Will return False if volume already exists.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume (truncated to 63 characters)
@ -344,7 +359,7 @@ def volume_delete(name, eradicate=False):
Will return False if the volume doesn't exist or is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -383,7 +398,7 @@ def volume_eradicate(name):
Will return False if the volume is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -413,7 +428,7 @@ def volume_extend(name, size):
Will return False if new size is less than or equal to existing size.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -451,7 +466,7 @@ def snap_volume_create(name, target, overwrite=False):
Will return False if target volume already exists and
overwrite is not specified, or selected snapshot doesn't exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume snapshot
@ -497,7 +512,7 @@ def volume_clone(name, target, overwrite=False):
Will return False if source volume doesn't exist, or
target volume already exists and overwrite not specified.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -541,7 +556,7 @@ def volume_attach(name, host):
Host and volume must exist or else will return False.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -574,7 +589,7 @@ def volume_detach(name, host):
Will return False if either host or volume do not exist, or
if selected volume isn't already connected to the host.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of volume
@ -608,7 +623,7 @@ def host_create(name, iqn=None, wwn=None):
Fibre Channel parameters are not in a valid format.
See Pure Storage FlashArray documentation.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host (truncated to 63 characters)
@ -659,7 +674,7 @@ def host_update(name, iqn=None, wwn=None):
by another host, or are not in a valid format.
See Pure Storage FlashArray documentation.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host
@ -699,7 +714,7 @@ def host_delete(name):
Will return False if the host doesn't exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of host
@ -735,7 +750,7 @@ def hg_create(name, host=None, volume=None):
Will return False if hostgroup already exists, or if
named host or volume do not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup (truncated to 63 characters)
@ -791,7 +806,7 @@ def hg_update(name, host=None, volume=None):
Will return False if the hostgroup doesn't exist, or the host
or volume do not exist.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@ -837,7 +852,7 @@ def hg_delete(name):
Will return False if the hostgroup is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@ -875,7 +890,7 @@ def hg_remove(name, volume=None, host=None):
Will return False if the hostgroup does not exist, or the named host or volume are
not in the hostgroup.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup
@ -936,7 +951,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True):
hostgroups, hosts or volumes
* Named type for protection group does not exist
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@ -1029,7 +1044,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None):
* Incorrect type selected for current protection group type
* Specified type does not exist
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@ -1119,7 +1134,7 @@ def pg_delete(name, eradicate=False):
Will return False if protection group is already in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@ -1156,7 +1171,7 @@ def pg_eradicate(name):
Will return False if protection group is not in a deleted state.
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of protection group
@ -1188,7 +1203,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None):
* Protection group does not exist
* Specified type is not currently associated with the protection group
.. versionadded:: 2017.7.3
.. versionadded:: Oxygen
name : string
name of hostgroup

View file

@ -851,7 +851,7 @@ def list_policies(vhost="/", runas=None):
return ret
def set_policy(vhost, name, pattern, definition, priority=None, runas=None):
def set_policy(vhost, name, pattern, definition, priority=None, apply_to=None, runas=None):
'''
Set a policy based on rabbitmqctl set_policy.
@ -874,6 +874,8 @@ def set_policy(vhost, name, pattern, definition, priority=None, runas=None):
cmd = [RABBITMQCTL, 'set_policy', '-p', vhost]
if priority:
cmd.extend(['--priority', priority])
if apply_to:
cmd.extend(['--apply-to', apply_to])
cmd.extend([name, pattern, definition])
res = __salt__['cmd.run_all'](cmd, runas=runas, python_shell=False)
log.debug('Set policy: {0}'.format(res['stdout']))
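For reference, a minimal CLI call exercising the new ``apply_to``
argument might look like this (vhost, policy name and pattern are
illustrative):

.. code-block:: bash

    salt '*' rabbitmq.set_policy / HA '.*' '{"ha-mode": "all"}' apply_to=queues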

View file

@ -375,8 +375,10 @@ def list_semod():
def _validate_filetype(filetype):
'''
Checks if the given filetype is a valid SELinux filetype specification.
Throws an SaltInvocationError if it isn't.
.. versionadded:: 2017.7.0
Checks if the given filetype is a valid SELinux filetype
specification. Throws a SaltInvocationError if it isn't.
'''
if filetype not in _SELINUX_FILETYPES.keys():
raise SaltInvocationError('Invalid filetype given: {0}'.format(filetype))
@ -385,6 +387,8 @@ def _validate_filetype(filetype):
def _context_dict_to_string(context):
'''
.. versionadded:: 2017.7.0
Converts an SELinux file context from a dict to a string.
'''
return '{sel_user}:{sel_role}:{sel_type}:{sel_level}'.format(**context)
@ -392,6 +396,8 @@ def _context_dict_to_string(context):
def _context_string_to_dict(context):
'''
.. versionadded:: 2017.7.0
Converts an SELinux file context from string to dict.
'''
if not re.match('[^:]+:[^:]+:[^:]+:[^:]+$', context):
@ -406,8 +412,11 @@ def _context_string_to_dict(context):
def filetype_id_to_string(filetype='a'):
'''
Translates SELinux filetype single-letter representation
to a more human-readable version (which is also used in `semanage fcontext -l`).
.. versionadded:: 2017.7.0
Translates SELinux filetype single-letter representation to a more
human-readable version (which is also used in `semanage fcontext
-l`).
'''
_validate_filetype(filetype)
return _SELINUX_FILETYPES.get(filetype, 'error')
@ -415,20 +424,27 @@ def filetype_id_to_string(filetype='a'):
def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
Returns the current entry in the SELinux policy list as a dictionary.
Returns None if no exact match was found
.. versionadded:: 2017.7.0
Returns the current entry in the SELinux policy list as a
dictionary. Returns None if no exact match was found.
Returned keys are:
- filespec (the name supplied and matched)
- filetype (the descriptive name of the filetype supplied)
- sel_user, sel_role, sel_type, sel_level (the selinux context)
* filespec (the name supplied and matched)
* filetype (the descriptive name of the filetype supplied)
* sel_user, sel_role, sel_type, sel_level (the selinux context)
For a more in-depth explanation of the selinux context, go to
https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Security-Enhanced_Linux/chap-Security-Enhanced_Linux-SELinux_Contexts.html
name: filespec of the file or directory. Regex syntax is allowed.
filetype: The SELinux filetype specification.
Use one of [a, f, d, c, b, s, l, p].
See also `man semanage-fcontext`.
Defaults to 'a' (all files)
name
filespec of the file or directory. Regex syntax is allowed.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
(all files).
CLI Example:
@ -448,7 +464,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype)
cmd = 'semanage fcontext -l | egrep ' + \
"'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs)
current_entry_text = __salt__['cmd.shell'](cmd)
current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True)
if current_entry_text == '':
return None
ret = {}
@ -461,20 +477,34 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
'''
Sets or deletes the SELinux policy for a given filespec and other optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new one for a given
filespec and filetype, as adding one with semanage automatically overwrites a
previously configured SELinux context.
.. versionadded:: 2017.7.0
name: filespec of the file or directory. Regex syntax is allowed.
file_type: The SELinux filetype specification.
Use one of [a, f, d, c, b, s, l, p].
See also ``man semanage-fcontext``.
Defaults to 'a' (all files)
sel_type: SELinux context type. There are many.
sel_user: SELinux user. Use ``semanage login -l`` to determine which ones are available to you
sel_level: The MLS range of the SELinux context.
Sets or deletes the SELinux policy for a given filespec and other
optional parameters.
Returns the result of the call to semanage.
Note that you don't have to remove an entry before setting a new
one for a given filespec and filetype, as adding one with semanage
automatically overwrites a previously configured SELinux context.
name
filespec of the file or directory. Regex syntax is allowed.
file_type
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
(all files).
sel_type
SELinux context type. There are many.
sel_user
SELinux user. Use ``semanage login -l`` to determine which ones
are available to you.
sel_level
The MLS range of the SELinux context.
CLI Example:
@ -500,10 +530,14 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
def fcontext_policy_is_applied(name, recursive=False):
'''
Returns an empty string if the SELinux policy for a given filespec is applied,
returns string with differences in policy and actual situation otherwise.
.. versionadded:: 2017.7.0
name: filespec of the file or directory. Regex syntax is allowed.
Returns an empty string if the SELinux policy for a given filespec
is applied, returns string with differences in policy and actual
situation otherwise.
name
filespec of the file or directory. Regex syntax is allowed.
CLI Example:
@ -520,11 +554,17 @@ def fcontext_policy_is_applied(name, recursive=False):
def fcontext_apply_policy(name, recursive=False):
'''
Applies SElinux policies to filespec using `restorecon [-R] filespec`.
Returns dict with changes if succesful, the output of the restorecon command otherwise.
.. versionadded:: 2017.7.0
name: filespec of the file or directory. Regex syntax is allowed.
recursive: Recursively apply SELinux policies.
Applies SELinux policies to filespec using `restorecon [-R]
filespec`. Returns dict with changes if successful, the output of
the restorecon command otherwise.
name
filespec of the file or directory. Regex syntax is allowed.
recursive
Recursively apply SELinux policies.
CLI Example:

View file

@ -132,7 +132,7 @@ def procs():
uind = 0
pind = 0
cind = 0
plines = __salt__['cmd.run'](__grains__['ps']).splitlines()
plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
guide = plines.pop(0).split()
if 'USER' in guide:
uind = guide.index('USER')
@ -1417,7 +1417,7 @@ def pid(sig):
'''
cmd = __grains__['ps']
output = __salt__['cmd.run_stdout'](cmd)
output = __salt__['cmd.run_stdout'](cmd, python_shell=True)
pids = ''
for line in output.splitlines():

29
salt/modules/vcenter.py Normal file
View file

@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
'''
Module used to access the vcenter proxy connection methods
'''
from __future__ import absolute_import
# Import python libs
import logging
import salt.utils
log = logging.getLogger(__name__)
__proxyenabled__ = ['vcenter']
# Define the module's virtual name
__virtualname__ = 'vcenter'
def __virtual__():
'''
Only work on proxy
'''
if salt.utils.is_proxy():
return __virtualname__
return False
def get_details():
return __proxy__['vcenter.get_details']()

File diff suppressed because it is too large

View file

@ -1280,10 +1280,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
arguments = ['/i', cached_pkg]
if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.shlex_split(install_flags))
arguments.extend(salt.utils.shlex_split(install_flags, posix=False))
else:
cmd = cached_pkg
arguments = salt.utils.shlex_split(install_flags)
arguments = salt.utils.shlex_split(install_flags, posix=False)
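# posix=False keeps shlex from treating the backslashes in Windows
# paths as escape characters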
# Install the software
# Check Use Scheduler Option
@ -1356,7 +1356,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# Launch the command
result = __salt__['cmd.run_all'](cmd,
cache_path,
output_loglevel='quiet',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
@ -1615,19 +1614,19 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
#Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
cmd = msiexec
arguments = ['/x']
arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False))
else:
cmd = expanded_cached_pkg
arguments = salt.utils.shlex_split(uninstall_flags, posix=False)
# Uninstall the software
# Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False):
# Build Scheduled Task Parameters
if use_msiexec:
cmd = msiexec
arguments = ['/x']
arguments.extend(salt.utils.args.shlex_split(uninstall_flags))
else:
cmd = expanded_cached_pkg
arguments = salt.utils.args.shlex_split(uninstall_flags)
# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
@ -1648,16 +1647,12 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
ret[pkgname] = {'uninstall status': 'failed'}
else:
# Build the install command
cmd = []
if use_msiexec:
cmd.extend([msiexec, '/x', expanded_cached_pkg])
else:
cmd.append(expanded_cached_pkg)
cmd.extend(salt.utils.args.shlex_split(uninstall_flags))
cmd = [cmd]
cmd.extend(arguments)
# Launch the command
result = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:

View file

@ -110,7 +110,7 @@ def available(software=True,
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
Include driver updates in the results (default is True)
summary (bool):
- True: Return a summary of updates available for each category.

View file

@ -1347,6 +1347,7 @@ def install(name=None,
to_install = []
to_downgrade = []
to_reinstall = []
_available = {}
# The above three lists will be populated with tuples containing the
# package name and the string being used for this particular package
# modification. The reason for this method is that the string we use for

View file

@ -77,6 +77,9 @@ def __virtual__():
) == 0:
return 'zfs'
if __grains__['kernel'] == 'OpenBSD':
return False
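# Shorthand: call service.<fn> for the zfs-fuse userspace daemon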
_zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse')
if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')):
return 'zfs'

View file

@ -16,30 +16,30 @@ state_verbose:
instruct the highstate outputter to omit displaying anything in green; this
means that anything with a result of True and no changes will not be printed
state_output:
The highstate outputter has six output modes, ``full``, ``terse``,
``mixed``, ``mixed_id``, ``changes`` and ``filter``.
The highstate outputter has five output modes:
``full``, ``terse``, ``mixed``, ``changes`` and ``filter``.
* The default is set to ``full``, which will display many lines of detailed
information for each executed chunk.
* If ``terse`` is used, then the output is greatly simplified and shown in
only one line.
* If ``mixed`` is used, then terse output will be used unless a state
failed, in which case full output will be used.
* If ``mixed_id`` is used, then the mixed form will be used, but the value for ``name``
will be drawn from the state ID. This is useful for cases where the name
value might be very long and hard to read.
* If ``changes`` is used, then terse output will be used if there was no
error and no changes, otherwise full output will be used.
* If ``filter`` is used, then either or both of two different filters can be
used: ``exclude`` or ``terse``.
* for ``exclude``, state.highstate expects a list of states to be excluded
(or ``None``)
followed by ``True`` for terse output or ``False`` for regular output.
Because of parsing nuances, if only one of these is used, it must still
contain a comma. For instance: `exclude=True,`.
* for ``terse``, state.highstate expects simply ``True`` or ``False``.
* for ``exclude``, state.highstate expects a list of states to be excluded (or ``None``)
followed by ``True`` for terse output or ``False`` for regular output.
Because of parsing nuances, if only one of these is used, it must still
contain a comma. For instance: `exclude=True,`.
* for ``terse``, state.highstate expects simply ``True`` or ``False``.
These can be set as such from the command line, or in the Salt config as
`state_output_exclude` or `state_output_terse`, respectively.
The output modes have one modifier:
``full_id``, ``terse_id``, ``mixed_id``, ``changes_id`` and ``filter_id``
If ``_id`` is used, then the corresponding form will be used, but the value for ``name``
will be drawn from the state ID. This is useful for cases where the name
value might be very long and hard to read.
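The mode can also be chosen per invocation from the command line; a
minimal sketch, assuming a standard master/minion setup:

.. code-block:: bash

    salt '*' state.apply --state-output=mixed_id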
state_tabular:
If `state_output` uses the terse output, set this to `True` for an aligned
output format. If you wish to use a custom format, this can be set to a
@ -246,7 +246,7 @@ def _format_host(host, data):
state_output = __opts__.get('state_output', 'full').lower()
comps = [sdecode(comp) for comp in tname.split('_|-')]
if state_output == 'mixed_id':
if state_output.endswith('_id'):
# Swap in the ID for the name. Refs #35137
comps[2] = comps[1]

View file

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
'''
Add all extra minion data to the pillar.
:codeauthor: Alexandru.Bleotu@morganstanley.ms.com
One can filter on the keys to include in the pillar by using the ``include``
parameter. For subkeys the ':' notation is supported (i.e. 'key:subkey')
The keyword ``<all>`` includes all keys.
Complete example in etc/salt/master
=====================================
.. code-block:: yaml
ext_pillar:
- extra_minion_data_in_pillar:
include: '*'
ext_pillar:
- extra_minion_data_in_pillar:
include:
- key1
- key2:subkey2
ext_pillar:
- extra_minion_data_in_pillar:
include: <all>
'''
from __future__ import absolute_import
import logging
# Set up logging
log = logging.getLogger(__name__)
__virtualname__ = 'extra_minion_data_in_pillar'
def __virtual__():
return __virtualname__
def ext_pillar(minion_id, pillar, include, extra_minion_data=None):
def get_subtree(key, source_dict):
'''
Returns a subtree corresponding to the specified key.
key
Key. Supports the ':' notation (e.g. 'key:subkey')
source_dict
Source dictionary
'''
ret_dict = aux_dict = {}
subtree = source_dict
subkeys = key.split(':')
# Build an empty intermediate subtree following the subkeys
for subkey in subkeys[:-1]:
# The result will be built in aux_dict
aux_dict[subkey] = {}
aux_dict = aux_dict[subkey]
if subkey not in subtree:
# The subkey is not in the source dictionary
return {}
subtree = subtree[subkey]
if subkeys[-1] not in subtree:
# Final subkey is not in subtree
return {}
# Assign the subtree value to the result
aux_dict[subkeys[-1]] = subtree[subkeys[-1]]
return ret_dict
log.trace('minion_id = {0}'.format(minion_id))
log.trace('include = {0}'.format(include))
log.trace('extra_minion_data = {0}'.format(extra_minion_data))
data = {}
if not extra_minion_data:
return {}
if include in ['*', '<all>']:
return extra_minion_data
data = {}
for key in include:
data.update(get_subtree(key, extra_minion_data))
return data
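As an illustration of the ':' notation, the helper above behaves like
this on hypothetical data:

.. code-block:: python

    source = {'key1': {'subkey1': 1, 'other': 2}}
    get_subtree('key1:subkey1', source)  # -> {'key1': {'subkey1': 1}}
    get_subtree('key1:missing', source)  # -> {}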

View file

@ -343,14 +343,15 @@ def ext_pillar(minion_id,
if minion_id in match:
ngroup_dir = os.path.join(
nodegroups_dir, str(nodegroup))
ngroup_pillar.update(
ngroup_pillar = salt.utils.dictupdate.merge(ngroup_pillar,
_construct_pillar(ngroup_dir,
follow_dir_links,
keep_newline,
render_default,
renderer_blacklist,
renderer_whitelist,
template)
template),
strategy='recurse'
)
else:
if debug is True:

View file

@ -374,20 +374,20 @@ def __virtual__():
return False
def ext_pillar(minion_id, repo):
def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument
'''
Checkout the ext_pillar sources and compile the resulting pillar SLS
'''
opts = copy.deepcopy(__opts__)
opts['pillar_roots'] = {}
opts['__git_pillar'] = True
pillar = salt.utils.gitfs.GitPillar(opts)
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
git_pillar = salt.utils.gitfs.GitPillar(opts)
git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
if __opts__.get('__role') == 'minion':
# If masterless, fetch the remotes. We'll need to remove this once
# we make the minion daemon able to run standalone.
pillar.fetch_remotes()
pillar.checkout()
git_pillar.fetch_remotes()
git_pillar.checkout()
ret = {}
merge_strategy = __opts__.get(
'pillar_source_merging_strategy',
@ -397,7 +397,14 @@ def ext_pillar(minion_id, repo):
'pillar_merge_lists',
False
)
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs):
# Map env if env == '__env__' before checking the env value
if env == '__env__':
env = opts.get('pillarenv') \
or opts.get('environment') \
or opts.get('git_pillar_base')
log.debug('__env__ maps to %s', env)
# If pillarenv is set, only grab pillars with that match pillarenv
if opts['pillarenv'] and env != opts['pillarenv']:
log.debug(
@ -406,7 +413,7 @@ def ext_pillar(minion_id, repo):
env, pillar_dir, opts['pillarenv']
)
continue
if pillar_dir in pillar.pillar_linked_dirs:
if pillar_dir in git_pillar.pillar_linked_dirs:
log.debug(
'git_pillar is skipping processing on %s as it is a '
'mounted repo', pillar_dir
@ -418,12 +425,6 @@ def ext_pillar(minion_id, repo):
'env \'%s\'', pillar_dir, env
)
if env == '__env__':
env = opts.get('pillarenv') \
or opts.get('environment') \
or opts.get('git_pillar_base')
log.debug('__env__ maps to %s', env)
pillar_roots = [pillar_dir]
if __opts__['git_pillar_includes']:
@ -433,7 +434,7 @@ def ext_pillar(minion_id, repo):
# list, so that its top file is sourced from the correct
# location and not from another git_pillar remote.
pillar_roots.extend(
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
[d for (d, e) in six.iteritems(git_pillar.pillar_dirs)
if env == e and d != pillar_dir]
)

View file

@ -90,7 +90,8 @@ class POSTGRESExtPillar(SqlBaseExtPillar):
conn = psycopg2.connect(host=_options['host'],
user=_options['user'],
password=_options['pass'],
dbname=_options['db'])
dbname=_options['db'],
port=_options['port'])
cursor = conn.cursor()
try:
yield cursor

View file

@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
'''
Provide external pillar data from RethinkDB
.. versionadded:: Oxygen
:depends: rethinkdb (on the salt-master)
salt master rethinkdb configuration
===================================
These variables must be configured in your master configuration file.
* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
Defaults to ``'28015'``
* ``rethinkdb.database`` - The database to connect to.
Defaults to ``'salt'``
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
Defaults to ``''``
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
Defaults to ``''``
salt-master ext_pillar configuration
====================================
The ext_pillar function arguments are given in single line dictionary notation.
.. code-block:: yaml
ext_pillar:
- rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}
In the example above, the following happens:
* The salt-master will look for external pillars in the 'ext_pillar' table
on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'
Module Documentation
====================
'''
from __future__ import absolute_import
# Import python libraries
import logging
# Import 3rd party libraries
try:
import rethinkdb
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
__virtualname__ = 'rethinkdb'
__opts__ = {
'rethinkdb.host': 'salt',
'rethinkdb.port': '28015',
'rethinkdb.database': 'salt',
'rethinkdb.username': None,
'rethinkdb.password': None
}
def __virtual__():
if not HAS_RETHINKDB:
return False
return True
# Configure logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id,
pillar,
table='pillar',
id_field=None,
field=None,
pillar_key=None):
'''
Collect minion external pillars from a RethinkDB database
Arguments:
* `table`: The RethinkDB table containing external pillar information.
Defaults to ``'pillar'``
* `id_field`: Field in document containing the minion id.
If blank then we assume the table index matches minion ids
* `field`: Specific field in the document used for pillar data, if blank
then the entire document will be used
* `pillar_key`: The salt-master will nest found external pillars under
this key before merging into the minion pillars. If blank, external
pillars will be merged at top level
'''
host = __opts__['rethinkdb.host']
port = __opts__['rethinkdb.port']
database = __opts__['rethinkdb.database']
username = __opts__['rethinkdb.username']
password = __opts__['rethinkdb.password']
log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar'
.format(host, port, username))
# Connect to the database
conn = rethinkdb.connect(host=host,
port=port,
db=database,
user=username,
password=password)
data = None
try:
if id_field:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: {0}, field: {1}, minion: {2}'.format(
table, id_field, minion_id))
if field:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).pluck(field).run(conn)
else:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).run(conn)
else:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: {0}, field: id, minion: {1}'.format(
table, minion_id))
if field:
data = rethinkdb.table(table).get(minion_id).pluck(field).run(
conn)
else:
data = rethinkdb.table(table).get(minion_id).run(conn)
finally:
if conn.is_open():
conn.close()
if data.items:
# Return nothing if multiple documents are found for a minion
if len(data.items) > 1:
log.error('ext_pillar.rethinkdb: ambiguous documents found for '
'minion {0}'.format(minion_id))
return {}
else:
result = data.items.pop()
if pillar_key:
return {pillar_key: result}
return result
else:
# No document found in the database
log.debug('ext_pillar.rethinkdb: no document found')
return {}

62
salt/pillar/saltclass.py Normal file
View file

@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
'''
SaltClass Pillar Module
.. code-block:: yaml
ext_pillar:
- saltclass:
- path: /srv/saltclass
'''
# import python libs
from __future__ import absolute_import
import salt.utils.saltclass as sc
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
This module has no external dependencies
'''
return True
def ext_pillar(minion_id, pillar, *args, **kwargs):
'''
Node definitions path will be retrieved from args - or set to default -
then added to 'salt_data' dict that is passed to the 'get_pillars' function.
The 'salt_data' dict is a convenient way to pass all the required data to the function.
It contains:
- __opts__
- __salt__
- __grains__
- __pillar__
- minion_id
- path
If successful, the function will return a pillar dict for minion_id
'''
# If path has not been set, make a default
for i in args:
if 'path' not in i:
path = '/srv/saltclass'
i['path'] = path
log.warning('path variable unset, using default: {0}'.format(path))
else:
path = i['path']
# Create a dict that will contain our salt dicts to pass it to reclass
salt_data = {
'__opts__': __opts__,
'__salt__': __salt__,
'__grains__': __grains__,
'__pillar__': pillar,
'minion_id': minion_id,
'path': path
}
return sc.get_pillars(minion_id, salt_data)

View file

@ -273,13 +273,22 @@ for standing up an ESXi host from scratch.
# Import Python Libs
from __future__ import absolute_import
import logging
import os
# Import Salt Libs
from salt.exceptions import SaltSystemExit
from salt.exceptions import SaltSystemExit, InvalidConfigError
from salt.config.schemas.esxi import EsxiProxySchema
from salt.utils.dictupdate import merge
# This must be present or the Salt loader won't load this module.
__proxyenabled__ = ['esxi']
# External libraries
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
@ -288,7 +297,6 @@ DETAILS = {}
# Set up logging
log = logging.getLogger(__file__)
# Define the module's virtual name
__virtualname__ = 'esxi'
@ -297,7 +305,7 @@ def __virtual__():
'''
Only load if the ESXi execution module is available.
'''
if 'vsphere.system_info' in __salt__:
if HAS_JSONSCHEMA:
return __virtualname__
return False, 'The ESXi Proxy Minion module did not load.'
@ -309,32 +317,104 @@ def init(opts):
ESXi devices, the host, login credentials, and, if configured,
the protocol and port are cached.
'''
if 'host' not in opts['proxy']:
log.critical('No \'host\' key found in pillar for this proxy.')
return False
if 'username' not in opts['proxy']:
log.critical('No \'username\' key found in pillar for this proxy.')
return False
if 'passwords' not in opts['proxy']:
log.critical('No \'passwords\' key found in pillar for this proxy.')
return False
host = opts['proxy']['host']
# Get the correct login details
log.debug('Initting esxi proxy module in process \'{}\''
''.format(os.getpid()))
log.debug('Validating esxi proxy input')
schema = EsxiProxySchema.serialize()
log.trace('esxi_proxy_schema = {}'.format(schema))
proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
log.trace('proxy_conf = {0}'.format(proxy_conf))
try:
username, password = find_credentials(host)
except SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
jsonschema.validate(proxy_conf, schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
# Set configuration details
DETAILS['host'] = host
DETAILS['username'] = username
DETAILS['password'] = password
DETAILS['protocol'] = opts['proxy'].get('protocol', 'https')
DETAILS['port'] = opts['proxy'].get('port', '443')
DETAILS['credstore'] = opts['proxy'].get('credstore')
DETAILS['proxytype'] = proxy_conf['proxytype']
if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf):
log.critical('Neither \'host\' nor \'vcenter\' keys found in pillar '
'for this proxy.')
return False
if 'host' in proxy_conf:
# We have started the proxy by connecting directly to the host
if 'username' not in proxy_conf:
log.critical('No \'username\' key found in pillar for this proxy.')
return False
if 'passwords' not in proxy_conf:
log.critical('No \'passwords\' key found in pillar for this proxy.')
return False
host = proxy_conf['host']
# Get the correct login details
try:
username, password = find_credentials(host)
except SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
# Set configuration details
DETAILS['host'] = host
DETAILS['username'] = username
DETAILS['password'] = password
DETAILS['protocol'] = proxy_conf.get('protocol')
DETAILS['port'] = proxy_conf.get('port')
return True
if 'vcenter' in proxy_conf:
vcenter = proxy_conf['vcenter']
if not proxy_conf.get('esxi_host'):
log.critical('No \'esxi_host\' key found in pillar for this proxy.')
return False
DETAILS['esxi_host'] = proxy_conf['esxi_host']
# We have started the proxy by connecting via the vCenter
if 'mechanism' not in proxy_conf:
log.critical('No \'mechanism\' key found in pillar for this proxy.')
return False
mechanism = proxy_conf['mechanism']
# Save mandatory fields in cache
for key in ('vcenter', 'mechanism'):
DETAILS[key] = proxy_conf[key]
if mechanism == 'userpass':
if 'username' not in proxy_conf:
log.critical('No \'username\' key found in pillar for this '
'proxy.')
return False
if 'passwords' not in proxy_conf or \
len(proxy_conf['passwords']) == 0:
log.critical('Mechanism is set to \'userpass\' , but no '
'\'passwords\' key found in pillar for this '
'proxy.')
return False
for key in ('username', 'passwords'):
DETAILS[key] = proxy_conf[key]
elif mechanism == 'sspi':
if 'domain' not in proxy_conf:
log.critical('Mechanism is set to \'sspi\' , but no '
'\'domain\' key found in pillar for this proxy.')
return False
if 'principal' not in proxy_conf:
log.critical('Mechanism is set to \'sspi\' , but no '
'\'principal\' key found in pillar for this '
'proxy.')
return False
for key in ('domain', 'principal'):
DETAILS[key] = proxy_conf[key]
if mechanism == 'userpass':
# Get the correct login details
log.debug('Retrieving credentials and testing vCenter connection'
' for mechanism \'userpass\'')
try:
username, password = find_credentials(DETAILS['vcenter'])
DETAILS['password'] = password
except SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
# Save optional
DETAILS['protocol'] = proxy_conf.get('protocol', 'https')
DETAILS['port'] = proxy_conf.get('port', '443')
DETAILS['credstore'] = proxy_conf.get('credstore')
def grains():
@ -358,8 +438,9 @@ def grains_refresh():
def ping():
'''
Check to see if the host is responding. Returns False if the host didn't
respond, True otherwise.
Returns True if connection is to be done via a vCenter (no connection is attempted).
Check to see if the host is responding when connecting directly to an ESXi
host.
CLI Example:
@ -367,15 +448,19 @@ def ping():
salt esxi-host test.ping
'''
# find_credentials(DETAILS['host'])
try:
__salt__['vsphere.system_info'](host=DETAILS['host'],
username=DETAILS['username'],
password=DETAILS['password'])
except SaltSystemExit as err:
log.warning(err)
return False
if DETAILS.get('esxi_host'):
return True
else:
# TODO Check connection if mechanism is SSPI
if DETAILS['mechanism'] == 'userpass':
find_credentials(DETAILS['host'])
try:
__salt__['vsphere.system_info'](host=DETAILS['host'],
username=DETAILS['username'],
password=DETAILS['password'])
except SaltSystemExit as err:
log.warning(err)
return False
return True
@ -461,3 +546,14 @@ def _grains(host, protocol=None, port=None):
port=port)
GRAINS_CACHE.update(ret)
return GRAINS_CACHE
def is_connected_via_vcenter():
return 'vcenter' in DETAILS
def get_details():
'''
Return the proxy details
'''
return DETAILS

338
salt/proxy/vcenter.py Normal file
View file

@ -0,0 +1,338 @@
# -*- coding: utf-8 -*-
'''
Proxy Minion interface module for managing VMWare vCenters.
:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)`
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
Dependencies
============
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original ESXi State
Module was developed against.
Configuration
=============
To use this proxy module, please use one of the following configurations:
.. code-block:: yaml
proxy:
proxytype: vcenter
vcenter: <ip or dns name of parent vcenter>
username: <vCenter username>
mechanism: userpass
passwords:
- first_password
- second_password
- third_password
proxy:
proxytype: vcenter
vcenter: <ip or dns name of parent vcenter>
username: <vCenter username>
domain: <user domain>
mechanism: sspi
principal: <host kerberos principal>
proxytype
^^^^^^^^^
The ``proxytype`` key and value pair is critical, as it tells Salt which
interface to load from the ``proxy`` directory in Salt's install hierarchy,
or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your
own proxy module, for example). To use this Proxy Module, set this to
``vcenter``.
vcenter
^^^^^^^
The location of the VMware vCenter server (host or IP). Required.
username
^^^^^^^^
The username used to login to the vcenter, such as ``root``.
Required only for userpass.
mechanism
^^^^^^^^^
The mechanism used to connect to the vCenter server. Supported values are
``userpass`` and ``sspi``. Required.
passwords
^^^^^^^^^
A list of passwords to be used to try and login to the vCenter server. At least
one password in this list is required if mechanism is ``userpass``.
The proxy integration will try the passwords listed in order.
domain
^^^^^^
User domain. Required if mechanism is ``sspi``
principal
^^^^^^^^^
Kerberos principal. Required if mechanism is ``sspi``
protocol
^^^^^^^^
If the vCenter is not using the default protocol, set this value to an
alternate protocol. Default is ``https``.
port
^^^^
If the vCenter server is not using the default port, set this value to an
alternate port. Default is ``443``.
Salt Proxy
----------
After your pillar is in place, you can test the proxy. The proxy can run on
any machine that has network connectivity to your Salt Master and to the
vCenter server in the pillar. SaltStack recommends that the machine running the
salt-proxy process also run a regular minion, though it is not strictly
necessary.
On the machine that will run the proxy, make sure there is an ``/etc/salt/proxy``
file with at least the following in it:
.. code-block:: yaml
master: <ip or hostname of salt-master>
You can then start the salt-proxy process with:
.. code-block:: bash
salt-proxy --proxyid <id of the cluster>
You may want to add ``-l debug`` to run the above in the foreground in
debug mode just to make sure everything is OK.
Next, accept the key for the proxy on your salt-master, just like you
would for a regular minion:
.. code-block:: bash
salt-key -a <id you gave the vcenter host>
You can confirm that the pillar data is in place for the proxy:
.. code-block:: bash
salt <id> pillar.items
And now you should be able to ping the proxy minion to make sure it is
responding:
.. code-block:: bash
salt <id> test.ping
At this point you can execute one-off commands against the vCenter. For
example, you can check whether the proxy can actually connect to the vCenter:
.. code-block:: bash
salt <id> vsphere.test_vcenter_connection
Note that you don't need to provide credentials or an ip/hostname. Salt
knows to use the credentials you stored in Pillar.
It's important to understand how this particular proxy works.
:mod:`Salt.modules.vsphere </ref/modules/all/salt.modules.vsphere>` is a
standard Salt execution module.
If you pull up the docs for it you'll see
that almost every function in the module takes credentials and targets either
a vCenter or a host. When credentials and a host aren't passed, Salt runs commands
through ``pyVmomi`` against the local machine. If you wanted, you could run
functions from this module on any host where an appropriate version of
``pyVmomi`` is installed, and that host would reach out over the network
and communicate with the ESXi host.
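As an illustrative sketch (an editor's example, not part of this module), the
same call can also be driven programmatically from the master with
``salt.client.LocalClient``; the proxy minion id ``vcenter-proxy`` below is an
assumed placeholder:
.. code-block:: python
import salt.client
# Target the proxy minion by the id whose key you accepted earlier
# ('vcenter-proxy' is hypothetical).
client = salt.client.LocalClient()
# The proxy supplies the credentials cached from Pillar.
print(client.cmd('vcenter-proxy', 'vsphere.test_vcenter_connection'))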
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
# Import Salt Libs
import salt.exceptions
from salt.config.schemas.vcenter import VCenterProxySchema
from salt.utils.dictupdate import merge
# This must be present or the Salt loader won't load this module.
__proxyenabled__ = ['vcenter']
# External libraries
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
# Variables are scoped to this module so we can have persistent data
# across calls to fns in here.
DETAILS = {}
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'vcenter'
def __virtual__():
'''
Only load if the vsphere execution module is available.
'''
if HAS_JSONSCHEMA:
return __virtualname__
return False, 'The vcenter proxy module did not load.'
def init(opts):
'''
This function gets called when the proxy starts up.
For login, the protocol and port are cached.
'''
log.info('Initting vcenter proxy module in process {0}'
''.format(os.getpid()))
log.trace('VCenter Proxy Validating vcenter proxy input')
schema = VCenterProxySchema.serialize()
log.trace('schema = {}'.format(schema))
proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
log.trace('proxy_conf = {0}'.format(proxy_conf))
try:
jsonschema.validate(proxy_conf, schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidConfigError(exc)
# Save mandatory fields in cache
for key in ('vcenter', 'mechanism'):
DETAILS[key] = proxy_conf[key]
# Additional validation
if DETAILS['mechanism'] == 'userpass':
if 'username' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\' , but no '
'\'username\' key found in proxy config')
if 'passwords' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\' , but no '
'\'passwords\' key found in proxy config')
for key in ('username', 'passwords'):
DETAILS[key] = proxy_conf[key]
else:
if 'domain' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\' , but no '
'\'domain\' key found in proxy config')
if 'principal' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\' , but no '
'\'principal\' key found in proxy config')
for key in ('domain', 'principal'):
DETAILS[key] = proxy_conf[key]
# Save optional
DETAILS['protocol'] = proxy_conf.get('protocol')
DETAILS['port'] = proxy_conf.get('port')
# Test connection
if DETAILS['mechanism'] == 'userpass':
# Get the correct login details
log.info('Retrieving credentials and testing vCenter connection for '
'mechanism \'userpass\'')
try:
username, password = find_credentials()
DETAILS['password'] = password
except salt.exceptions.SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
return True
def ping():
'''
Returns True.
CLI Example:
.. code-block:: bash
salt vcenter test.ping
'''
return True
def shutdown():
'''
Shut down the connection to the proxy device. For this proxy,
shutdown is a no-op.
'''
log.debug('VCenter proxy shutdown() called...')
def find_credentials():
'''
Cycle through all the possible credentials and return the first one that
works.
'''
# If the username and password were already found, don't go through the
# connection process again
if 'username' in DETAILS and 'password' in DETAILS:
return DETAILS['username'], DETAILS['password']
passwords = __pillar__['proxy']['passwords']
for password in passwords:
DETAILS['password'] = password
if not __salt__['vsphere.test_vcenter_connection']():
# We are unable to authenticate
continue
# If we have data returned from above, we've successfully authenticated.
return DETAILS['username'], password
# We've reached the end of the list without successfully authenticating.
raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
'incorrect credentials.')
def get_details():
'''
Function that returns the cached details
'''
return DETAILS

View file

@ -6,9 +6,10 @@ Module for sending messages to Mattermost
:configuration: This module can be used by either passing an api_url and hook
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
master/minion config. For example:
.. code-block:: yaml
mattermost:
hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_url: https://example.com

View file

@ -77,10 +77,25 @@ def serialize(obj, **options):
raise SerializationError(error)
class EncryptedString(str):
yaml_tag = u'!encrypted'
@staticmethod
def yaml_constructor(loader, tag, node):
return EncryptedString(loader.construct_scalar(node))
@staticmethod
def yaml_dumper(dumper, data):
return dumper.represent_scalar(EncryptedString.yaml_tag, data.__str__())
class Loader(BaseLoader): # pylint: disable=W0232
'''Custom Loader subclass, so we do not pollute the legacy Loader'''
pass
Loader.add_multi_constructor(EncryptedString.yaml_tag, EncryptedString.yaml_constructor)
Loader.add_multi_constructor('tag:yaml.org,2002:null', Loader.construct_yaml_null)
Loader.add_multi_constructor('tag:yaml.org,2002:bool', Loader.construct_yaml_bool)
Loader.add_multi_constructor('tag:yaml.org,2002:int', Loader.construct_yaml_int)
@ -100,6 +115,7 @@ class Dumper(BaseDumper): # pylint: disable=W0232
'''Custom Dumper subclass, so we do not pollute the legacy Dumper'''
pass
Dumper.add_multi_representer(EncryptedString, EncryptedString.yaml_dumper)
Dumper.add_multi_representer(type(None), Dumper.represent_none)
Dumper.add_multi_representer(str, Dumper.represent_str)
if six.PY2:
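# Editor's illustrative sketch (assumed usage, not part of this diff): the
# Loader/Dumper pair above round-trips the custom !encrypted tag, e.g.:
#   data = yaml.load('password: !encrypted s3cr3t', Loader=Loader)
#   isinstance(data['password'], EncryptedString)  # True; it is a str subclass
#   yaml.dump(data, Dumper=Dumper)  # re-emits: password: !encrypted 's3cr3t'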

View file

@ -414,7 +414,7 @@ def extracted(name,
.. versionadded:: 2017.7.3
keep : True
Same as ``keep_source``.
Same as ``keep_source``, kept for backward-compatibility.
.. note::
If both ``keep_source`` and ``keep`` are used, ``keep`` will be
@ -648,6 +648,21 @@ def extracted(name,
# Remove pub kwargs as they're irrelevant here.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '
'do the same thing, \'keep\' was ignored.'
)
keep_source = bool(kwargs.pop('keep_source'))
kwargs.pop('keep')
elif 'keep_source' in kwargs:
keep_source = bool(kwargs.pop('keep_source'))
elif 'keep' in kwargs:
keep_source = bool(kwargs.pop('keep'))
else:
# Neither was passed, default is True
keep_source = True
if 'keep_source' in kwargs and 'keep' in kwargs:
ret.setdefault('warnings', []).append(
'Both \'keep_source\' and \'keep\' were used. Since these both '

View file

@ -697,7 +697,10 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N
changed = {}
for items in parameters:
for k, value in items.items():
params[k] = value
if type(value) is bool:
params[k] = 'on' if value else 'off'
else:
params[k] = str(value)
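# Editor's note: e.g. a pillar value {'autovacuum': True} is sent as 'on'
# and {'max_connections': 100} as '100', so the comparison further down
# against the string ParameterValue reported by RDS is string-to-string.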
logging.debug('Parameters from user are : {0}.'.format(params))
options = __salt__['boto_rds.describe_parameters'](name=name, region=region, key=key, keyid=keyid, profile=profile)
if not options.get('result'):
@ -705,8 +708,8 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N
ret['comment'] = os.linesep.join([ret['comment'], 'Failed to get parameters for group {0}.'.format(name)])
return ret
for parameter in options['parameters'].values():
if parameter['ParameterName'] in params and str(params.get(parameter['ParameterName'])) != str(parameter['ParameterValue']):
logging.debug('Values that are being compared are {0}:{1} .'.format(params.get(parameter['ParameterName']), parameter['ParameterValue']))
if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != str(parameter['ParameterValue']):
logging.debug('Values that are being compared for {0} are {1}:{2} .'.format(parameter['ParameterName'], params.get(parameter['ParameterName']), parameter['ParameterValue']))
changed[parameter['ParameterName']] = params.get(parameter['ParameterName'])
if len(changed) > 0:
if __opts__['test']:
@ -715,9 +718,9 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N
return ret
update = __salt__['boto_rds.update_parameter_group'](name, parameters=changed, apply_method=apply_method, tags=tags, region=region,
key=key, keyid=keyid, profile=profile)
if not update:
if 'error' in update:
ret['result'] = False
ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}.'.format(changed, name)])
ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}:'.format(changed, name), update['error']['message']])
return ret
ret['changes']['Parameters'] = changed
ret['comment'] = os.linesep.join([ret['comment'], 'Parameters {0} for group {1} are changed.'.format(changed, name)])

View file

@ -97,29 +97,61 @@ def installed(name, version=None, source=None, force=False, pre_versions=False,
ret['changes'] = {name: 'Version {0} will be installed'
''.format(version)}
else:
ret['changes'] = {name: 'Will be installed'}
ret['changes'] = {name: 'Latest version will be installed'}
# Package installed
else:
version_info = __salt__['chocolatey.version'](name, check_remote=True)
full_name = name
lower_name = name.lower()
for pkg in version_info:
if lower_name == pkg.lower():
if name.lower() == pkg.lower():
full_name = pkg
available_version = version_info[full_name]['available'][0]
version = version if version else available_version
installed_version = version_info[full_name]['installed'][0]
if force:
ret['changes'] = {name: 'Version {0} will be forcibly installed'
''.format(version)}
elif allow_multiple:
ret['changes'] = {name: 'Version {0} will be installed side by side'
''.format(version)}
if version:
if salt.utils.compare_versions(
ver1=installed_version, oper="==", ver2=version):
if force:
ret['changes'] = {
name: 'Version {0} will be reinstalled'.format(version)}
ret['comment'] = 'Reinstall {0} {1}' \
''.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed' \
''.format(name, version)
if __opts__['test']:
ret['result'] = None
return ret
else:
if allow_multiple:
ret['changes'] = {
name: 'Version {0} will be installed side by side with '
'Version {1} if supported'
''.format(version, installed_version)}
ret['comment'] = 'Install {0} {1} side-by-side with {0} {2}' \
''.format(full_name, version, installed_version)
else:
ret['changes'] = {
name: 'Version {0} will be installed over Version {1} '
''.format(version, installed_version)}
ret['comment'] = 'Install {0} {1} over {0} {2}' \
''.format(full_name, version, installed_version)
force = True
else:
ret['comment'] = 'The Package {0} is already installed'.format(name)
return ret
version = installed_version
if force:
ret['changes'] = {
name: 'Version {0} will be reinstalled'.format(version)}
ret['comment'] = 'Reinstall {0} {1}' \
''.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed' \
''.format(name, version)
if __opts__['test']:
ret['result'] = None
return ret
if __opts__['test']:
ret['result'] = None

717
salt/states/dvs.py Normal file
View file

@ -0,0 +1,717 @@
# -*- coding: utf-8 -*-
'''
Manage VMware distributed virtual switches (DVSs) and their distributed virtual
portgroups (DVportgroups).
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstaley.com>`
Examples
========
Several settings can be changed for DVSs and DVportgroups. Here are two examples
covering all of the settings; fewer settings can also be used.
DVS
---
.. code-block:: python
'name': 'dvs1',
'max_mtu': 1000,
'uplink_names': [
'dvUplink1',
'dvUplink2',
'dvUplink3'
],
'capability': {
'portgroup_operation_supported': false,
'operation_supported': true,
'port_operation_supported': false
},
'lacp_api_version': 'multipleLag',
'contact_email': 'foo@email.com',
'product_info': {
'version': '6.0.0',
'vendor': 'VMware, Inc.',
'name': 'DVS'
},
'network_resource_management_enabled': true,
'contact_name': 'me@email.com',
'infrastructure_traffic_resource_pools': [
{
'reservation': 0,
'limit': 1000,
'share_level': 'high',
'key': 'management',
'num_shares': 100
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'faultTolerance',
'num_shares': 50
},
{
'reservation': 0,
'limit': 32000,
'share_level': 'normal',
'key': 'vmotion',
'num_shares': 50
},
{
'reservation': 10000,
'limit': -1,
'share_level': 'normal',
'key': 'virtualMachine',
'num_shares': 50
},
{
'reservation': 0,
'limit': -1,
'share_level': 'custom',
'key': 'iSCSI',
'num_shares': 75
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'nfs',
'num_shares': 50
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'hbr',
'num_shares': 50
},
{
'reservation': 8750,
'limit': 15000,
'share_level': 'high',
'key': 'vsan',
'num_shares': 100
},
{
'reservation': 0,
'limit': -1,
'share_level': 'normal',
'key': 'vdp',
'num_shares': 50
}
],
'link_discovery_protocol': {
'operation': 'listen',
'protocol': 'cdp'
},
'network_resource_control_version': 'version3',
'description': 'Managed by Salt. Random settings.'
Note: The mandatory attribute is: ``name``.
Portgroup
---------
.. code-block:: python
'security_policy': {
'allow_promiscuous': true,
'mac_changes': false,
'forged_transmits': true
},
'name': 'vmotion-v702',
'out_shaping': {
'enabled': true,
'average_bandwidth': 1500,
'burst_size': 4096,
'peak_bandwidth': 1500
},
'num_ports': 128,
'teaming': {
'port_order': {
'active': [
'dvUplink2'
],
'standby': [
'dvUplink1'
]
},
'notify_switches': false,
'reverse_policy': true,
'rolling_order': false,
'policy': 'failover_explicit',
'failure_criteria': {
'check_error_percent': true,
'full_duplex': false,
'check_duplex': false,
'percentage': 50,
'check_speed': 'minimum',
'speed': 20,
'check_beacon': true
}
},
'type': 'earlyBinding',
'vlan_id': 100,
'description': 'Managed by Salt. Random settings.'
Note: The mandatory attributes are: ``name``, ``type``.
Dependencies
============
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9,
or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed on PyPI, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
Version 5.5.0.2014.1.1 is a known stable version that the original ESXi State
Module was developed against.
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import traceback
import sys
# Import Salt Libs
import salt.exceptions
from salt.ext.six.moves import range
# Import Third Party Libs
try:
from pyVmomi import VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
if not HAS_PYVMOMI:
return False, 'State module did not load: pyVmomi not found'
# We check the supported vim versions to infer the pyVmomi version
if 'vim25/6.0' in VmomiSupport.versionMap and \
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
return False, ('State module did not load: Incompatible versions '
'of Python and pyVmomi present. See Issue #29537.')
return 'dvs'
def mod_init(low):
'''
Init function
'''
return True
def _get_datacenter_name():
'''
Returns the datacenter name configured on the proxy
Supported proxies: esxcluster, esxdatacenter
'''
proxy_type = __salt__['vsphere.get_proxy_type']()
details = None
if proxy_type == 'esxcluster':
details = __salt__['esxcluster.get_details']()
elif proxy_type == 'esxdatacenter':
details = __salt__['esxdatacenter.get_details']()
if not details:
raise salt.exceptions.CommandExecutionError(
'details for proxy type \'{0}\' not loaded'.format(proxy_type))
return details['datacenter']
def dvs_configured(name, dvs):
'''
Configures a DVS.
Creates a new DVS, if it doesn't exist in the provided datacenter or
reconfigures it if configured differently.
dvs
DVS dict representation (see module sysdocs)
'''
datacenter_name = _get_datacenter_name()
dvs_name = dvs['name'] if dvs.get('name') else name
log.info('Running state {0} for DVS \'{1}\' in datacenter '
'\'{2}\''.format(name, dvs_name, datacenter_name))
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None}
comments = []
changes = {}
try:
#TODO dvs validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name],
service_instance=si)
if not dvss:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new DVS '
'\'{1}\' in datacenter \'{2}\''
''.format(name, dvs_name, datacenter_name))
log.info(comments[-1])
else:
dvs['name'] = dvs_name
__salt__['vsphere.create_dvs'](dvs_dict=dvs,
dvs_name=dvs_name,
service_instance=si)
comments.append('Created a new DVS \'{0}\' in datacenter '
'\'{1}\''.format(dvs_name, datacenter_name))
log.info(comments[-1])
changes.update({'dvs': {'new': dvs}})
else:
# DVS already exists. Checking various aspects of the config
props = ['description', 'contact_email', 'contact_name',
'lacp_api_version', 'link_discovery_protocol',
'max_mtu', 'network_resource_control_version',
'network_resource_management_enabled']
log.trace('DVS \'{0}\' found in datacenter \'{1}\'. Checking '
'for any updates in '
'{2}'.format(dvs_name, datacenter_name, props))
props_to_original_values = {}
props_to_updated_values = {}
current_dvs = dvss[0]
for prop in props:
if prop in dvs and dvs[prop] != current_dvs.get(prop):
props_to_original_values[prop] = current_dvs.get(prop)
props_to_updated_values[prop] = dvs[prop]
# Simple infrastructure traffic resource control compare doesn't
# work because num_shares is optional if share_level is not custom
# We need to do a dedicated compare for this property
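# Editor's note: e.g. a desired pool {'key': 'vmotion', 'share_level': 'high'}
# that omits num_shares should compare equal to the current pool even though
# the API reports an implicit num_shares; the loop below deletes that implicit
# key before comparing.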
infra_prop = 'infrastructure_traffic_resource_pools'
original_infra_res_pools = []
updated_infra_res_pools = []
if infra_prop in dvs:
if not current_dvs.get(infra_prop):
updated_infra_res_pools = dvs[infra_prop]
else:
for idx in range(len(dvs[infra_prop])):
if 'num_shares' not in dvs[infra_prop][idx] and \
current_dvs[infra_prop][idx]['share_level'] != \
'custom' and \
'num_shares' in current_dvs[infra_prop][idx]:
del current_dvs[infra_prop][idx]['num_shares']
if dvs[infra_prop][idx] != \
current_dvs[infra_prop][idx]:
original_infra_res_pools.append(
current_dvs[infra_prop][idx])
updated_infra_res_pools.append(
dict(dvs[infra_prop][idx]))
if updated_infra_res_pools:
props_to_original_values[
'infrastructure_traffic_resource_pools'] = \
original_infra_res_pools
props_to_updated_values[
'infrastructure_traffic_resource_pools'] = \
updated_infra_res_pools
if props_to_updated_values:
if __opts__['test']:
changes_string = ''
for p in props_to_updated_values:
if p == 'infrastructure_traffic_resource_pools':
changes_string += \
'\tinfrastructure_traffic_resource_pools:\n'
for idx in range(len(props_to_updated_values[p])):
d = props_to_updated_values[p][idx]
s = props_to_original_values[p][idx]
changes_string += \
('\t\t{0} from \'{1}\' to \'{2}\'\n'
''.format(d['key'], s, d))
else:
changes_string += \
('\t{0} from \'{1}\' to \'{2}\'\n'
''.format(p, props_to_original_values[p],
props_to_updated_values[p]))
comments.append(
'State dvs_configured will update DVS \'{0}\' '
'in datacenter \'{1}\':\n{2}'
''.format(dvs_name, datacenter_name, changes_string))
log.info(comments[-1])
else:
__salt__['vsphere.update_dvs'](
dvs_dict=props_to_updated_values,
dvs=dvs_name,
service_instance=si)
comments.append('Updated DVS \'{0}\' in datacenter \'{1}\''
''.format(dvs_name, datacenter_name))
log.info(comments[-1])
changes.update({'dvs': {'new': props_to_updated_values,
'old': props_to_original_values}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': str(exc),
'result': False if not __opts__['test'] else None})
return ret
if not comments:
# We have no changes
ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is '
'correctly configured. Nothing to be done.'
''.format(dvs_name, datacenter_name)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret
def _get_diff_dict(dict1, dict2):
'''
Returns a dictionary with the diffs between two dictionaries
It will ignore any key that doesn't exist in dict2
'''
ret_dict = {}
for p in dict2.keys():
if p not in dict1:
ret_dict.update({p: {'val1': None, 'val2': dict2[p]}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = _get_diff_dict(dict1[p], dict2[p])
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}})
return ret_dict
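# Editor's worked example: given
#   dict1 = {'max_mtu': 1500, 'teaming': {'policy': 'loadbalance_ip', 'notify': True}}
#   dict2 = {'max_mtu': 9000, 'teaming': {'policy': 'failover_explicit', 'notify': True}}
# _get_diff_dict(dict1, dict2) returns
#   {'max_mtu': {'val1': 1500, 'val2': 9000},
#    'teaming': {'policy': {'val1': 'loadbalance_ip', 'val2': 'failover_explicit'}}}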
def _get_val2_dict_from_diff_dict(diff_dict):
'''
Returns a dictionary with the values stored in val2 of a diff dict.
'''
ret_dict = {}
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if 'val2' in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p]['val2']})
else:
ret_dict.update(
{p: _get_val2_dict_from_diff_dict(diff_dict[p])})
return ret_dict
def _get_val1_dict_from_diff_dict(diff_dict):
'''
Returns a dictionary with the values stored in val1 of a diff dict.
'''
ret_dict = {}
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if 'val1' in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p]['val1']})
else:
ret_dict.update(
{p: _get_val1_dict_from_diff_dict(diff_dict[p])})
return ret_dict
def _get_changes_from_diff_dict(diff_dict):
'''
Returns a list of string messages describing the differences in a diff dict.
Each inner message is indented one tab deeper.
'''
changes_strings = []
for p in diff_dict.keys():
if not isinstance(diff_dict[p], dict):
raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
if sorted(diff_dict[p].keys()) == ['val1', 'val2']:
# Some string formatting
from_str = diff_dict[p]['val1']
if isinstance(diff_dict[p]['val1'], str):
from_str = '\'{0}\''.format(diff_dict[p]['val1'])
elif isinstance(diff_dict[p]['val1'], list):
from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1']))
to_str = diff_dict[p]['val2']
if isinstance(diff_dict[p]['val2'], str):
to_str = '\'{0}\''.format(diff_dict[p]['val2'])
elif isinstance(diff_dict[p]['val2'], list):
to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2']))
changes_strings.append('{0} from {1} to {2}'.format(
p, from_str, to_str))
else:
sub_changes = _get_changes_from_diff_dict(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend(['\t{0}'.format(c)
for c in sub_changes])
return changes_strings
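# Editor's note: on the diff example above, this yields (order may vary):
#   ['max_mtu from 1500 to 9000',
#    'teaming:',
#    "\tpolicy from 'loadbalance_ip' to 'failover_explicit'"]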
def portgroups_configured(name, dvs, portgroups):
'''
Configures portgroups on a DVS.
Creates/updates/removes portgroups in a provided DVS
dvs
Name of the DVS
portgroups
Portgroup dict representations (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running state {0} on DVS \'{1}\', datacenter '
'\'{2}\''.format(name, dvs, datacenter))
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
comments = []
changes = {}
try:
#TODO portgroups validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_pgs = __salt__['vsphere.list_dvportgroups'](
dvs=dvs, service_instance=si)
expected_pg_names = []
for pg in portgroups:
pg_name = pg['name']
expected_pg_names.append(pg_name)
del pg['name']
log.info('Checking pg \'{0}\''.format(pg_name))
filtered_current_pgs = \
[p for p in current_pgs if p.get('name') == pg_name]
if not filtered_current_pgs:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new portgroup '
'\'{1}\' in DVS \'{2}\', datacenter '
'\'{3}\''.format(name, pg_name, dvs,
datacenter))
else:
__salt__['vsphere.create_dvportgroup'](
portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
service_instance=si)
comments.append('Created a new portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update({pg_name: {'new': pg}})
else:
# Portgroup already exists. Checking the config
log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter '
'\'{2}\'. Checking for any updates.'
''.format(pg_name, dvs, datacenter))
current_pg = filtered_current_pgs[0]
diff_dict = _get_diff_dict(current_pg, pg)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = '
'{0}'.format(changes_strings))
comments.append(
'State {0} will update portgroup \'{1}\' in '
'DVS \'{2}\', datacenter \'{3}\':\n{4}'
''.format(name, pg_name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
service_instance=si)
comments.append('Updated portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update(
{pg_name: {'new':
_get_val2_dict_from_diff_dict(diff_dict),
'old':
_get_val1_dict_from_diff_dict(diff_dict)}})
# Add the uplink portgroup to the expected pg names
uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
expected_pg_names.append(uplink_pg['name'])
# Remove any extra portgroups
for current_pg in current_pgs:
if current_pg['name'] not in expected_pg_names:
changes_required = True
if __opts__['test']:
comments.append('State {0} will remove '
'the portgroup \'{1}\' from DVS \'{2}\', '
'datacenter \'{3}\''
''.format(name, current_pg['name'], dvs,
datacenter))
else:
__salt__['vsphere.remove_dvportgroup'](
portgroup=current_pg['name'], dvs=dvs,
service_instance=si)
comments.append('Removed the portgroup \'{0}\' from DVS '
'\'{1}\', datacenter \'{2}\''
''.format(current_pg['name'], dvs,
datacenter))
log.info(comments[-1])
changes.update({current_pg['name']:
{'old': current_pg}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
'\'{1}\' exist and are correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret
def uplink_portgroup_configured(name, dvs, uplink_portgroup):
'''
Configures the uplink portgroup on a DVS. The state assumes there is only
one uplink portgroup.
dvs
Name of the DVS
uplink_portgroup
Uplink portgroup dict representation (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\''
''.format(name, dvs, datacenter))
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
comments = []
changes = {}
try:
#TODO uplink portgroup validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
log.trace('current_uplink_portgroup = '
'{0}'.format(current_uplink_portgroup))
diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = '
'{0}'.format(changes_strings))
comments.append(
'State {0} will update the '
'uplink portgroup in DVS \'{1}\', datacenter '
'\'{2}\':\n{3}'
''.format(name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=uplink_portgroup,
portgroup=current_uplink_portgroup['name'],
dvs=dvs,
service_instance=si)
comments.append('Updated the uplink portgroup in DVS '
'\'{0}\', datacenter \'{1}\''
''.format(dvs, datacenter))
log.info(comments[-1])
changes.update(
{'uplink_portgroup':
{'new': _get_val2_dict_from_diff_dict(diff_dict),
'old': _get_val1_dict_from_diff_dict(diff_dict)}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter '
'\'{1}\' is correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret

View file

@ -90,20 +90,47 @@ ESXi Proxy Minion, please refer to the
configuration examples, dependency installation instructions, how to run remote
execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state
example.
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import sys
import re
# Import Salt Libs
from salt.ext import six
import salt.utils.files
from salt.exceptions import CommandExecutionError
from salt.exceptions import CommandExecutionError, InvalidConfigError, \
VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \
ArgumentValueError
from salt.utils.decorators import depends
from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \
HostCacheSchema
# External libraries
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
# Get Logging Started
log = logging.getLogger(__name__)
try:
from pyVmomi import VmomiSupport
# We check the supported vim versions to infer the pyVmomi version
if 'vim25/6.0' in VmomiSupport.versionMap and \
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
log.error('pyVmomi not loaded: Incompatible versions '
'of Python. See Issue #29537.')
raise ImportError()
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def __virtual__():
return 'esxi.cmd' in __salt__
@ -998,6 +1025,577 @@ def syslog_configured(name,
return ret
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
def diskgroups_configured(name, diskgroups, erase_disks=False):
'''
Configures the disk groups to use for vsan.
It will do the following:
(1) checks if all disks in the diskgroup spec exist and errors if they
don't
(2) creates diskgroups with the correct disk configurations if a diskgroup
(identified by the cache disk canonical name) doesn't exist
(3) adds extra capacity disks to the existing diskgroup
State input example
-------------------
.. code:: python
{
'cache_scsi_addr': 'vmhba1:C0:T0:L0',
'capacity_scsi_addrs': [
'vmhba2:C0:T0:L0',
'vmhba3:C0:T0:L0',
'vmhba4:C0:T0:L0',
]
}
name
Mandatory state name.
diskgroups
Disk group representation containing SCSI disk addresses.
SCSI addresses are expected for the disks in the diskgroup.
erase_disks
Specifies whether to erase all partitions on all disks that are members of
the disk group before the disk group is created. Default value is False.
'''
proxy_details = __salt__['esxi.get_details']()
hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
else proxy_details['esxi_host']
log.info('Running state {0} for host \'{1}\''.format(name, hostname))
# Variable used to return the result of the invocation
ret = {'name': name, 'result': None, 'changes': {},
'pchanges': {}, 'comments': None}
# Signals if errors have been encountered
errors = False
# Signals if changes are required
changes = False
comments = []
diskgroup_changes = {}
si = None
try:
log.trace('Validating diskgroups_configured input')
schema = DiskGroupsDiskScsiAddressSchema.serialize()
try:
jsonschema.validate({'diskgroups': diskgroups,
'erase_disks': erase_disks}, schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
si = __salt__['vsphere.get_service_instance_via_proxy']()
host_disks = __salt__['vsphere.list_disks'](service_instance=si)
if not host_disks:
raise VMwareObjectRetrievalError(
'No disks retrieved from host \'{0}\''.format(hostname))
scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks}
log.trace('scsi_addr_to_disk_map = {0}'.format(scsi_addr_to_disk_map))
existing_diskgroups = \
__salt__['vsphere.list_diskgroups'](service_instance=si)
cache_disk_to_existing_diskgroup_map = \
{dg['cache_disk']: dg for dg in existing_diskgroups}
except CommandExecutionError as err:
log.error('Error: {0}'.format(err))
if si:
__salt__['vsphere.disconnect'](si)
ret.update({
'result': False if not __opts__['test'] else None,
'comment': str(err)})
return ret
# Iterate through all of the disk groups
for idx, dg in enumerate(diskgroups):
# Check for cache disk
if dg['cache_scsi_addr'] not in scsi_addr_to_disk_map:
comments.append('No cache disk with scsi address \'{0}\' was '
'found.'.format(dg['cache_scsi_addr']))
log.error(comments[-1])
errors = True
continue
# Check for capacity disks
cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id']
cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'],
cache_disk_id)
bad_scsi_addrs = []
capacity_disk_ids = []
capacity_disk_displays = []
for scsi_addr in dg['capacity_scsi_addrs']:
if scsi_addr not in scsi_addr_to_disk_map:
bad_scsi_addrs.append(scsi_addr)
continue
capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id'])
capacity_disk_displays.append(
'{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1]))
if bad_scsi_addrs:
comments.append('Error in diskgroup #{0}: capacity disks with '
'scsi addresses {1} were not found.'
''.format(idx,
', '.join(['\'{0}\''.format(a)
for a in bad_scsi_addrs])))
log.error(comments[-1])
errors = True
continue
if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id):
# A new diskgroup needs to be created
log.trace('erase_disks = {0}'.format(erase_disks))
if erase_disks:
if __opts__['test']:
comments.append('State {0} will '
'erase all disks of disk group #{1}; '
'cache disk: \'{2}\', '
'capacity disk(s): {3}.'
''.format(name, idx, cache_disk_display,
', '.join(
['\'{}\''.format(a) for a in
capacity_disk_displays])))
else:
# Erase disk group disks
for disk_id in [cache_disk_id] + capacity_disk_ids:
__salt__['vsphere.erase_disk_partitions'](
disk_id=disk_id, service_instance=si)
comments.append('Erased disks of diskgroup #{0}; '
'cache disk: \'{1}\', capacity disk(s): '
'{2}'.format(
idx, cache_disk_display,
', '.join(['\'{0}\''.format(a) for a in
capacity_disk_displays])))
log.info(comments[-1])
if __opts__['test']:
comments.append('State {0} will create '
'the disk group #{1}; cache disk: \'{2}\', '
'capacity disk(s): {3}.'
.format(name, idx, cache_disk_display,
', '.join(['\'{0}\''.format(a) for a in
capacity_disk_displays])))
log.info(comments[-1])
changes = True
continue
try:
__salt__['vsphere.create_diskgroup'](cache_disk_id,
capacity_disk_ids,
safety_checks=False,
service_instance=si)
except VMwareSaltError as err:
comments.append('Error creating disk group #{0}: '
'{1}.'.format(idx, err))
log.error(comments[-1])
errors = True
continue
comments.append('Created disk group #\'{0}\'.'.format(idx))
log.info(comments[-1])
diskgroup_changes[str(idx)] = \
{'new': {'cache': cache_disk_display,
'capacity': capacity_disk_displays}}
changes = True
continue
# The diskgroup exists; checking the capacity disks
log.debug('Disk group #{0} exists. Checking capacity disks: '
'{1}.'.format(idx, capacity_disk_displays))
existing_diskgroup = \
cache_disk_to_existing_diskgroup_map.get(cache_disk_id)
existing_capacity_disk_displays = \
['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks
if d['id'] == disk_id][0], disk_id)
for disk_id in existing_diskgroup['capacity_disks']]
# Populate added disks and removed disks and their displays
added_capacity_disk_ids = []
added_capacity_disk_displays = []
removed_capacity_disk_ids = []
removed_capacity_disk_displays = []
for disk_id in capacity_disk_ids:
if disk_id not in existing_diskgroup['capacity_disks']:
disk_scsi_addr = [d['scsi_address'] for d in host_disks
if d['id'] == disk_id][0]
added_capacity_disk_ids.append(disk_id)
added_capacity_disk_displays.append(
'{0} (id:{1})'.format(disk_scsi_addr, disk_id))
for disk_id in existing_diskgroup['capacity_disks']:
if disk_id not in capacity_disk_ids:
disk_scsi_addr = [d['scsi_address'] for d in host_disks
if d['id'] == disk_id][0]
removed_capacity_disk_ids.append(disk_id)
removed_capacity_disk_displays.append(
'{0} (id:{1})'.format(disk_scsi_addr, disk_id))
log.debug('Disk group #{0}: existing capacity disk ids: {1}; added '
'capacity disk ids: {2}; removed capacity disk ids: {3}'
''.format(idx, existing_capacity_disk_displays,
added_capacity_disk_displays,
removed_capacity_disk_displays))
#TODO revisit this when removing capacity disks is supported
if removed_capacity_disk_ids:
comments.append(
'Error removing capacity disk(s) {0} from disk group #{1}; '
'operation is not supported.'
''.format(', '.join(['\'{0}\''.format(id) for id in
removed_capacity_disk_displays]), idx))
log.error(comments[-1])
errors = True
continue
if added_capacity_disk_ids:
# Capacity disks need to be added to disk group
# Building a string representation of the capacity disks
# that need to be added
s = ', '.join(['\'{0}\''.format(id) for id in
added_capacity_disk_displays])
if __opts__['test']:
comments.append('State {0} will add '
'capacity disk(s) {1} to disk group #{2}.'
''.format(name, s, idx))
log.info(comments[-1])
changes = True
continue
try:
__salt__['vsphere.add_capacity_to_diskgroup'](
cache_disk_id,
added_capacity_disk_ids,
safety_checks=False,
service_instance=si)
except VMwareSaltError as err:
comments.append('Error adding capacity disk(s) {0} to '
'disk group #{1}: {2}.'.format(s, idx, err))
log.error(comments[-1])
errors = True
continue
com = ('Added capacity disk(s) {0} to disk group #{1}'
''.format(s, idx))
log.info(com)
comments.append(com)
diskgroup_changes[str(idx)] = \
{'new': {'cache': cache_disk_display,
'capacity': capacity_disk_displays},
'old': {'cache': cache_disk_display,
'capacity': existing_capacity_disk_displays}}
changes = True
continue
# No capacity needs to be added
s = ('Disk group #{0} is correctly configured. Nothing to be done.'
''.format(idx))
log.info(s)
comments.append(s)
__salt__['vsphere.disconnect'](si)
#Build the final return message
result = (True if not (changes or errors) else # no changes/errors
None if __opts__['test'] else # running in test mode
False if errors else True) # found errors; defaults to True
ret.update({'result': result,
'comment': '\n'.join(comments)})
if changes:
if __opts__['test']:
ret['pchanges'] = diskgroup_changes
else:
ret['changes'] = diskgroup_changes
return ret
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
def host_cache_configured(name, enabled, datastore, swap_size='100%',
dedicated_backing_disk=False,
erase_backing_disk=False):
'''
Configures the host cache used for swapping.
It will do the following:
(1) checks if backing disk exists
(2) creates the VMFS datastore if it doesn't exist (a datastore partition
will be created and will use the entire disk)
(3) raises an error if dedicated_backing_disk is True and partitions
already exist on the backing disk
(4) configures host_cache to use a portion of the datastore for caching
(either a specific size or a percentage of the datastore)
State input examples
--------------------
Percentage swap size (can't be 100%)
.. code:: python
{
'enabled': true,
'datastore': {
'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
'vmfs_version': 5,
'name': 'hostcache'
}
'dedicated_backing_disk': false
'swap_size': '98%',
}
Fixed sized swap size
.. code:: python
{
'enabled': true,
'datastore': {
'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
'vmfs_version': 5,
'name': 'hostcache'
}
'dedicated_backing_disk': true
'swap_size': '10GiB',
}
name
Mandatory state name.
enabled
Specifies whether the host cache is enabled.
datastore
Specifies the host cache datastore.
swap_size
Specifies the size of the host cache swap. Can be a percentage or a
value in GiB. Default value is ``100%``.
dedicated_backing_disk
Specifies whether the backing disk is dedicated to the host cache, which
means it must have no other partitions. Default is False.
erase_backing_disk
Specifies whether to erase all partitions on the backing disk before
the datastore is created. Default value is False.
'''
log.trace('enabled = {0}'.format(enabled))
log.trace('datastore = {0}'.format(datastore))
log.trace('swap_size = {0}'.format(swap_size))
log.trace('erase_backing_disk = {0}'.format(erase_backing_disk))
# Variable used to return the result of the invocation
proxy_details = __salt__['esxi.get_details']()
hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
else proxy_details['esxi_host']
log.trace('hostname = {0}'.format(hostname))
log.info('Running host_cache_configured for host '
'\'{0}\''.format(hostname))
ret = {'name': hostname, 'comment': 'Default comments',
'result': None, 'changes': {}, 'pchanges': {}}
result = None if __opts__['test'] else True # We assume success
needs_setting = False
comments = []
changes = {}
si = None
try:
log.debug('Validating host_cache_configured input')
schema = HostCacheSchema.serialize()
try:
jsonschema.validate({'enabled': enabled,
'datastore': datastore,
'swap_size': swap_size,
'erase_backing_disk': erase_backing_disk},
schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
m = re.match(r'(\d+)(%|GiB)', swap_size)
swap_size_value = int(m.group(1))
swap_type = m.group(2)
log.trace('swap_size_value = {0}; swap_type = {1}'.format(
swap_size_value, swap_type))
si = __salt__['vsphere.get_service_instance_via_proxy']()
host_cache = __salt__['vsphere.get_host_cache'](service_instance=si)
# Check enabled
if host_cache['enabled'] != enabled:
changes.update({'enabled': {'old': host_cache['enabled'],
'new': enabled}})
needs_setting = True
# Check datastores
existing_datastores = None
if host_cache.get('datastore'):
existing_datastores = \
__salt__['vsphere.list_datastores_via_proxy'](
datastore_names=[datastore['name']],
service_instance=si)
# Retrieve backing disks
existing_disks = __salt__['vsphere.list_disks'](
scsi_addresses=[datastore['backing_disk_scsi_addr']],
service_instance=si)
if not existing_disks:
raise VMwareObjectRetrievalError(
'Disk with scsi address \'{0}\' was not found in host \'{1}\''
''.format(datastore['backing_disk_scsi_addr'], hostname))
backing_disk = existing_disks[0]
backing_disk_display = '{0} (id:{1})'.format(
backing_disk['scsi_address'], backing_disk['id'])
log.trace('backing_disk = {0}'.format(backing_disk_display))
existing_datastore = None
if not existing_datastores:
# Check if disk needs to be erased
if erase_backing_disk:
if __opts__['test']:
comments.append('State {0} will erase '
'the backing disk \'{1}\' on host \'{2}\'.'
''.format(name, backing_disk_display,
hostname))
log.info(comments[-1])
else:
# Erase disk
__salt__['vsphere.erase_disk_partitions'](
disk_id=backing_disk['id'], service_instance=si)
comments.append('Erased backing disk \'{0}\' on host '
'\'{1}\'.'.format(backing_disk_display,
hostname))
log.info(comments[-1])
# Create the datastore
if __opts__['test']:
comments.append('State {0} will create '
'the datastore \'{1}\', with backing disk '
'\'{2}\', on host \'{3}\'.'
''.format(name, datastore['name'],
backing_disk_display, hostname))
log.info(comments[-1])
else:
if dedicated_backing_disk:
# Check backing disk doesn't already have partitions
partitions = __salt__['vsphere.list_disk_partitions'](
disk_id=backing_disk['id'], service_instance=si)
log.trace('partitions = {0}'.format(partitions))
# We will ignore the mbr partitions
non_mbr_partitions = [p for p in partitions
if p['format'] != 'mbr']
if len(non_mbr_partitions) > 0:
raise VMwareApiError(
'Backing disk \'{0}\' has unexpected partitions'
''.format(backing_disk_display))
__salt__['vsphere.create_vmfs_datastore'](
datastore['name'], existing_disks[0]['id'],
datastore['vmfs_version'], service_instance=si)
comments.append('Created vmfs datastore \'{0}\', backed by '
'disk \'{1}\', on host \'{2}\'.'
''.format(datastore['name'],
backing_disk_display, hostname))
log.info(comments[-1])
changes.update(
{'datastore':
{'new': {'name': datastore['name'],
'backing_disk': backing_disk_display}}})
existing_datastore = \
__salt__['vsphere.list_datastores_via_proxy'](
datastore_names=[datastore['name']],
service_instance=si)[0]
needs_setting = True
else:
# Check datastore is backed by the correct disk
if not existing_datastores[0].get('backing_disk_ids'):
raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a '
'backing disk'
''.format(datastore['name']))
if backing_disk['id'] not in \
existing_datastores[0]['backing_disk_ids']:
raise VMwareSaltError(
'Datastore \'{0}\' is not backed by the correct disk: '
'expected \'{1}\'; got {2}'
''.format(
datastore['name'], backing_disk['id'],
', '.join(
['\'{0}\''.format(disk) for disk in
existing_datastores[0]['backing_disk_ids']])))
comments.append('Datastore \'{0}\' already exists on host \'{1}\' '
'and is backed by disk \'{2}\'. Nothing to be '
'done.'.format(datastore['name'], hostname,
backing_disk_display))
existing_datastore = existing_datastores[0]
log.trace('existing_datastore = {0}'.format(existing_datastore))
log.info(comments[-1])
if existing_datastore:
# The following comparisons can be done if the existing_datastore
# is set; it may not be set if running in test mode
#
# We support percentages as well as GiB values; the size is converted
# to MiB, in multiples of 1024 (VMware SDK limitation)
if swap_type == '%':
# Percentage swap size
# Convert from bytes to MiB
raw_size_MiB = (swap_size_value/100.0) * \
(existing_datastore['capacity']/1024/1024)
else:
raw_size_MiB = swap_size_value * 1024
log.trace('raw_size = {0}MiB'.format(raw_size_MiB))
swap_size_MiB = int(raw_size_MiB/1024)*1024
log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB))
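# Editor's worked example: swap_size='98%' on a 102400 MiB (100 GiB) datastore
# gives raw_size_MiB = 100352.0, already a multiple of 1024, so it is kept;
# a raw value of 100000 MiB would round down to 97 * 1024 = 99328 MiB.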
existing_swap_size_MiB = 0
m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \
host_cache.get('swap_size') else None
if m:
# if swap_size from the host is set and has an expected value
# we are going to parse it to get the number of MiBs
existing_swap_size_MiB = int(m.group(1))
if not existing_swap_size_MiB == swap_size_MiB:
needs_setting = True
changes.update(
{'swap_size':
{'old': '{}GiB'.format(existing_swap_size_MiB/1024),
'new': '{}GiB'.format(swap_size_MiB/1024)}})
if needs_setting:
if __opts__['test']:
comments.append('State {0} will configure '
'the host cache on host \'{1}\' to: {2}.'
''.format(name, hostname,
{'enabled': enabled,
'datastore_name': datastore['name'],
'swap_size': swap_size}))
else:
if (existing_datastore['capacity'] / 1024.0**2) < \
swap_size_MiB:
raise ArgumentValueError(
'Capacity of host cache datastore \'{0}\' ({1} MiB) is '
'smaller than the required swap size ({2} MiB)'
''.format(existing_datastore['name'],
existing_datastore['capacity'] / 1024.0**2,
swap_size_MiB))
__salt__['vsphere.configure_host_cache'](
enabled,
datastore['name'],
swap_size_MiB=swap_size_MiB,
service_instance=si)
comments.append('Host cache configured on host '
'\'{0}\'.'.format(hostname))
else:
comments.append('Host cache on host \'{0}\' is already correctly '
'configured. Nothing to be done.'.format(hostname))
result = True
__salt__['vsphere.disconnect'](si)
log.info(comments[-1])
ret.update({'comment': '\n'.join(comments),
'result': result})
if __opts__['test']:
ret['pchanges'] = changes
else:
ret['changes'] = changes
return ret
except CommandExecutionError as err:
log.error('Error: {0}.'.format(err))
if si:
__salt__['vsphere.disconnect'](si)
ret.update({
'result': False if not __opts__['test'] else None,
'comment': '{}.'.format(err)})
return ret
def _lookup_syslog_config(config):
'''
Helper function that looks up syslog_config keys available from

View file

@ -6637,6 +6637,28 @@ def cached(name,
else:
pre_hash = None
def _try_cache(path, checksum):
'''
This helper is not needed anymore in develop as the fileclient in the
develop branch now has means of skipping a download if the existing
hash matches one passed to cp.cache_file. Remove this helper and the
code that invokes it, once we have merged forward into develop.
'''
if not path or not checksum:
return True
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
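# HASHES_REVMAP maps digest length to algorithm name, e.g. 64 hex
# characters -> 'sha256', 40 -> 'sha1', 32 -> 'md5'.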
if form is None:
# Shouldn't happen, an invalid checksum length should be caught
# before we get here. But in the event this gets through, don't let
# it cause any trouble, and just return True.
return True
try:
return salt.utils.get_hash(path, form=form) != checksum
except (IOError, OSError, ValueError):
# Again, shouldn't happen, but don't let invalid input/permissions
# in the call to get_hash blow this up.
return True
# Cache the file. Note that this will not actually download the file if
# either of the following is true:
# 1. source is a salt:// URL and the fileserver determines that the hash
@ -6645,14 +6667,18 @@ def cached(name,
# matches the cached copy.
# Remote, non salt:// sources _will_ download if a copy of the file was
# not already present in the minion cache.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = exc.__str__()
return ret
if _try_cache(local_copy, source_sum.get('hsum')):
# The _try_cache helper is obsolete in the develop branch. Once merged
# forward, remove the helper as well as this if statement, and dedent
# the below block.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = exc.__str__()
return ret
if not local_copy:
ret['comment'] = (

View file

@ -79,8 +79,6 @@ def _construct_yaml_str(self, node):
Construct for yaml
'''
return self.construct_scalar(node)
YamlLoader.add_constructor(u'tag:yaml.org,2002:str',
_construct_yaml_str)
YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp',
_construct_yaml_str)

View file

@ -2,6 +2,8 @@
'''
Linux File Access Control Lists
The Linux ACL state module requires the `getfacl` and `setfacl` binaries.
Ensure a Linux ACL is present
.. code-block:: yaml
@ -50,7 +52,7 @@ def __virtual__():
if salt.utils.path.which('getfacl') and salt.utils.path.which('setfacl'):
return __virtualname__
return False
return False, 'The linux_acl state cannot be loaded: the getfacl or setfacl binary is not in the path.'
def present(name, acl_type, acl_name='', perms='', recurse=False):
@ -85,11 +87,12 @@ def present(name, acl_type, acl_name='', perms='', recurse=False):
# applied to the user/group that owns the file, e.g.,
# default:group::rwx would be listed as default:group:root:rwx
# In this case, if acl_name is empty, we really want to search for root
# but still uses '' for other
# We search through the dictionary getfacl returns for the owner of the
# file if acl_name is empty.
if acl_name == '':
_search_name = __current_perms[name].get('comment').get(_acl_type)
_search_name = __current_perms[name].get('comment').get(_acl_type, '')
else:
_search_name = acl_name
@ -187,11 +190,12 @@ def absent(name, acl_type, acl_name='', perms='', recurse=False):
# applied to the user/group that owns the file, e.g.,
# default:group::rwx would be listed as default:group:root:rwx
# In this case, if acl_name is empty, we really want to search for root
# but still uses '' for other
# We search through the dictionary getfacl returns for the owner of the
# file if acl_name is empty.
if acl_name == '':
_search_name = __current_perms[name].get('comment').get(_acl_type)
_search_name = __current_perms[name].get('comment').get(_acl_type, '')
else:
_search_name = acl_name

View file

@ -25,9 +25,6 @@ import logging
# Import salt libs
import salt.utils.path
# Import 3rd-party libs
from salt.ext import six
# Set up logger
log = logging.getLogger(__name__)
@ -88,69 +85,127 @@ def present(name,
# Device exists
raids = __salt__['raid.list']()
if raids.get(name):
ret['comment'] = 'Raid {0} already present'.format(name)
return ret
present = raids.get(name)
# Decide whether to create or assemble
can_assemble = {}
for dev in devices:
# mdadm -E exits with 0 iff all devices given are part of an array
cmd = 'mdadm -E {0}'.format(dev)
can_assemble[dev] = __salt__['cmd.retcode'](cmd) == 0
missing = []
uuid_dict = {}
new_devices = []
if True in six.itervalues(can_assemble) and False in six.itervalues(can_assemble):
in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if x[1]])
not_in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if not x[1]])
ret['comment'] = 'Devices are a mix of RAID constituents ({0}) and '\
'non-RAID-constituents({1}).'.format(in_raid, not_in_raid)
for dev in devices:
if dev == 'missing' or not __salt__['file.access'](dev, 'f'):
missing.append(dev)
continue
superblock = __salt__['raid.examine'](dev)
if 'MD_UUID' in superblock:
uuid = superblock['MD_UUID']
if uuid not in uuid_dict:
uuid_dict[uuid] = []
uuid_dict[uuid].append(dev)
else:
new_devices.append(dev)
if len(uuid_dict) > 1:
ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format(
sorted(uuid_dict.keys()))
ret['result'] = False
return ret
elif next(six.itervalues(can_assemble)):
elif len(uuid_dict) == 1:
uuid = list(uuid_dict.keys())[0]
if present and present['uuid'] != uuid:
ret['comment'] = 'Devices MD_UUID {0} differs from the present RAID uuid {1}.'.format(uuid, present['uuid'])
ret['result'] = False
return ret
devices_with_superblock = uuid_dict[uuid]
else:
devices_with_superblock = []
if present:
do_assemble = False
do_create = False
elif len(devices_with_superblock) > 0:
do_assemble = True
do_create = False
verb = 'assembled'
else:
if len(new_devices) == 0:
ret['comment'] = 'All devices are missing: {0}.'.format(missing)
ret['result'] = False
return ret
do_assemble = False
do_create = True
verb = 'created'
# If running with test use the test_mode with create or assemble
if __opts__['test']:
if do_assemble:
res = __salt__['raid.assemble'](name,
devices,
devices_with_superblock,
test_mode=True,
**kwargs)
else:
elif do_create:
res = __salt__['raid.create'](name,
level,
devices,
new_devices + ['missing'] * len(missing),
test_mode=True,
**kwargs)
ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
ret['result'] = None
if present:
ret['comment'] = 'Raid {0} already present.'.format(name)
if do_assemble or do_create:
ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
ret['result'] = None
if (do_assemble or present) and len(new_devices) > 0:
ret['comment'] += ' New devices will be added: {0}'.format(new_devices)
ret['result'] = None
if len(missing) > 0:
ret['comment'] += ' Missing devices: {0}'.format(missing)
return ret
# Attempt to create or assemble the array
if do_assemble:
__salt__['raid.assemble'](name,
devices,
devices_with_superblock,
**kwargs)
else:
elif do_create:
__salt__['raid.create'](name,
level,
devices,
new_devices + ['missing'] * len(missing),
**kwargs)
raids = __salt__['raid.list']()
changes = raids.get(name)
if changes:
ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
ret['changes'] = changes
# Saving config
__salt__['raid.save_config']()
if not present:
raids = __salt__['raid.list']()
changes = raids.get(name)
if changes:
ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
ret['changes'] = changes
# Saving config
__salt__['raid.save_config']()
else:
ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
ret['result'] = False
else:
ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
ret['result'] = False
ret['comment'] = 'Raid {0} already present.'.format(name)
if (do_assemble or present) and len(new_devices) > 0 and ret['result']:
for d in new_devices:
res = __salt__['raid.add'](name, d)
if not res:
ret['comment'] += ' Unable to add {0} to {1}.\n'.format(d, name)
ret['result'] = False
else:
ret['comment'] += ' Added new device {0} to {1}.\n'.format(d, name)
if ret['result']:
ret['changes']['added'] = new_devices
if len(missing) > 0:
ret['comment'] += ' Missing devices: {0}'.format(missing)
return ret
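As a reading aid, here is a minimal pure-Python sketch of the create-versus-assemble decision implemented above (the function and names are illustrative, not part of the module):

def decide(present, devices_with_superblock, new_devices, missing):
    # An existing array is left alone (new members may still be added
    # afterwards); devices carrying a matching superblock are assembled;
    # otherwise the array is created, padding with 'missing' slots.
    if present:
        return 'noop'
    if devices_with_superblock:
        return 'assemble'
    if not new_devices:
        raise ValueError('All devices are missing: {0}'.format(missing))
    return 'create'

assert decide(None, ['/dev/sdb1'], [], []) == 'assemble'
assert decide(None, [], ['/dev/sdb1', '/dev/sdc1'], ['missing']) == 'create'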

501
salt/states/pbm.py Normal file
View file

@ -0,0 +1,501 @@
# -*- coding: utf-8 -*-
'''
Manages VMware storage policies
(called pbm because the vCenter endpoint is /pbm)
Examples
========
Storage policy
--------------
.. code-block:: python
{
"name": "salt_storage_policy"
"description": "Managed by Salt. Random capability values.",
"resource_type": "STORAGE",
"subprofiles": [
{
"capabilities": [
{
"setting": {
"type": "scalar",
"value": 2
},
"namespace": "VSAN",
"id": "hostFailuresToTolerate"
},
{
"setting": {
"type": "scalar",
"value": 2
},
"namespace": "VSAN",
"id": "stripeWidth"
},
{
"setting": {
"type": "scalar",
"value": true
},
"namespace": "VSAN",
"id": "forceProvisioning"
},
{
"setting": {
"type": "scalar",
"value": 50
},
"namespace": "VSAN",
"id": "proportionalCapacity"
},
{
"setting": {
"type": "scalar",
"value": 0
},
"namespace": "VSAN",
"id": "cacheReservation"
}
],
"name": "Rule-Set 1: VSAN",
"force_provision": null
}
],
}
Dependencies
============
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import copy
import sys
# Import Salt Libs
from salt.exceptions import CommandExecutionError, ArgumentValueError
from salt.utils.dictdiffer import recursive_diff
from salt.utils.listdiffer import list_diff
# External libraries
try:
from pyVmomi import VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
if not HAS_PYVMOMI:
return False, 'State module did not load: pyVmomi not found'
# We check the supported vim versions to infer the pyVmomi version
if 'vim25/6.0' in VmomiSupport.versionMap and \
sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):
return False, ('State module did not load: Incompatible versions '
'of Python and pyVmomi present. See Issue #29537.')
return True
def mod_init(low):
'''
Init function
'''
return True
def default_vsan_policy_configured(name, policy):
'''
Configures the default VSAN policy on a vCenter.
The state assumes there is only one default VSAN policy on a vCenter.
policy
Dict representation of a policy
'''
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
policy_copy = copy.deepcopy(policy)
proxy_type = __salt__['vsphere.get_proxy_type']()
log.trace('proxy_type = {0}'.format(proxy_type))
# All allowed proxies have a shim execution module with the same
# name which implements a get_details function
# All allowed proxies have a vcenter detail
vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter']
log.info('Running {0} on vCenter '
'\'{1}\''.format(name, vcenter))
log.trace('policy = {0}'.format(policy))
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
comments = []
changes = {}
changes_required = False
si = None
try:
#TODO policy schema validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_policy = __salt__['vsphere.list_default_vsan_policy'](si)
log.trace('current_policy = {0}'.format(current_policy))
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile
if policy.get('subprofiles'):
if len(policy['subprofiles']) > 1:
raise ArgumentValueError('Multiple sub_profiles ({0}) are not '
'supported in the input policy'.format(len(policy['subprofiles'])))
subprofile = policy['subprofiles'][0]
current_subprofile = current_policy['subprofiles'][0]
capabilities_differ = list_diff(current_subprofile['capabilities'],
subprofile.get('capabilities', []),
key='id')
del policy['subprofiles']
if subprofile.get('capabilities'):
del subprofile['capabilities']
del current_subprofile['capabilities']
# Get the subprofile diffs without the capability keys
subprofile_differ = recursive_diff(current_subprofile,
dict(subprofile))
del current_policy['subprofiles']
policy_differ = recursive_diff(current_policy, policy)
if policy_differ.diffs or capabilities_differ.diffs or \
subprofile_differ.diffs:
if 'name' in policy_differ.new_values or \
'description' in policy_differ.new_values:
raise ArgumentValueError(
'\'name\' and \'description\' of the default VSAN policy '
'cannot be updated')
changes_required = True
if __opts__['test']:
str_changes = []
if policy_differ.diffs:
str_changes.extend([change for change in
policy_differ.changes_str.split('\n')])
if subprofile_differ.diffs or capabilities_differ.diffs:
str_changes.append('subprofiles:')
if subprofile_differ.diffs:
str_changes.extend(
[' {0}'.format(change) for change in
subprofile_differ.changes_str.split('\n')])
if capabilities_differ.diffs:
str_changes.append(' capabilities:')
str_changes.extend(
[' {0}'.format(change) for change in
capabilities_differ.changes_str2.split('\n')])
comments.append(
'State {0} will update the default VSAN policy on '
'vCenter \'{1}\':\n{2}'
''.format(name, vcenter, '\n'.join(str_changes)))
else:
__salt__['vsphere.update_storage_policy'](
policy=current_policy['name'],
policy_dict=policy_copy,
service_instance=si)
comments.append('Updated the default VSAN policy in vCenter '
'\'{0}\''.format(vcenter))
log.info(comments[-1])
new_values = policy_differ.new_values
new_values['subprofiles'] = [subprofile_differ.new_values]
new_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.new_values
if not new_values['subprofiles'][0]['capabilities']:
del new_values['subprofiles'][0]['capabilities']
if not new_values['subprofiles'][0]:
del new_values['subprofiles']
old_values = policy_differ.old_values
old_values['subprofiles'] = [subprofile_differ.old_values]
old_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.old_values
if not old_values['subprofiles'][0]['capabilities']:
del old_values['subprofiles'][0]['capabilities']
if not old_values['subprofiles'][0]:
del old_values['subprofiles']
changes.update({'default_vsan_policy':
{'new': new_values,
'old': old_values}})
log.trace(changes)
__salt__['vsphere.disconnect'](si)
except CommandExecutionError as exc:
log.error('Error: {0}'.format(exc))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('Default VSAN policy in vCenter '
'\'{0}\' is correctly configured. '
'Nothing to be done.'.format(vcenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': changes,
'result': None})
else:
ret.update({'changes': changes,
'result': True})
return ret
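The state leans on the two diff helpers imported at the top of the file. A toy sketch of how they are typically driven (the dictionaries are made up; the signatures are the ones used in this file):

from salt.utils.dictdiffer import recursive_diff
from salt.utils.listdiffer import list_diff

current = {'name': 'vsan-default', 'description': 'old text'}
expected = {'name': 'vsan-default', 'description': 'new text'}
policy_differ = recursive_diff(current, expected)
if policy_differ.diffs:
    print(policy_differ.changes_str)  # human-readable per-key changes
    print(policy_differ.new_values)   # {'description': 'new text'}

capabilities_differ = list_diff(
    [{'id': 'stripeWidth', 'setting': {'value': 1}}],
    [{'id': 'stripeWidth', 'setting': {'value': 2}}],
    key='id')
print(capabilities_differ.new_values)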
def storage_policies_configured(name, policies):
'''
Configures storage policies on a vCenter.
policies
List of dict representation of the required storage policies
'''
comments = []
changes = []
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
log.trace('policies = {0}'.format(policies))
si = None
try:
proxy_type = __salt__['vsphere.get_proxy_type']()
log.trace('proxy_type = {0}'.format(proxy_type))
# All allowed proxies have a shim execution module with the same
# name which implements a get_details function
# All allowed proxies have a vcenter detail
vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter']
log.info('Running state \'{0}\' on vCenter '
'\'{1}\''.format(name, vcenter))
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_policies = __salt__['vsphere.list_storage_policies'](
policy_names=[policy['name'] for policy in policies],
service_instance=si)
log.trace('current_policies = {0}'.format(current_policies))
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
for policy in policies:
policy_copy = copy.deepcopy(policy)
filtered_policies = [p for p in current_policies
if p['name'] == policy['name']]
current_policy = filtered_policies[0] \
if filtered_policies else None
if not current_policy:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create the storage policy '
'\'{1}\' on vCenter \'{2}\''
''.format(name, policy['name'], vcenter))
else:
__salt__['vsphere.create_storage_policy'](
policy['name'], policy, service_instance=si)
comments.append('Created storage policy \'{0}\' on '
'vCenter \'{1}\''.format(policy['name'],
vcenter))
changes.append({'new': policy, 'old': None})
log.trace(comments[-1])
# Continue with next
continue
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile
if policy.get('subprofiles'):
if len(policy['subprofiles']) > 1:
raise ArgumentValueError('Multiple sub_profiles ({0}) are not '
'supported in the input policy'.format(len(policy['subprofiles'])))
subprofile = policy['subprofiles'][0]
current_subprofile = current_policy['subprofiles'][0]
capabilities_differ = list_diff(current_subprofile['capabilities'],
subprofile.get('capabilities', []),
key='id')
del policy['subprofiles']
if subprofile.get('capabilities'):
del subprofile['capabilities']
del current_subprofile['capabilities']
# Get the subprofile diffs without the capability keys
subprofile_differ = recursive_diff(current_subprofile,
dict(subprofile))
del current_policy['subprofiles']
policy_differ = recursive_diff(current_policy, policy)
if policy_differ.diffs or capabilities_differ.diffs or \
subprofile_differ.diffs:
changes_required = True
if __opts__['test']:
str_changes = []
if policy_differ.diffs:
str_changes.extend(
[change for change in
policy_differ.changes_str.split('\n')])
if subprofile_differ.diffs or \
capabilities_differ.diffs:
str_changes.append('subprofiles:')
if subprofile_differ.diffs:
str_changes.extend(
[' {0}'.format(change) for change in
subprofile_differ.changes_str.split('\n')])
if capabilities_differ.diffs:
str_changes.append(' capabilities:')
str_changes.extend(
[' {0}'.format(change) for change in
capabilities_differ.changes_str2.split('\n')])
comments.append(
'State {0} will update the storage policy \'{1}\''
' on vCenter \'{2}\':\n{3}'
''.format(name, policy['name'], vcenter,
'\n'.join(str_changes)))
else:
__salt__['vsphere.update_storage_policy'](
policy=current_policy['name'],
policy_dict=policy_copy,
service_instance=si)
comments.append('Updated the storage policy \'{0}\' '
'in vCenter \'{1}\''
''.format(policy['name'], vcenter))
log.info(comments[-1])
# Build new/old values to report what was changed
new_values = policy_differ.new_values
new_values['subprofiles'] = [subprofile_differ.new_values]
new_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.new_values
if not new_values['subprofiles'][0]['capabilities']:
del new_values['subprofiles'][0]['capabilities']
if not new_values['subprofiles'][0]:
del new_values['subprofiles']
old_values = policy_differ.old_values
old_values['subprofiles'] = [subprofile_differ.old_values]
old_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.old_values
if not old_values['subprofiles'][0]['capabilities']:
del old_values['subprofiles'][0]['capabilities']
if not old_values['subprofiles'][0]:
del old_values['subprofiles']
changes.append({'new': new_values,
'old': old_values})
else:
# No diffs found - no updates required
comments.append('Storage policy \'{0}\' is up to date. '
'Nothing to be done.'.format(policy['name']))
__salt__['vsphere.disconnect'](si)
except CommandExecutionError as exc:
log.error('Error: {0}'.format(exc))
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('All storage policies in vCenter '
'\'{0}\' are correctly configured. '
'Nothing to be done.'.format(vcenter)),
'result': True})
else:
ret.update({'comment': '\n'.join(comments)})
if __opts__['test']:
ret.update({'pchanges': {'storage_policies': changes},
'result': None})
else:
ret.update({'changes': {'storage_policies': changes},
'result': True})
return ret
def default_storage_policy_assigned(name, policy, datastore):
'''
Assigns a default storage policy to a datastore
policy
Name of storage policy
datastore
Name of datastore
'''
log.info('Running state {0} for policy \'{1}\', datastore \'{2}\'.'
''.format(name, policy, datastore))
changes = {}
changes_required = False
ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
'pchanges': {}}
si = None
try:
si = __salt__['vsphere.get_service_instance_via_proxy']()
existing_policy = \
__salt__['vsphere.list_default_storage_policy_of_datastore'](
datastore=datastore, service_instance=si)
if existing_policy['name'] == policy:
comment = ('Storage policy \'{0}\' is already assigned to '
'datastore \'{1}\'. Nothing to be done.'
''.format(policy, datastore))
else:
changes_required = True
changes = {
'default_storage_policy': {'old': existing_policy['name'],
'new': policy}}
if __opts__['test']:
comment = ('State {0} will assign storage policy \'{1}\' to '
'datastore \'{2}\'.').format(name, policy,
datastore)
else:
__salt__['vsphere.assign_default_storage_policy_to_datastore'](
policy=policy, datastore=datastore, service_instance=si)
comment = ('Storage policy \'{0}\' was assigned to datastore '
'\'{1}\'.').format(policy, datastore)
log.info(comment)
except CommandExecutionError as exc:
log.error('Error: {0}'.format(exc))
if si:
__salt__['vsphere.disconnect'](si)
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
ret['comment'] = comment
if changes_required:
if __opts__['test']:
ret.update({'result': None,
'pchanges': changes})
else:
ret.update({'result': True,
'changes': changes})
else:
ret['result'] = True
return ret

View file

@ -36,6 +36,7 @@ def __virtual__():
def present(name,
pattern,
definition,
apply_to=None,
priority=0,
vhost='/',
runas=None):
@ -52,6 +53,8 @@ def present(name,
A json dict describing the policy
priority
Priority (defaults to 0)
apply_to
Apply policy to 'queues', 'exchanges' or 'all' (defaults to 'all')
vhost
Virtual host to apply to (defaults to '/')
runas
@ -68,6 +71,8 @@ def present(name,
updates.append('Pattern')
if policy.get('definition') != definition:
updates.append('Definition')
if apply_to and (policy.get('apply-to') != apply_to):
updates.append('Applyto')
if int(policy.get('priority')) != priority:
updates.append('Priority')
@ -85,6 +90,7 @@ def present(name,
name,
pattern,
definition,
apply_to,
priority=priority,
runas=runas)
elif updates:
@ -97,6 +103,7 @@ def present(name,
name,
pattern,
definition,
apply_to,
priority=priority,
runas=runas)

View file

@ -26,10 +26,15 @@ from __future__ import absolute_import
# Import python libs
import fnmatch
import logging
import sys
import threading
import time
# Import salt libs
import salt.syspaths
import salt.exceptions
import salt.output
import salt.utils
import salt.utils.event
import salt.utils.versions
from salt.ext import six
@ -59,6 +64,48 @@ def _fire_args(tag_data):
)
def _parallel_map(func, inputs):
'''
Applies a function to each element of a list, returning the resulting list.
A separate thread is created for each element in the input list and the
passed function is called for each of the elements. When all threads have
finished execution a list with the results corresponding to the inputs is
returned.
If one of the threads fails (because the function throws an exception),
that exception is reraised. If more than one thread fails, the exception
from the first thread (according to the index of the input element) is
reraised.
func:
function that is applied on each input element.
inputs:
list of elements that shall be processed. The length of this list also
defines the number of threads created.
'''
outputs = len(inputs) * [None]
errors = len(inputs) * [None]
def create_thread(index):
def run_thread():
try:
outputs[index] = func(inputs[index])
except: # pylint: disable=bare-except
errors[index] = sys.exc_info()
thread = threading.Thread(target=run_thread)
thread.start()
return thread
threads = list(six.moves.map(create_thread, six.moves.range(len(inputs))))
for thread in threads:
thread.join()
for error in errors:
if error is not None:
exc_type, exc_value, exc_traceback = error
six.reraise(exc_type, exc_value, exc_traceback)
return outputs
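A short usage sketch, showing both the input-order preservation of the results and the reraising behavior described in the docstring:

def double(x):
    return 2 * x

print(_parallel_map(double, [1, 2, 3]))  # [2, 4, 6]

def boom(x):
    raise RuntimeError('worker {0} failed'.format(x))

try:
    _parallel_map(boom, [1, 2])
except RuntimeError as exc:
    # The exception of the first failing input (by index) is reraised.
    print(exc)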
def state(name,
tgt,
ssh=False,
@ -770,6 +817,190 @@ def runner(name, **kwargs):
return ret
def parallel_runners(name, runners):
'''
Executes multiple runner modules on the master in parallel.
.. versionadded:: 2017.x.0 (Nitrogen)
A separate thread is spawned for each runner. This state is intended to be
used with the orchestrate runner in place of the ``saltmod.runner`` state
when different tasks should be run in parallel. In general, Salt states are
not safe when used concurrently, so ensure that they are used in a safe way
(e.g. by only targeting separate minions in parallel tasks).
name:
name identifying this state. The name is provided as part of the
output, but not used for anything else.
runners:
list of runners that should be run in parallel. Each element of the
list has to be a dictionary. This dictionary's name entry stores the
name of the runner function that shall be invoked. The optional kwarg
entry stores a dictionary of named arguments that are passed to the
runner function.
.. code-block:: yaml
parallel-state:
salt.parallel_runners:
- runners:
my_runner_1:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_1
my_runner_2:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_2
'''
# For the sake of consistency, we treat a single string in the same way as
# a key without a value. This allows something like
# salt.parallel_runners:
# - runners:
# state.orchestrate
# Obviously, this will only work if the specified runner does not need any
# arguments.
if isinstance(runners, six.string_types):
runners = {runners: [{'name': runners}]}
# If the runners argument is not a string, it must be a dict. Everything
# else is considered an error.
if not isinstance(runners, dict):
return {
'name': name,
'result': False,
'changes': {},
'comment': 'The runners parameter must be a string or dict.'
}
# The configuration for each runner is given as a list of key-value pairs.
# This is not very useful for what we want to do, but it is the typical
# style used in Salt. For further processing, we convert each of these
# lists to a dict. This also makes it easier to check whether a name has
# been specified explicitly.
for runner_id, runner_config in six.iteritems(runners):
if runner_config is None:
runner_config = {}
else:
runner_config = salt.utils.repack_dictlist(runner_config)
if 'name' not in runner_config:
runner_config['name'] = runner_id
runners[runner_id] = runner_config
try:
jid = __orchestration_jid__
except NameError:
log.debug(
'Unable to fire args event due to missing __orchestration_jid__')
jid = None
def call_runner(runner_config):
return __salt__['saltutil.runner'](runner_config['name'],
__orchestration_jid__=jid,
__env__=__env__,
full_return=True,
**(runner_config.get('kwarg', {})))
try:
outputs = _parallel_map(call_runner, list(six.itervalues(runners)))
except salt.exceptions.SaltException as exc:
return {
'name': name,
'result': False,
'success': False,
'changes': {},
'comment': 'One of the runners raised an exception: {0}'.format(
exc)
}
# We bundle the results of the runners with the IDs of the runners so that
# we can easily identify which output belongs to which runner. At the same
# time we extract the actual return value of the runner (saltutil.runner
# adds some extra information that is not interesting to us).
outputs = {
runner_id: out['return'] for runner_id, out in
six.moves.zip(six.iterkeys(runners), outputs)
}
# If each of the runners returned its output in the format compatible with
# the 'highstate' outputter, we can leverage this fact when merging the
# outputs.
highstate_output = all(
[out.get('outputter', '') == 'highstate' and 'data' in out for out in
six.itervalues(outputs)]
)
# The following helper function is used to extract changes from highstate
# output.
def extract_changes(obj):
if not isinstance(obj, dict):
return {}
elif 'changes' in obj:
if (isinstance(obj['changes'], dict)
and obj['changes'].get('out', '') == 'highstate'
and 'ret' in obj['changes']):
return obj['changes']['ret']
else:
return obj['changes']
else:
found_changes = {}
for key, value in six.iteritems(obj):
change = extract_changes(value)
if change:
found_changes[key] = change
return found_changes
if highstate_output:
failed_runners = [runner_id for runner_id, out in
six.iteritems(outputs) if
out['data'].get('retcode', 0) != 0]
all_successful = not failed_runners
if all_successful:
comment = 'All runner functions executed successfully.'
else:
runner_comments = [
'Runner {0} failed with return value:\n{1}'.format(
runner_id,
salt.output.out_format(outputs[runner_id],
'nested',
__opts__,
nested_indent=2)
) for runner_id in failed_runners
]
comment = '\n'.join(runner_comments)
changes = {}
for runner_id, out in six.iteritems(outputs):
runner_changes = extract_changes(out['data'])
if runner_changes:
changes[runner_id] = runner_changes
else:
failed_runners = [runner_id for runner_id, out in
six.iteritems(outputs) if
out.get('exit_code', 0) != 0]
all_successful = not failed_runners
if all_successful:
comment = 'All runner functions executed successfully.'
else:
if len(failed_runners) == 1:
comment = 'Runner {0} failed.'.format(failed_runners[0])
else:
comment =\
'Runners {0} failed.'.format(', '.join(failed_runners))
changes = {'ret': {
runner_id: out for runner_id, out in six.iteritems(outputs)
}}
ret = {
'name': name,
'result': all_successful,
'changes': changes,
'comment': comment
}
# The 'runner' function includes out['jid'] as '__jid__' in the returned
# dict, but we cannot do this here because we have more than one JID if
# we have more than one runner.
return ret
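For reference, a hedged sketch of the per-runner normalization performed above, assuming the usual salt.utils.repack_dictlist behavior of collapsing a list of single-key dicts into one dict:

import salt.utils

runner_id = 'my_runner_1'  # illustrative id from the state file
runner_config = [{'name': 'state.orchestrate'},
                 {'kwarg': {'mods': 'orchestrate_state_1'}}]
packed = salt.utils.repack_dictlist(runner_config)
# packed == {'name': 'state.orchestrate',
#            'kwarg': {'mods': 'orchestrate_state_1'}}
if 'name' not in packed:
    # A runner with no explicit name falls back to its id.
    packed['name'] = runner_id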
def wheel(name, **kwargs):
'''
Execute a wheel module on the master

View file

@ -310,17 +310,27 @@ def module_remove(name):
def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_level=None):
'''
Makes sure a SELinux policy for a given filespec (name),
filetype and SELinux context type is present.
.. versionadded:: 2017.7.0
name: filespec of the file or directory. Regex syntax is allowed.
sel_type: SELinux context type. There are many.
filetype: The SELinux filetype specification.
Use one of [a, f, d, c, b, s, l, p].
See also `man semanage-fcontext`.
Defaults to 'a' (all files)
sel_user: The SELinux user.
sel_level: The SELinux MLS range
Makes sure a SELinux policy for a given filespec (name), filetype
and SELinux context type is present.
name
filespec of the file or directory. Regex syntax is allowed.
sel_type
SELinux context type. There are many.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
(all files).
sel_user
The SELinux user.
sel_level
The SELinux MLS range.
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
new_state = {}
@ -383,17 +393,27 @@ def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_lev
def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel_level=None):
'''
Makes sure an SELinux file context policy for a given filespec (name),
filetype and SELinux context type is absent.
.. versionadded:: 2017.7.0
name: filespec of the file or directory. Regex syntax is allowed.
filetype: The SELinux filetype specification.
Use one of [a, f, d, c, b, s, l, p].
See also `man semanage-fcontext`.
Defaults to 'a' (all files).
sel_type: The SELinux context type. There are many.
sel_user: The SELinux user.
sel_level: The SELinux MLS range
Makes sure an SELinux file context policy for a given filespec
(name), filetype and SELinux context type is absent.
name
filespec of the file or directory. Regex syntax is allowed.
filetype
The SELinux filetype specification. Use one of [a, f, d, c, b,
s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
(all files).
sel_type
The SELinux context type. There are many.
sel_user
The SELinux user.
sel_level
The SELinux MLS range.
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
new_state = {}
@ -433,7 +453,10 @@ def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel
def fcontext_policy_applied(name, recursive=False):
'''
Checks and makes sure the SELinux policies for a given filespec are applied.
.. versionadded:: 2017.7.0
Checks and makes sure the SELinux policies for a given filespec are
applied.
'''
ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}

View file

@ -84,10 +84,12 @@ def installed(name, updates=None):
Args:
name (str): The identifier of a single update to install.
name (str):
The identifier of a single update to install.
updates (list): A list of identifiers for updates to be installed.
Overrides ``name``. Default is None.
updates (list):
A list of identifiers for updates to be installed. Overrides
``name``. Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
@ -121,7 +123,7 @@ def installed(name, updates=None):
# Install multiple updates
install_updates:
wua.installed:
- name:
- updates:
- KB3194343
- 28cf1b09-2b1a-458c-9bd1-971d1b26b211
'''
@ -215,10 +217,12 @@ def removed(name, updates=None):
Args:
name (str): The identifier of a single update to uninstall.
name (str):
The identifier of a single update to uninstall.
updates (list): A list of identifiers for updates to be removed.
Overrides ``name``. Default is None.
updates (list):
A list of identifiers for updates to be removed. Overrides ``name``.
Default is None.
.. note:: Identifiers can be the GUID, the KB number, or any part of the
Title of the Microsoft update. GUIDs and KBs are the preferred method
@ -329,3 +333,172 @@ def removed(name, updates=None):
ret['comment'] = 'Updates removed successfully'
return ret
def uptodate(name,
software=True,
drivers=False,
skip_hidden=False,
skip_mandatory=False,
skip_reboot=True,
categories=None,
severities=None,):
'''
Ensure Microsoft Updates that match the passed criteria are installed.
Updates will be downloaded if needed.
This state allows you to update a system without specifying a specific
update to apply. All matching updates will be installed.
Args:
name (str):
The name has no functional value and is only used as a tracking
reference
software (bool):
Include software updates in the results (default is True)
drivers (bool):
Include driver updates in the results (default is False)
skip_hidden (bool):
Skip updates that have been hidden. Default is False.
skip_mandatory (bool):
Skip mandatory updates. Default is False.
skip_reboot (bool):
Skip updates that require a reboot. Default is True.
categories (list):
Specify the categories to list. Must be passed as a list. All
categories returned by default.
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list):
Specify the severities to include. Must be passed as a list. All
severities returned by default.
Severities include the following:
* Critical
* Important
Returns:
dict: A dictionary containing the results of the update
CLI Example:
.. code-block:: yaml
# Update the system using the state defaults
update_system:
wua.uptodate
# Update the drivers
update_drivers:
wua.uptodate:
- software: False
- drivers: True
- skip_reboot: False
# Apply all critical updates
update_critical:
wua.uptodate:
- severities:
- Critical
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
wua = salt.utils.win_update.WindowsUpdateAgent()
available_updates = wua.available(
skip_hidden=skip_hidden, skip_installed=True,
skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
software=software, drivers=drivers, categories=categories,
severities=severities)
# No updates found
if available_updates.count() == 0:
ret['comment'] = 'No updates found'
return ret
updates = list(available_updates.list().keys())
# Search for updates
install_list = wua.search(updates)
# List of updates to download
download = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.is_true(item.IsDownloaded):
download.updates.Add(item)
# List of updates to install
install = salt.utils.win_update.Updates()
for item in install_list.updates:
if not salt.utils.is_true(item.IsInstalled):
install.updates.Add(item)
# Return comment of changes if test.
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Updates will be installed:'
for update in install.updates:
ret['comment'] += '\n'
ret['comment'] += ': '.join(
[update.Identity.UpdateID, update.Title])
return ret
# Download updates
wua.download(download)
# Install updates
wua.install(install)
# Refresh windows update info
wua.refresh()
post_info = wua.updates().list()
# Verify the installation
for item in install.list():
if not salt.utils.is_true(post_info[item]['Installed']):
ret['changes']['failed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'KBs': post_info[item]['KBs']}
}
ret['result'] = False
else:
ret['changes']['installed'] = {
item: {'Title': post_info[item]['Title'][:40] + '...',
'NeedsReboot': post_info[item]['NeedsReboot'],
'KBs': post_info[item]['KBs']}
}
if ret['changes'].get('failed', False):
ret['comment'] = 'Updates failed'
else:
ret['comment'] = 'Updates installed successfully'
return ret

69
salt/tops/saltclass.py Normal file
View file

@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
'''
SaltClass master_tops Module
.. code-block:: yaml
master_tops:
saltclass:
path: /srv/saltclass
'''
# import python libs
from __future__ import absolute_import
import logging
import salt.utils.saltclass as sc
log = logging.getLogger(__name__)
def __virtual__():
'''
Only run if properly configured
'''
if __opts__['master_tops'].get('saltclass'):
return True
return False
def top(**kwargs):
'''
Node definitions path will be retrieved from __opts__ - or set to default -
then added to 'salt_data' dict that is passed to the 'get_tops' function.
'salt_data' dict is a convenient way to pass all the required data to the function
It contains:
- __opts__
- empty __salt__
- __grains__
- empty __pillar__
- minion_id
- path
If successful the function will return a top dict for minion_id
'''
# If path has not been set, make a default
_opts = __opts__['master_tops']['saltclass']
if 'path' not in _opts:
path = '/srv/saltclass'
log.warning('path variable unset, using default: {0}'.format(path))
else:
path = _opts['path']
# Create a dict that will contain our salt objects
# to send to get_tops function
if 'id' not in kwargs['opts']:
log.warning('Minion id not found - Returning empty dict')
return {}
else:
minion_id = kwargs['opts']['id']
salt_data = {
'__opts__': kwargs['opts'],
'__salt__': {},
'__grains__': kwargs['grains'],
'__pillar__': {},
'minion_id': minion_id,
'path': path
}
return sc.get_tops(minion_id, salt_data)
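An illustrative call (the node definitions and resulting states are hypothetical; the dunder opts are normally injected by the loader), showing the environment-to-states mapping a master_tops module is expected to return:

# Assuming /srv/saltclass/nodes/minion1.yml assigns classes to minion1:
tops = top(opts={'id': 'minion1'}, grains={'os': 'Debian'})
# e.g. {'base': ['app.webserver', 'app.firewall']}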

View file

@ -217,7 +217,7 @@ class RecursiveDictDiffer(DictDiffer):
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in diff_dict.keys():
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
@ -267,7 +267,7 @@ class RecursiveDictDiffer(DictDiffer):
keys.append('{0}{1}'.format(prefix, key))
return keys
return _added(self._diffs, prefix='')
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
@ -290,7 +290,7 @@ class RecursiveDictDiffer(DictDiffer):
prefix='{0}{1}.'.format(prefix, key)))
return keys
return _removed(self._diffs, prefix='')
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
@ -338,7 +338,7 @@ class RecursiveDictDiffer(DictDiffer):
return keys
return _changed(self._diffs, prefix='')
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
@ -363,7 +363,7 @@ class RecursiveDictDiffer(DictDiffer):
prefix='{0}{1}.'.format(prefix, key)))
return keys
return _unchanged(self.current_dict, self._diffs, prefix='')
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):

View file

@ -485,6 +485,8 @@ def safe_filename_leaf(file_basename):
windows is \\ / : * ? " < > | posix is /
.. versionadded:: 2017.7.2
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
def _replace(re_obj):
return urllib.quote(re_obj.group(0), safe=u'')
@ -497,19 +499,27 @@ def safe_filename_leaf(file_basename):
return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE)
def safe_filepath(file_path_name):
def safe_filepath(file_path_name, dir_sep=None):
'''
Input the full path and filename, splits on directory separator and calls safe_filename_leaf for
each part of the path. dir_sep allows the caller to force the directory separator to a particular character.
each part of the path. dir_sep allows coder to force a directory separate to a particular character
.. versionadded:: 2017.7.2
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
if not dir_sep:
dir_sep = os.sep
# Normally if file_path_name or dir_sep is Unicode then the output will be Unicode
# This code ensures the output type is the same as file_path_name
if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type):
dir_sep = dir_sep.encode('ascii') # This should not be executed under PY3
# splitdrive only set drive on windows platform
(drive, path) = os.path.splitdrive(file_path_name)
path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)])
path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)])
if drive:
return os.sep.join([drive, path])
else:
return path
path = dir_sep.join([drive, path])
return path
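A quick usage sketch on a POSIX host (where os.sep is '/'): each component is sanitized independently, so separators survive while unsafe characters are percent-encoded:

print(safe_filepath('/srv/web/cache*?.txt'))
# /srv/web/cache%2A%3F.txt
print(safe_filepath('logs\\app:out.txt', dir_sep='\\'))
# logs\app%3Aout.txt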
@jinja_filter('is_text_file')

View file

@ -966,6 +966,31 @@ class CkMinions(object):
auth_list.append(matcher)
return auth_list
def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
'''
Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in.
'''
if auth_list is None:
auth_list = []
if permissive is None:
permissive = self.opts.get('permissive_acl')
name_matched = False
for match in auth_provider:
if match == '*' and not permissive:
continue
if match.endswith('%'):
if match.rstrip('%') in groups:
auth_list.extend(auth_provider[match])
else:
if salt.utils.expr_match(match, name):
name_matched = True
auth_list.extend(auth_provider[match])
if not permissive and not name_matched and '*' in auth_provider:
auth_list.extend(auth_provider['*'])
return auth_list
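A toy illustration of the eligibility resolution (the ACL data is hypothetical and ckminions stands for an instantiated CkMinions):

auth_provider = {
    'fred': ['test.ping'],   # personal matcher
    'admins%': ['pkg.*'],    # group matcher, marked by the '%' suffix
    '*': ['grains.items'],   # fallback for users not matched by name
}
# 'fred' belongs to group 'admins': he collects his personal matchers
# plus the group's, and, because his name matched, skips the '*' fallback.
print(sorted(ckminions.fill_auth_list(auth_provider, 'fred', ['admins'])))
# ['pkg.*', 'test.ping']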
def wheel_check(self, auth_list, fun, args):
'''
Check special API permissions
@ -982,6 +1007,8 @@ class CkMinions(object):
'''
Check special API permissions
'''
if not auth_list:
return False
if form != 'cloud':
comps = fun.split('.')
if len(comps) != 2:

View file

@ -968,7 +968,14 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)):
# We've loaded and merged options into the configuration, it's safe
# to query about the pidfile
if self.check_pidfile():
os.unlink(self.config['pidfile'])
try:
os.unlink(self.config['pidfile'])
except OSError as err:
logging.getLogger(__name__).info(
'PIDfile could not be deleted: {0} ({1})'.format(
self.config['pidfile'], err
)
)
def set_pidfile(self):
from salt.utils.process import set_pidfile

329
salt/utils/pbm.py Normal file
View file

@ -0,0 +1,329 @@
# -*- coding: utf-8 -*-
'''
Library for VMware Storage Policy management (via the pbm endpoint)
This library is used to manage the various policies available in VMware
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
'''
# Import Python Libs
from __future__ import absolute_import
import logging
# Import Salt Libs
import salt.utils.vmware
from salt.exceptions import VMwareApiError, VMwareRuntimeError, \
VMwareObjectRetrievalError
try:
from pyVmomi import pbm, vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if PyVmomi is installed.
'''
if HAS_PYVMOMI:
return True
else:
return False, 'Missing dependency: The salt.utils.pbm module ' \
'requires the pyvmomi library'
def get_profile_manager(service_instance):
'''
Returns a profile manager
service_instance
Service instance to the host or vCenter
'''
stub = salt.utils.vmware.get_new_service_instance_stub(
service_instance, ns='pbm/2.0', path='/pbm/sdk')
pbm_si = pbm.ServiceInstance('ServiceInstance', stub)
try:
profile_manager = pbm_si.RetrieveContent().profileManager
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
return profile_manager
def get_placement_solver(service_instance):
'''
Returns a placement solver
service_instance
Service instance to the host or vCenter
'''
stub = salt.utils.vmware.get_new_service_instance_stub(
service_instance, ns='pbm/2.0', path='/pbm/sdk')
pbm_si = pbm.ServiceInstance('ServiceInstance', stub)
try:
placement_solver = pbm_si.RetrieveContent().placementSolver
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
return placement_solver
def get_capability_definitions(profile_manager):
'''
Returns a list of all capability definitions.
profile_manager
Reference to the profile manager.
'''
res_type = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
try:
cap_categories = profile_manager.FetchCapabilityMetadata(res_type)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
cap_definitions = []
for cat in cap_categories:
cap_definitions.extend(cat.capabilityMetadata)
return cap_definitions
def get_policies_by_id(profile_manager, policy_ids):
'''
Returns a list of policies with the specified ids.
profile_manager
Reference to the profile manager.
policy_ids
List of policy ids to retrieve.
'''
try:
return profile_manager.RetrieveContent(policy_ids)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
def get_storage_policies(profile_manager, policy_names=None,
get_all_policies=False):
'''
Returns a list of the storage policies, filtered by name.
profile_manager
Reference to the profile manager.
policy_names
List of policy names to filter by.
Default is None.
get_all_policies
Flag specifying to return all policies, regardless of the specified
filter.
'''
res_type = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
try:
policy_ids = profile_manager.QueryProfile(res_type)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
log.trace('policy_ids = {0}'.format(policy_ids))
# More policies are returned so we need to filter again
policies = [p for p in get_policies_by_id(profile_manager, policy_ids)
if p.resourceType.resourceType ==
pbm.profile.ResourceTypeEnum.STORAGE]
if get_all_policies:
return policies
if not policy_names:
policy_names = []
return [p for p in policies if p.name in policy_names]
def create_storage_policy(profile_manager, policy_spec):
'''
Creates a storage policy.
profile_manager
Reference to the profile manager.
policy_spec
Policy update spec.
'''
try:
profile_manager.Create(policy_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
def update_storage_policy(profile_manager, policy, policy_spec):
'''
Updates a storage policy.
profile_manager
Reference to the profile manager.
policy
Reference to the policy to be updated.
policy_spec
Policy update spec.
'''
try:
profile_manager.Update(policy.profileId, policy_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
def get_default_storage_policy_of_datastore(profile_manager, datastore):
'''
Returns the default storage policy reference assigned to a datastore.
profile_manager
Reference to the profile manager.
datastore
Reference to the datastore.
'''
# Retrieve all datastores visible
hub = pbm.placement.PlacementHub(
hubId=datastore._moId, hubType='Datastore')
log.trace('placement_hub = {0}'.format(hub))
try:
policy_id = profile_manager.QueryDefaultRequirementProfile(hub)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
policy_refs = get_policies_by_id(profile_manager, [policy_id])
if not policy_refs:
raise VMwareObjectRetrievalError('Storage policy with id \'{0}\' was '
'not found'.format(policy_id))
return policy_refs[0]
def assign_default_storage_policy_to_datastore(profile_manager, policy,
datastore):
'''
Assigns a storage policy as the default policy to a datastore.
profile_manager
Reference to the profile manager.
policy
Reference to the policy to assigned.
datastore
Reference to the datastore.
'''
placement_hub = pbm.placement.PlacementHub(
hubId=datastore._moId, hubType='Datastore')
log.trace('placement_hub = {0}'.format(placement_hub))
try:
profile_manager.AssignDefaultRequirementProfile(policy.profileId,
[placement_hub])
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
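Taken together, the helpers are typically chained as below (a hedged sketch; the vCenter connection details are placeholders):

import salt.utils.vmware
import salt.utils.pbm

si = salt.utils.vmware.get_service_instance(
    'vcenter.example.com', username='admin', password='secret')
profile_manager = salt.utils.pbm.get_profile_manager(si)
policies = salt.utils.pbm.get_storage_policies(
    profile_manager, policy_names=['salt_storage_policy'])
for policy in policies:
    print(policy.name)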

View file

@ -7,6 +7,7 @@ import glob
import logging
# Import salt libs
import salt.client
import salt.runner
import salt.state
import salt.utils
@ -14,6 +15,7 @@ import salt.utils.cache
import salt.utils.event
import salt.utils.files
import salt.utils.process
import salt.wheel
import salt.defaults.exitcodes
# Import 3rd-party libs
@ -22,6 +24,15 @@ from salt.ext import six
log = logging.getLogger(__name__)
REACTOR_INTERNAL_KEYWORDS = frozenset([
'__id__',
'__sls__',
'name',
'order',
'fun',
'state',
])
class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.state.Compiler):
'''
@ -30,6 +41,10 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
The reactor has the capability to execute pre-programmed executions
as reactions to events
'''
aliases = {
'cmd': 'local',
}
def __init__(self, opts, log_queue=None):
super(Reactor, self).__init__(log_queue=log_queue)
local_minion_opts = opts.copy()
@ -172,6 +187,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
return {'status': False, 'comment': 'Reactor does not exist.'}
def resolve_aliases(self, chunks):
'''
Preserve backward compatibility by rewriting the 'state' key in the low
chunks if it is using a legacy type.
'''
for idx, _ in enumerate(chunks):
new_state = self.aliases.get(chunks[idx]['state'])
if new_state is not None:
chunks[idx]['state'] = new_state
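For instance, assuming reactor is an instance of the class above, a legacy 'cmd' chunk is rewritten in place:

chunks = [{'state': 'cmd', 'fun': 'cmd.run', '__id__': 'restart_app'}]
reactor.resolve_aliases(chunks)
assert chunks[0]['state'] == 'local'  # 'cmd' is the legacy alias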
def reactions(self, tag, data, reactors):
'''
Render a list of reactor files and returns a reaction struct
@ -192,6 +217,7 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
except Exception as exc:
log.error('Exception trying to compile reactions: {0}'.format(exc), exc_info=True)
self.resolve_aliases(chunks)
return chunks
def call_reactions(self, chunks):
@ -249,12 +275,19 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
class ReactWrap(object):
'''
Create a wrapper that executes low data for the reaction system
Wrapper that executes low data for the Reactor System
'''
# class-wide cache of clients
client_cache = None
event_user = 'Reactor'
reaction_class = {
'local': salt.client.LocalClient,
'runner': salt.runner.RunnerClient,
'wheel': salt.wheel.Wheel,
'caller': salt.client.Caller,
}
def __init__(self, opts):
self.opts = opts
if ReactWrap.client_cache is None:
@ -265,21 +298,49 @@ class ReactWrap(object):
queue_size=self.opts['reactor_worker_hwm'] # queue size for those workers
)
def populate_client_cache(self, low):
'''
Populate the client cache with an instance of the specified type
'''
reaction_type = low['state']
if reaction_type not in self.client_cache:
log.debug('Reactor is populating %s client cache', reaction_type)
if reaction_type in ('runner', 'wheel'):
# Reaction types that run locally on the master want the full
# opts passed.
self.client_cache[reaction_type] = \
self.reaction_class[reaction_type](self.opts)
# The len() function will cause the module functions to load if
# they aren't already loaded. We want to load them so that the
# spawned threads don't need to load them. Loading in the
# spawned threads creates race conditions such as sometimes not
# finding the required function because another thread is in
# the middle of loading the functions.
len(self.client_cache[reaction_type].functions)
else:
# Reactions which use remote pubs only need the conf file when
# instantiating a client instance.
self.client_cache[reaction_type] = \
self.reaction_class[reaction_type](self.opts['conf_file'])
def run(self, low):
'''
Execute the specified function in the specified state by passing the
low data
Execute a reaction by invoking the proper wrapper func
'''
l_fun = getattr(self, low['state'])
self.populate_client_cache(low)
try:
f_call = salt.utils.format_call(l_fun, low)
kwargs = f_call.get('kwargs', {})
if 'arg' not in kwargs:
kwargs['arg'] = []
if 'kwarg' not in kwargs:
kwargs['kwarg'] = {}
l_fun = getattr(self, low['state'])
except AttributeError:
log.error(
'ReactWrap is missing a wrapper function for \'%s\'',
low['state']
)
# TODO: Setting the user doesn't seem to work for actual remote publishes
try:
wrap_call = salt.utils.format_call(l_fun, low)
args = wrap_call.get('args', ())
kwargs = wrap_call.get('kwargs', {})
# TODO: Setting user doesn't seem to work for actual remote pubs
if low['state'] in ('runner', 'wheel'):
# Update called function's low data with event user to
# segregate events fired by reactor and avoid reaction loops
@ -287,81 +348,106 @@ class ReactWrap(object):
# Replace ``state`` kwarg which comes from high data compiler.
# It breaks some runner functions and seems unnecessary.
kwargs['__state__'] = kwargs.pop('state')
# NOTE: if any additional keys are added here, they will also
# need to be added to filter_kwargs()
l_fun(*f_call.get('args', ()), **kwargs)
if 'args' in kwargs:
# New configuration
reactor_args = kwargs.pop('args')
for item in ('arg', 'kwarg'):
if item in low:
log.warning(
'Reactor \'%s\' is ignoring \'%s\' param %s due to '
'presence of \'args\' param. Check the Reactor System '
'documentation for the correct argument format.',
low['__id__'], item, low[item]
)
if low['state'] == 'caller' \
and isinstance(reactor_args, list) \
and not salt.utils.is_dictlist(reactor_args):
# Legacy 'caller' reactors were already using the 'args'
# param, but only supported a list of positional arguments.
# If low['args'] is a list but is *not* a dictlist, then
# this is actually using the legacy configuration. So, put
# the reactor args into kwarg['arg'] so that the wrapper
# interprets them as positional args.
kwargs['arg'] = reactor_args
kwargs['kwarg'] = {}
else:
kwargs['arg'] = ()
kwargs['kwarg'] = reactor_args
if not isinstance(kwargs['kwarg'], dict):
kwargs['kwarg'] = salt.utils.repack_dictlist(kwargs['kwarg'])
if not kwargs['kwarg']:
log.error(
'Reactor \'%s\' failed to execute %s \'%s\': '
'Incorrect argument format, check the Reactor System '
'documentation for the correct format.',
low['__id__'], low['state'], low['fun']
)
return
else:
# Legacy configuration
react_call = {}
if low['state'] in ('runner', 'wheel'):
if 'arg' not in kwargs or 'kwarg' not in kwargs:
# Runner/wheel execute on the master, so we can use
# format_call to get the functions args/kwargs
react_fun = self.client_cache[low['state']].functions.get(low['fun'])
if react_fun is None:
log.error(
'Reactor \'%s\' failed to execute %s \'%s\': '
'function not available',
low['__id__'], low['state'], low['fun']
)
return
react_call = salt.utils.format_call(
react_fun,
low,
expected_extra_kws=REACTOR_INTERNAL_KEYWORDS
)
if 'arg' not in kwargs:
kwargs['arg'] = react_call.get('args', ())
if 'kwarg' not in kwargs:
kwargs['kwarg'] = react_call.get('kwargs', {})
# Execute the wrapper with the proper args/kwargs. kwargs['arg']
# and kwargs['kwarg'] contain the positional and keyword arguments
# that will be passed to the client interface to execute the
# desired runner/wheel/remote-exec/etc. function.
l_fun(*args, **kwargs)
except SystemExit:
log.warning(
'Reactor \'%s\' attempted to exit. Ignored.', low['__id__']
)
except Exception:
log.error(
'Failed to execute {0}: {1}\n'.format(low['state'], l_fun),
exc_info=True
)
def local(self, *args, **kwargs):
'''
Wrap LocalClient for running :ref:`execution modules <all-salt.modules>`
'''
if 'local' not in self.client_cache:
self.client_cache['local'] = salt.client.LocalClient(self.opts['conf_file'])
try:
self.client_cache['local'].cmd_async(*args, **kwargs)
except SystemExit:
log.warning('Attempt to exit reactor. Ignored.')
except Exception as exc:
log.warning('Exception caught by reactor: {0}'.format(exc))
cmd = local
'Reactor \'%s\' failed to execute %s \'%s\'',
low['__id__'], low['state'], low['fun'], exc_info=True
)
def runner(self, fun, **kwargs):
'''
Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`
'''
if 'runner' not in self.client_cache:
self.client_cache['runner'] = salt.runner.RunnerClient(self.opts)
# The len() function will cause the module functions to load if
# they aren't already loaded. We want to load them so that the
# spawned threads don't need to load them. Loading in the spawned
# threads creates race conditions such as sometimes not finding
# the required function because another thread is in the middle
# of loading the functions.
len(self.client_cache['runner'].functions)
try:
self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs))
except SystemExit:
log.warning('Attempt to exit in reactor by runner. Ignored')
except Exception as exc:
log.warning('Exception caught by reactor: {0}'.format(exc))
self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs))
def wheel(self, fun, **kwargs):
'''
Wrap Wheel to enable executing :ref:`wheel modules <all-salt.wheel>`
'''
if 'wheel' not in self.client_cache:
self.client_cache['wheel'] = salt.wheel.Wheel(self.opts)
# The len() function will cause the module functions to load if
# they aren't already loaded. We want to load them so that the
# spawned threads don't need to load them. Loading in the spawned
# threads creates race conditions such as sometimes not finding
# the required function because another thread is in the middle
# of loading the functions.
len(self.client_cache['wheel'].functions)
try:
self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs))
except SystemExit:
log.warning('Attempt to exit in reactor by wheel. Ignored.')
except Exception as exc:
log.warning('Exception caught by reactor: {0}'.format(exc))
self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs))
def caller(self, fun, *args, **kwargs):
def local(self, fun, tgt, **kwargs):
'''
Wrap Caller to enable executing :ref:`caller modules <all-salt.caller>`
Wrap LocalClient for running :ref:`execution modules <all-salt.modules>`
'''
log.debug("in caller with fun {0} args {1} kwargs {2}".format(fun, args, kwargs))
args = kwargs.get('args', [])
kwargs = kwargs.get('kwargs', {})
if 'caller' not in self.client_cache:
self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file'])
try:
self.client_cache['caller'].cmd(fun, *args, **kwargs)
except SystemExit:
log.warning('Attempt to exit reactor. Ignored.')
except Exception as exc:
log.warning('Exception caught by reactor: {0}'.format(exc))
self.client_cache['local'].cmd_async(tgt, fun, **kwargs)
def caller(self, fun, **kwargs):
'''
Wrap LocalCaller to execute remote exec functions locally on the Minion
'''
self.client_cache['caller'].cmd(fun, *kwargs['arg'], **kwargs['kwarg'])
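Putting the pieces together, a hedged sketch of how a low chunk flows through run() to the matching wrapper (the chunk contents and master_opts are illustrative):

low = {'state': 'runner',              # selects the runner() wrapper
       'fun': 'state.orchestrate',
       '__id__': 'orchestrate_react',
       'name': 'orchestrate_react',
       'order': 1,
       'args': [{'mods': 'reactor_orch'}]}
wrap = ReactWrap(master_opts)          # master_opts: loaded master config
wrap.run(low)  # fires runner('state.orchestrate', ..., kwarg={'mods': 'reactor_orch'})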

296
salt/utils/saltclass.py Normal file
View file

@ -0,0 +1,296 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import re
import logging
from salt.ext.six import iteritems
import yaml
from jinja2 import FileSystemLoader, Environment
log = logging.getLogger(__name__)
# Renders jinja from a template file
def render_jinja(_file, salt_data):
j_env = Environment(loader=FileSystemLoader(os.path.dirname(_file)))
j_env.globals.update({
'__opts__': salt_data['__opts__'],
'__salt__': salt_data['__salt__'],
'__grains__': salt_data['__grains__'],
'__pillar__': salt_data['__pillar__'],
'minion_id': salt_data['minion_id'],
})
j_render = j_env.get_template(os.path.basename(_file)).render()
return j_render
# Renders yaml from rendered jinja
def render_yaml(_file, salt_data):
return yaml.safe_load(render_jinja(_file, salt_data))
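# For example (hypothetical file), a class definition may use the Jinja
# globals injected above before the result is parsed as YAML:
#   # /srv/saltclass/classes/app.yml
#   states:
#     - app
#   pillars:
#     app:
#       server_name: {{ minion_id }}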
# Returns a dict from a class yaml definition
def get_class(_class, salt_data):
l_files = []
saltclass_path = salt_data['path']
straight = '{0}/classes/{1}.yml'.format(saltclass_path, _class)
sub_straight = '{0}/classes/{1}.yml'.format(saltclass_path,
_class.replace('.', '/'))
sub_init = '{0}/classes/{1}/init.yml'.format(saltclass_path,
_class.replace('.', '/'))
for root, dirs, files in os.walk('{0}/classes'.format(saltclass_path)):
for l_file in files:
l_files.append('{0}/{1}'.format(root, l_file))
if straight in l_files:
return render_yaml(straight, salt_data)
if sub_straight in l_files:
return render_yaml(sub_straight, salt_data)
if sub_init in l_files:
return render_yaml(sub_init, salt_data)
log.warning('{0}: Class definition not found'.format(_class))
return {}
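# For example, class 'app.web' with a saltclass path of '/srv/saltclass'
# (hypothetical values) is searched for in this order:
#   /srv/saltclass/classes/app.web.yml
#   /srv/saltclass/classes/app/web.yml
#   /srv/saltclass/classes/app/web/init.yml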
# Return environment
def get_env_from_dict(exp_dict_list):
environment = ''
for s_class in exp_dict_list:
if 'environment' in s_class:
environment = s_class['environment']
return environment
# Merge dict b into a
def dict_merge(a, b, path=None):
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], list) and isinstance(b[key], list):
if b[key][0] == '^':
b[key].pop(0)
a[key] = b[key]
else:
a[key].extend(b[key])
elif isinstance(a[key], dict) and isinstance(b[key], dict):
dict_merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
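# For example (hypothetical values):
#   dict_merge({'pkgs': ['vim'], 'port': 80}, {'pkgs': ['git'], 'port': 443})
# returns {'pkgs': ['vim', 'git'], 'port': 443}: lists are extended, scalars
# are overridden by b, and nested dicts are merged recursively. Prefixing a
# list in b with '^' (e.g. {'pkgs': ['^', 'git']}) replaces instead of extends.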
# Recursive search and replace in a dict
def dict_search_and_replace(d, old, new, expanded):
for (k, v) in iteritems(d):
if isinstance(v, dict):
dict_search_and_replace(d[k], old, new, expanded)
if v == old:
d[k] = new
return d
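# For example, dict_search_and_replace({'a': '${x}', 'b': {'c': '${x}'}},
# '${x}', 'val', []) rewrites both occurrences, however deeply nested,
# returning {'a': 'val', 'b': {'c': 'val'}}.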
# Retrieve original value from ${xx:yy:zz} to be expanded
def find_value_to_expand(x, v):
a = x
for i in v[2:-1].split(':'):
if i in a:
a = a.get(i)
else:
a = v
return a
return a
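# For example, with x = {'a': {'b': 'c'}} and v = '${a:b}', the loop walks
# the keys 'a' then 'b' and returns 'c'; if any key is missing, the
# reference string itself is returned unchanged.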
# Return a dict that contains expanded variables if found
def expand_variables(a, b, expanded, path=None):
if path is None:
b = a.copy()
path = []
for (k, v) in iteritems(a):
if isinstance(v, dict):
expand_variables(v, b, expanded, path + [str(k)])
else:
if isinstance(v, str):
vre = re.search(r'(^|.)\$\{.*?\}', v)
if vre:
re_v = vre.group(0)
if re_v.startswith('\\'):
v_new = v.replace(re_v, re_v.lstrip('\\'))
b = dict_search_and_replace(b, v, v_new, expanded)
expanded.append(k)
elif not re_v.startswith('$'):
v_expanded = find_value_to_expand(b, re_v[1:])
v_new = v.replace(re_v[1:], v_expanded)
b = dict_search_and_replace(b, v, v_new, expanded)
expanded.append(k)
else:
v_expanded = find_value_to_expand(b, re_v)
b = dict_search_and_replace(b, v, v_expanded, expanded)
expanded.append(k)
return b
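# For example (hypothetical values), with a = {'base': '/srv', 'p': '${base}'}
# the value of 'p' expands to '/srv', and a value such as 'log ${base}/www'
# expands in place to 'log /srv/www'; a reference escaped as '\${base}' is
# left literal with the backslash stripped.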
def expand_classes_in_order(minion_dict,
salt_data,
seen_classes,
expanded_classes,
classes_to_expand):
    # Get classes to expand from the minion dictionary
if not classes_to_expand and 'classes' in minion_dict:
classes_to_expand = minion_dict['classes']
# Now loop on list to recursively expand them
for klass in classes_to_expand:
if klass not in seen_classes:
seen_classes.append(klass)
expanded_classes[klass] = get_class(klass, salt_data)
# Fix corner case where class is loaded but doesn't contain anything
if expanded_classes[klass] is None:
expanded_classes[klass] = {}
# Now replace class element in classes_to_expand by expansion
if 'classes' in expanded_classes[klass]:
l_id = classes_to_expand.index(klass)
classes_to_expand[l_id:l_id] = expanded_classes[klass]['classes']
expand_classes_in_order(minion_dict,
salt_data,
seen_classes,
expanded_classes,
classes_to_expand)
else:
expand_classes_in_order(minion_dict,
salt_data,
seen_classes,
expanded_classes,
classes_to_expand)
# We may have duplicates here and we want to remove them
tmp = []
for t_element in classes_to_expand:
if t_element not in tmp:
tmp.append(t_element)
classes_to_expand = tmp
# Now that we've retrieved every class in order,
# let's return an ordered list of dicts
ord_expanded_classes = []
ord_expanded_states = []
for ord_klass in classes_to_expand:
ord_expanded_classes.append(expanded_classes[ord_klass])
        # Also build the states list in the same order
# Address the corner case where states is empty in a class definition
if 'states' in expanded_classes[ord_klass] and expanded_classes[ord_klass]['states'] is None:
expanded_classes[ord_klass]['states'] = {}
if 'states' in expanded_classes[ord_klass]:
ord_expanded_states.extend(expanded_classes[ord_klass]['states'])
# Add our minion dict as final element but check if we have states to process
if 'states' in minion_dict and minion_dict['states'] is None:
minion_dict['states'] = []
if 'states' in minion_dict:
ord_expanded_states.extend(minion_dict['states'])
ord_expanded_classes.append(minion_dict)
return ord_expanded_classes, classes_to_expand, ord_expanded_states
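# For example, if a node lists classes ['app', 'common'] and class 'app'
# itself declares classes ['app.backend'], the expansion above yields the
# ordered, de-duplicated list ['app.backend', 'app', 'common'], with each
# class's states concatenated in that same order and the minion's own dict
# appended last.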
def expanded_dict_from_minion(minion_id, salt_data):
_file = ''
saltclass_path = salt_data['path']
    # Find the node definition file for this minion_id
for root, dirs, files in os.walk('{0}/nodes'.format(saltclass_path)):
for minion_file in files:
if minion_file == '{0}.yml'.format(minion_id):
_file = os.path.join(root, minion_file)
    # Load the minion_id definition if it exists, else an empty dict
node_dict = {}
if _file:
node_dict[minion_id] = render_yaml(_file, salt_data)
else:
log.warning('{0}: Node definition not found'.format(minion_id))
node_dict[minion_id] = {}
    # Get 3 ordered lists:
    # expanded_classes: A list of all the class dicts
    # classes_list: List of all the class names
    # states_list: List of all the states
expanded_classes, classes_list, states_list = expand_classes_in_order(
node_dict[minion_id],
salt_data, [], {}, [])
# Here merge the pillars together
pillars_dict = {}
for exp_dict in expanded_classes:
if 'pillars' in exp_dict:
dict_merge(pillars_dict, exp_dict)
return expanded_classes, pillars_dict, classes_list, states_list
def get_pillars(minion_id, salt_data):
    # Get one list of dicts, one dict and two lists
# expanded_classes: Full list of expanded dicts
# pillars_dict: dict containing merged pillars in order
# classes_list: All classes processed in order
# states_list: All states listed in order
(expanded_classes,
pillars_dict,
classes_list,
states_list) = expanded_dict_from_minion(minion_id, salt_data)
# Retrieve environment
environment = get_env_from_dict(expanded_classes)
# Expand ${} variables in merged dict
    # The 'pillars' key will be absent if nothing was defined for this minion_id
if 'pillars' in pillars_dict:
pillars_dict_expanded = expand_variables(pillars_dict['pillars'], {}, [])
else:
pillars_dict_expanded = expand_variables({}, {}, [])
# Build the final pillars dict
pillars_dict = {}
pillars_dict['__saltclass__'] = {}
pillars_dict['__saltclass__']['states'] = states_list
pillars_dict['__saltclass__']['classes'] = classes_list
pillars_dict['__saltclass__']['environment'] = environment
pillars_dict['__saltclass__']['nodename'] = minion_id
pillars_dict.update(pillars_dict_expanded)
return pillars_dict
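# The returned pillar data thus has the shape (illustrative values):
#   {'__saltclass__': {'states': ['app'], 'classes': ['app', 'common'],
#                      'environment': 'base', 'nodename': 'web01'},
#    ...expanded pillar keys...}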
def get_tops(minion_id, salt_data):
    # Get one list of dicts, one dict and two lists
# expanded_classes: Full list of expanded dicts
# pillars_dict: dict containing merged pillars in order
# classes_list: All classes processed in order
# states_list: All states listed in order
(expanded_classes,
pillars_dict,
classes_list,
states_list) = expanded_dict_from_minion(minion_id, salt_data)
# Retrieve environment
environment = get_env_from_dict(expanded_classes)
# Build final top dict
tops_dict = {}
tops_dict[environment] = states_list
return tops_dict
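
A minimal sketch of how a caller might drive this module. The shape of salt_data is inferred from the functions above ('path', '__opts__', '__salt__', '__grains__', '__pillar__', 'minion_id'); the ext_pillar wiring and the default path are assumptions for illustration, not part of this diff:

# Hypothetical driver, e.g. from an ext_pillar module (illustration only).
import salt.utils.saltclass as sc


def ext_pillar(minion_id, pillar, path='/srv/saltclass'):
    salt_data = {
        '__opts__': __opts__,        # dunders injected by the Salt loader
        '__salt__': __salt__,
        '__grains__': __grains__,
        '__pillar__': pillar,
        'minion_id': minion_id,
        'path': path,                # root holding classes/ and nodes/
    }
    return sc.get_pillars(minion_id, salt_data)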

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff