mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
merge upstream develop
This commit is contained in:
parent
5de8f9ce3e
commit
52acfd980d
352 changed files with 22456 additions and 4369 deletions
60
.github/CODEOWNERS
vendored
Normal file
60
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
# SALTSTACK CODE OWNERS
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about CODEOWNERS file
|
||||
|
||||
# Lines starting with '#' are comments.
|
||||
# Each line is a file pattern followed by one or more owners.
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about the CODEOWNERS file
|
||||
|
||||
# Team Boto
|
||||
salt/**/*boto* @saltstack/team-boto
|
||||
|
||||
# Team Core
|
||||
salt/auth/ @saltstack/team-core
|
||||
salt/cache/ @saltstack/team-core
|
||||
salt/cli/ @saltstack/team-core
|
||||
salt/client/* @saltstack/team-core
|
||||
salt/config/* @saltstack/team-core
|
||||
salt/daemons/ @saltstack/team-core
|
||||
salt/pillar/ @saltstack/team-core
|
||||
salt/loader.py @saltstack/team-core
|
||||
salt/payload.py @saltstack/team-core
|
||||
salt/**/master* @saltstack/team-core
|
||||
salt/**/minion* @saltstack/team-core
|
||||
|
||||
# Team Cloud
|
||||
salt/cloud/ @saltstack/team-cloud
|
||||
salt/utils/openstack/ @saltstack/team-cloud
|
||||
salt/utils/aws.py @saltstack/team-cloud
|
||||
salt/**/*cloud* @saltstack/team-cloud
|
||||
|
||||
# Team NetAPI
|
||||
salt/cli/api.py @saltstack/team-netapi
|
||||
salt/client/netapi.py @saltstack/team-netapi
|
||||
salt/netapi/ @saltstack/team-netapi
|
||||
|
||||
# Team Network
|
||||
salt/proxy/ @saltstack/team-proxy
|
||||
|
||||
# Team SPM
|
||||
salt/cli/spm.py @saltstack/team-spm
|
||||
salt/spm/ @saltstack/team-spm
|
||||
|
||||
# Team SSH
|
||||
salt/cli/ssh.py @saltstack/team-ssh
|
||||
salt/client/ssh/ @saltstack/team-ssh
|
||||
salt/runners/ssh.py @saltstack/team-ssh
|
||||
salt/**/thin.py @saltstack/team-ssh
|
||||
|
||||
# Team State
|
||||
salt/state.py @saltstack/team-state
|
||||
|
||||
# Team Transport
|
||||
salt/transport/ @saltstack/team-transport
|
||||
salt/utils/zeromq.py @saltstack/team-transport
|
||||
|
||||
# Team Windows
|
||||
salt/**/*win* @saltstack/team-windows
|
4
.github/stale.yml
vendored
4
.github/stale.yml
vendored
|
@ -1,8 +1,8 @@
|
|||
# Probot Stale configuration file
|
||||
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
# 1075 is approximately 2 years and 11 months
|
||||
daysUntilStale: 1075
|
||||
# 1000 is approximately 2 years and 9 months
|
||||
daysUntilStale: 1000
|
||||
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
# directory is identical.
|
||||
|
||||
#my-digitalocean-config:
|
||||
# driver: digital_ocean
|
||||
# driver: digitalocean
|
||||
# client_key: wFGEwgregeqw3435gDger
|
||||
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
# location: New York 1
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#my-digitalocean-config:
|
||||
# driver: digital_ocean
|
||||
# driver: digitalocean
|
||||
# client_key: wFGEwgregeqw3435gDger
|
||||
# api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
# location: New York 1
|
||||
|
|
|
@ -373,7 +373,7 @@
|
|||
# interface: eth0
|
||||
# cidr: '10.0.0.0/8'
|
||||
|
||||
# The number of seconds a mine update runs.
|
||||
# The number of minutes between mine updates.
|
||||
#mine_interval: 60
|
||||
|
||||
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
|
||||
|
|
|
@ -10795,6 +10795,7 @@ cmd_whitelist_glob:
|
|||
.UNINDENT
|
||||
.UNINDENT
|
||||
.SS Thread Settings
|
||||
.SS \fBmultiprocessing\fP
|
||||
.sp
|
||||
Default: \fBTrue\fP
|
||||
.sp
|
||||
|
|
|
@ -22,6 +22,7 @@ beacon modules
|
|||
load
|
||||
log
|
||||
memusage
|
||||
napalm_beacon
|
||||
network_info
|
||||
network_settings
|
||||
pkg
|
||||
|
|
6
doc/ref/beacons/all/salt.beacons.napalm_beacon.rst
Normal file
6
doc/ref/beacons/all/salt.beacons.napalm_beacon.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
==========================
|
||||
salt.beacons.napalm_beacon
|
||||
==========================
|
||||
|
||||
.. automodule:: salt.beacons.napalm_beacon
|
||||
:members:
|
|
@ -136,7 +136,7 @@ Query Options
|
|||
.. versionadded:: 2014.7.0
|
||||
|
||||
Display a list of configured profiles. Pass in a cloud provider to view
|
||||
the provider's associated profiles, such as ``digital_ocean``, or pass in
|
||||
the provider's associated profiles, such as ``digitalocean``, or pass in
|
||||
``all`` to list all the configured profiles.
|
||||
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ Full list of Salt Cloud modules
|
|||
aliyun
|
||||
azurearm
|
||||
cloudstack
|
||||
digital_ocean
|
||||
digitalocean
|
||||
dimensiondata
|
||||
ec2
|
||||
gce
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
===============================
|
||||
salt.cloud.clouds.digital_ocean
|
||||
salt.cloud.clouds.digitalocean
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.cloud.clouds.digital_ocean
|
||||
.. automodule:: salt.cloud.clouds.digitalocean
|
||||
:members:
|
|
@ -4175,7 +4175,9 @@ information.
|
|||
|
||||
.. code-block:: yaml
|
||||
|
||||
reactor: []
|
||||
reactor:
|
||||
- 'salt/minion/*/start':
|
||||
- salt://reactor/startup_tasks.sls
|
||||
|
||||
.. conf_master:: reactor_refresh_interval
|
||||
|
||||
|
|
|
@ -706,7 +706,7 @@ Note these can be defined in the pillar for a minion as well.
|
|||
|
||||
Default: ``60``
|
||||
|
||||
The number of seconds a mine update runs.
|
||||
The number of minutes between mine updates.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -2113,6 +2113,41 @@ It will be interpreted as megabytes.
|
|||
|
||||
file_recv_max_size: 100
|
||||
|
||||
.. conf_minion:: pass_to_ext_pillars
|
||||
|
||||
``pass_to_ext_pillars``
|
||||
-----------------------
|
||||
|
||||
Specify a list of configuration keys whose values are to be passed to
|
||||
external pillar functions.
|
||||
|
||||
Suboptions can be specified using the ':' notation (i.e. ``option:suboption``)
|
||||
|
||||
The values are merged and included in the ``extra_minion_data`` optional
|
||||
parameter of the external pillar function. The ``extra_minion_data`` parameter
|
||||
is passed only to the external pillar functions that have it explicitly
|
||||
specified in their definition.
|
||||
|
||||
If the config contains
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
opt1: value1
|
||||
opt2:
|
||||
subopt1: value2
|
||||
subopt2: value3
|
||||
|
||||
pass_to_ext_pillars:
|
||||
- opt1
|
||||
- opt2: subopt1
|
||||
|
||||
the ``extra_minion_data`` parameter will be
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{'opt1': 'value1',
|
||||
'opt2': {'subopt1': 'value2'}}
|
||||
|
||||
Security Settings
|
||||
=================
|
||||
|
||||
|
@ -2369,11 +2404,14 @@ Thread Settings
|
|||
|
||||
.. conf_minion:: multiprocessing
|
||||
|
||||
``multiprocessing``
|
||||
-------
|
||||
|
||||
Default: ``True``
|
||||
|
||||
If `multiprocessing` is enabled when a minion receives a
|
||||
If ``multiprocessing`` is enabled when a minion receives a
|
||||
publication a new process is spawned and the command is executed therein.
|
||||
Conversely, if `multiprocessing` is disabled the new publication will be run
|
||||
Conversely, if ``multiprocessing`` is disabled the new publication will be run
|
||||
executed in a thread.
|
||||
|
||||
|
||||
|
|
|
@ -118,3 +118,53 @@ has to be closed after every command.
|
|||
.. code-block:: yaml
|
||||
|
||||
proxy_always_alive: False
|
||||
|
||||
``proxy_merge_pillar_in_opts``
|
||||
------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Wheter the pillar data to be merged into the proxy configuration options.
|
||||
As multiple proxies can run on the same server, we may need different
|
||||
configuration options for each, while there's one single configuration file.
|
||||
The solution is merging the pillar data of each proxy minion into the opts.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy_merge_pillar_in_opts: True
|
||||
|
||||
``proxy_deep_merge_pillar_in_opts``
|
||||
-----------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Deep merge of pillar data into configuration opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
|
||||
|
||||
``proxy_merge_pillar_in_opts_strategy``
|
||||
---------------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``smart``.
|
||||
|
||||
The strategy used when merging pillar configuration into opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
|
||||
|
||||
``proxy_mines_pillar``
|
||||
----------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``True``.
|
||||
|
||||
Allow enabling mine details using pillar data. This evaluates the mine
|
||||
configuration under the pillar, for the following regular minion options that
|
||||
are also equally available on the proxy minion: :conf_minion:`mine_interval`,
|
||||
and :conf_minion:`mine_functions`.
|
||||
|
|
|
@ -44,6 +44,7 @@ execution modules
|
|||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch
|
||||
boto_cloudwatch_event
|
||||
|
@ -326,6 +327,7 @@ execution modules
|
|||
ps
|
||||
publish
|
||||
puppet
|
||||
purefa
|
||||
pushbullet
|
||||
pushover_notify
|
||||
pw_group
|
||||
|
@ -417,6 +419,7 @@ execution modules
|
|||
test
|
||||
testinframod
|
||||
test_virtual
|
||||
textfsm_mod
|
||||
timezone
|
||||
tls
|
||||
tomcat
|
||||
|
|
6
doc/ref/modules/all/salt.modules.boto_cloudfront.rst
Normal file
6
doc/ref/modules/all/salt.modules.boto_cloudfront.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
============================
|
||||
salt.modules.boto_cloudfront
|
||||
============================
|
||||
|
||||
.. automodule:: salt.modules.boto_cloudfront
|
||||
:members:
|
6
doc/ref/modules/all/salt.modules.purefa.rst
Normal file
6
doc/ref/modules/all/salt.modules.purefa.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
===================
|
||||
salt.modules.purefa
|
||||
===================
|
||||
|
||||
.. automodule:: salt.modules.purefa
|
||||
:members:
|
5
doc/ref/modules/all/salt.modules.textfsm_mod.rst
Normal file
5
doc/ref/modules/all/salt.modules.textfsm_mod.rst
Normal file
|
@ -0,0 +1,5 @@
|
|||
salt.modules.textfsm_mod module
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.modules.textfsm_mod
|
||||
:members:
|
|
@ -1,5 +1,5 @@
|
|||
salt.runners.digicertapi module
|
||||
===============================
|
||||
salt.runners.digicertapi
|
||||
========================
|
||||
|
||||
.. automodule:: salt.runners.digicertapi
|
||||
:members:
|
||||
|
|
|
@ -1,5 +1,11 @@
|
|||
salt.runners.mattermost module
|
||||
==============================
|
||||
salt.runners.mattermost
|
||||
=======================
|
||||
|
||||
**Note for 2017.7 releases!**
|
||||
|
||||
Due to the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module not being available in this release series, importing the `salt.runners.config <https://github.com/saltstack/salt/blob/develop/salt/runners/config.py>`_ module from the develop branch is required to make this module work.
|
||||
|
||||
Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 <https://github.com/saltstack/salt/issues/43479>`_
|
||||
|
||||
.. automodule:: salt.runners.mattermost
|
||||
:members:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
salt.runners.vault module
|
||||
=========================
|
||||
salt.runners.vault
|
||||
==================
|
||||
|
||||
.. automodule:: salt.runners.vault
|
||||
:members:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
salt.runners.venafiapi module
|
||||
=============================
|
||||
salt.runners.venafiapi
|
||||
======================
|
||||
|
||||
.. automodule:: salt.runners.venafiapi
|
||||
:members:
|
||||
|
|
|
@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f
|
|||
for chunk in chunks:
|
||||
# The state runtime uses "tags" to track completed jobs, it may
|
||||
# look familiar with the _|-
|
||||
tag = salt.utils.gen_state_tag(chunk)
|
||||
tag = __utils__['state.gen_tag'](chunk)
|
||||
if tag in running:
|
||||
# Already ran the pkg state, skip aggregation
|
||||
continue
|
||||
|
|
|
@ -31,6 +31,7 @@ state modules
|
|||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch_alarm
|
||||
boto_cloudwatch_event
|
||||
|
@ -179,6 +180,7 @@ state modules
|
|||
netusers
|
||||
network
|
||||
netyang
|
||||
nfs_export
|
||||
nftables
|
||||
npm
|
||||
ntp
|
||||
|
|
6
doc/ref/states/all/salt.states.boto_cloudfront.rst
Normal file
6
doc/ref/states/all/salt.states.boto_cloudfront.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
===========================
|
||||
salt.states.boto_cloudfront
|
||||
===========================
|
||||
|
||||
.. automodule:: salt.states.boto_cloudfront
|
||||
:members:
|
6
doc/ref/states/all/salt.states.nfs_export.rst
Normal file
6
doc/ref/states/all/salt.states.nfs_export.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
======================
|
||||
salt.states.nfs_export
|
||||
======================
|
||||
|
||||
.. automodule:: salt.states.nfs_export
|
||||
:members:
|
|
@ -153,7 +153,12 @@ A State Module must return a dict containing the following keys/values:
|
|||
However, if a state is going to fail and this can be determined
|
||||
in test mode without applying the change, ``False`` can be returned.
|
||||
|
||||
- **comment:** A string containing a summary of the result.
|
||||
- **comment:** A list of strings or a single string summarizing the result.
|
||||
Note that support for lists of strings is available as of Salt Oxygen.
|
||||
Lists of strings will be joined with newlines to form the final comment;
|
||||
this is useful to allow multiple comments from subparts of a state.
|
||||
Prefer to keep line lengths short (use multiple lines as needed),
|
||||
and end with punctuation (e.g. a period) to delimit multiple comments.
|
||||
|
||||
The return data can also, include the **pchanges** key, this stands for
|
||||
`predictive changes`. The **pchanges** key informs the State system what
|
||||
|
|
|
@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``:
|
|||
|
||||
.. note::
|
||||
You can have only one top level ``reactor`` section, so if one already
|
||||
exists, add this code to the existing section. See :ref:`Understanding the
|
||||
Structure of Reactor Formulas <reactor-structure>` to learn more about
|
||||
reactor SLS syntax.
|
||||
exists, add this code to the existing section. See :ref:`here
|
||||
<reactor-sls>` to learn more about reactor SLS syntax.
|
||||
|
||||
|
||||
Start the Salt Master in Debug Mode
|
||||
|
|
|
@ -183,7 +183,7 @@ imports should be absent from the Salt Cloud module.
|
|||
|
||||
A good example of a non-libcloud driver is the DigitalOcean driver:
|
||||
|
||||
https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digital_ocean.py
|
||||
https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digitalocean.py
|
||||
|
||||
The ``create()`` Function
|
||||
-------------------------
|
||||
|
|
|
@ -444,7 +444,7 @@ under the API Access tab.
|
|||
.. code-block:: yaml
|
||||
|
||||
my-digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
personal_access_token: xxx
|
||||
location: New York 1
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ under the "SSH Keys" section.
|
|||
# /etc/salt/cloud.providers.d/ directory.
|
||||
|
||||
my-digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
personal_access_token: xxx
|
||||
ssh_key_file: /path/to/ssh/key/file
|
||||
ssh_key_names: my-key-name,my-key-name-2
|
||||
|
@ -63,7 +63,7 @@ command:
|
|||
# salt-cloud --list-locations my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
Amsterdam 1:
|
||||
----------
|
||||
|
@ -87,7 +87,7 @@ command:
|
|||
# salt-cloud --list-sizes my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
512MB:
|
||||
----------
|
||||
|
@ -117,7 +117,7 @@ command:
|
|||
# salt-cloud --list-images my-digitalocean-config
|
||||
my-digitalocean-config:
|
||||
----------
|
||||
digital_ocean:
|
||||
digitalocean:
|
||||
----------
|
||||
10.1:
|
||||
----------
|
||||
|
@ -142,7 +142,7 @@ Profile Specifics:
|
|||
ssh_username
|
||||
------------
|
||||
|
||||
If using a FreeBSD image from Digital Ocean, you'll need to set the ``ssh_username``
|
||||
If using a FreeBSD image from DigitalOcean, you'll need to set the ``ssh_username``
|
||||
setting to ``freebsd`` in your profile configuration.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
|
|
@ -263,9 +263,17 @@ against that branch.
|
|||
Release Branches
|
||||
----------------
|
||||
|
||||
For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases.
|
||||
For each release, a branch will be created when the SaltStack release team is
|
||||
ready to tag. The release branch is created from the parent branch and will be
|
||||
the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
|
||||
branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
|
||||
release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
|
||||
strategy will allow for more stability when there is a need for a re-tag during
|
||||
the testing phase of the release process.
|
||||
|
||||
Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well.
|
||||
Once the release branch is created, the fixes required for a given release, as
|
||||
determined by the SaltStack release team, will be added to this branch. All
|
||||
commits in this branch will be merged forward into the parent branch as well.
|
||||
|
||||
Keeping Salt Forks in Sync
|
||||
==========================
|
||||
|
|
|
@ -219,7 +219,7 @@ the default cloud provider configuration file for DigitalOcean looks like this:
|
|||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: ''
|
||||
api_key: ''
|
||||
location: New York 1
|
||||
|
@ -230,7 +230,7 @@ must be provided:
|
|||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: wFGEwgregeqw3435gDger
|
||||
api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg
|
||||
location: New York 1
|
||||
|
|
|
@ -541,7 +541,7 @@ provider configuration file in the integration test file directory located at
|
|||
``tests/integration/files/conf/cloud.*.d/``.
|
||||
|
||||
The following is an example of the default profile configuration file for Digital
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf``:
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digitalocean.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -557,12 +557,12 @@ be provided by the user by editing the provider configuration file before runnin
|
|||
tests.
|
||||
|
||||
The following is an example of the default provider configuration file for Digital
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digital_ocean.conf``:
|
||||
Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digitalocean.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
digitalocean-config:
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
client_key: ''
|
||||
api_key: ''
|
||||
location: New York 1
|
||||
|
|
|
@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
|
|||
port: 5959
|
||||
proto: tcp
|
||||
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
|
||||
Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
engines_dirs:
|
||||
- /home/bob/engines
|
||||
|
||||
Writing an Engine
|
||||
=================
|
||||
|
|
|
@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described
|
|||
Installation from the Official SaltStack Repository
|
||||
===================================================
|
||||
|
||||
Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the
|
||||
Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the
|
||||
Official SaltStack repository.
|
||||
|
||||
Instructions are at https://repo.saltstack.com/#debian.
|
||||
|
|
|
@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and
|
|||
other systems about operations.
|
||||
|
||||
The event system fires events with a very specific criteria. Every event has a
|
||||
:strong:`tag`. Event tags allow for fast top level filtering of events. In
|
||||
addition to the tag, each event has a data structure. This data structure is a
|
||||
dict, which contains information about the event.
|
||||
**tag**. Event tags allow for fast top-level filtering of events. In addition
|
||||
to the tag, each event has a data structure. This data structure is a
|
||||
dictionary, which contains information about the event.
|
||||
|
||||
.. _reactor-mapping-events:
|
||||
|
||||
|
@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run.
|
|||
the :ref:`querystring syntax <querystring-syntax>` (e.g.
|
||||
``salt://reactor/mycustom.sls?saltenv=reactor``).
|
||||
|
||||
Reactor sls files are similar to state and pillar sls files. They are
|
||||
by default yaml + Jinja templates and are passed familiar context variables.
|
||||
Reactor SLS files are similar to State and Pillar SLS files. They are by
|
||||
default YAML + Jinja templates and are passed familiar context variables.
|
||||
Click :ref:`here <reactor-jinja-context>` for more detailed information on the
|
||||
variables availble in Jinja templating.
|
||||
|
||||
They differ because of the addition of the ``tag`` and ``data`` variables.
|
||||
|
||||
- The ``tag`` variable is just the tag in the fired event.
|
||||
- The ``data`` variable is the event's data dict.
|
||||
|
||||
Here is a simple reactor sls:
|
||||
Here is the SLS for a simple reaction:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
|
@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor
|
|||
system. The only difference is that the data is matched up to the salt command
|
||||
API and the runner system. In this example, a command is published to the
|
||||
``mysql1`` minion with a function of :py:func:`state.apply
|
||||
<salt.modules.state.apply_>`. Similarly, a runner can be called:
|
||||
<salt.modules.state.apply_>`, which performs a :ref:`highstate
|
||||
<running-highstate>`. Similarly, a runner can be called:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{% if data['data']['custom_var'] == 'runit' %}
|
||||
call_runit_orch:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.runit
|
||||
- args:
|
||||
- mods: orchestrate.runit
|
||||
{% endif %}
|
||||
|
||||
This example will execute the state.orchestrate runner and intiate an execution
|
||||
of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using
|
||||
``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate"
|
||||
as this is most likely to cause confusion.
|
||||
of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``.
|
||||
|
||||
Writing SLS Files
|
||||
-----------------
|
||||
Types of Reactions
|
||||
==================
|
||||
|
||||
Reactor SLS files are stored in the same location as State SLS files. This means
|
||||
that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are
|
||||
available to the reactor and orchestrator.
|
||||
============================== ==================================================================================
|
||||
Name Description
|
||||
============================== ==================================================================================
|
||||
:ref:`local <reactor-local>` Runs a :ref:`remote-execution function <all-salt.modules>` on targeted minions
|
||||
:ref:`runner <reactor-runner>` Executes a :ref:`runner function <all-salt.runners>`
|
||||
:ref:`wheel <reactor-wheel>` Executes a :ref:`wheel function <all-salt.wheel>` on the master
|
||||
:ref:`caller <reactor-caller>` Runs a :ref:`remote-execution function <all-salt.modules>` on a masterless minion
|
||||
============================== ==================================================================================
|
||||
|
||||
It is recommended to keep reactor and orchestrator SLS files in their own uniquely
|
||||
named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``,
|
||||
``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to
|
||||
read through this a few years down the road.
|
||||
.. note::
|
||||
The ``local`` and ``caller`` reaction types will be renamed for the Oxygen
|
||||
release. These reaction types were named after Salt's internal client
|
||||
interfaces, and are not intuitively named. Both ``local`` and ``caller``
|
||||
will continue to work in Reactor SLS files, but for the Oxygen release the
|
||||
documentation will be updated to reflect the new preferred naming.
|
||||
|
||||
The Goal of Writing Reactor SLS Files
|
||||
=====================================
|
||||
Where to Put Reactor SLS Files
|
||||
==============================
|
||||
|
||||
Reactor SLS files share the familiar syntax from Salt States but there are
|
||||
important differences. The goal of a Reactor file is to process a Salt event as
|
||||
quickly as possible and then to optionally start a **new** process in response.
|
||||
Reactor SLS files can come both from files local to the master, and from any of
|
||||
backends enabled via the :conf_master:`fileserver_backend` config option. Files
|
||||
placed in the Salt fileserver can be referenced using a ``salt://`` URL, just
|
||||
like they can in State SLS files.
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. The event tag is matched against the list of event tags under the
|
||||
``reactor`` section in the Salt Master config.
|
||||
3. The SLS files for any matches are Rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
It is recommended to place reactor and orchestrator SLS files in their own
|
||||
uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``,
|
||||
``reactor/``, etc., to keep them organized.
|
||||
|
||||
.. _reactor-sls:
|
||||
|
||||
Writing Reactor SLS
|
||||
===================
|
||||
|
||||
The different reaction types were developed separately and have historically
|
||||
had different methods for passing arguments. For the 2017.7.2 release a new,
|
||||
unified configuration schema has been introduced, which applies to all reaction
|
||||
types.
|
||||
|
||||
The old config schema will continue to be supported, and there is no plan to
|
||||
deprecate it at this time.
|
||||
|
||||
.. _reactor-local:
|
||||
|
||||
Local Reactions
|
||||
---------------
|
||||
|
||||
A ``local`` reaction runs a :ref:`remote-execution function <all-salt.modules>`
|
||||
on the targeted minions.
|
||||
|
||||
The old config schema required the positional and keyword arguments to be
|
||||
manually separated by the user under ``arg`` and ``kwarg`` parameters. However,
|
||||
this is not very user-friendly, as it forces the user to distinguish which type
|
||||
of argument is which, and make sure that positional arguments are ordered
|
||||
properly. Therefore, the new config schema is recommended if the master is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+-----------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+=============================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| install_zsh: | install_zsh: |
|
||||
| local.state.single: | local.state.single: |
|
||||
| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' |
|
||||
| - tgt_type: grain | - tgt_type: grain |
|
||||
| - args: | - arg: |
|
||||
| - fun: pkg.installed | - pkg.installed |
|
||||
| - name: zsh | - zsh |
|
||||
| - fromrepo: updates | - kwarg: |
|
||||
| | fromrepo: updates |
|
||||
+---------------------------------+-----------------------------+
|
||||
|
||||
This reaction would be equvalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates
|
||||
|
||||
.. note::
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd_async()
|
||||
<salt.client.LocalClient.cmd_async>` method can be passed at the same
|
||||
indentation level as ``tgt``.
|
||||
|
||||
.. note::
|
||||
``tgt_type`` is only required when the target expression defined in ``tgt``
|
||||
uses a :ref:`target type <targeting>` other than a minion ID glob.
|
||||
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0.
|
||||
|
||||
.. _reactor-runner:
|
||||
|
||||
Runner Reactions
|
||||
----------------
|
||||
|
||||
Runner reactions execute :ref:`runner functions <all-salt.runners>` locally on
|
||||
the master.
|
||||
|
||||
The old config schema called for passing arguments to the reaction directly
|
||||
under the name of the runner function. However, this can cause unpredictable
|
||||
interactions with the Reactor system's internal arguments. It is also possible
|
||||
to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above
|
||||
in :ref:`local reactions <reactor-local>`, but as noted above this is not very
|
||||
user-friendly. Therefore, the new config schema is recommended if the master
|
||||
is running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================================+=================================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| deploy_app: | deploy_app: |
|
||||
| runner.state.orchestrate: | runner.state.orchestrate: |
|
||||
| - args: | - mods: orchestrate.deploy_app |
|
||||
| - mods: orchestrate.deploy_app | - kwarg: |
|
||||
| - pillar: | pillar: |
|
||||
| event_tag: {{ tag }} | event_tag: {{ tag }} |
|
||||
| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} |
|
||||
+-------------------------------------------------+-------------------------------------------------+
|
||||
|
||||
Assuming that the event tag is ``foo``, and the data passed to the event is
|
||||
``{'bar': 'baz'}``, then this reaction is equivalent to running the following
|
||||
Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}'
|
||||
|
||||
.. _reactor-wheel:
|
||||
|
||||
Wheel Reactions
|
||||
---------------
|
||||
|
||||
Wheel reactions run :ref:`wheel functions <all-salt.wheel>` locally on the
|
||||
master.
|
||||
|
||||
Like :ref:`runner reactions <reactor-runner>`, the old config schema called for
|
||||
wheel reactions to have arguments passed directly under the name of the
|
||||
:ref:`wheel function <all-salt.wheel>` (or in ``arg`` or ``kwarg`` parameters).
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+-----------------------------------+---------------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+===================================+=================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| remove_key: | remove_key: |
|
||||
| wheel.key.delete: | wheel.key.delete: |
|
||||
| - args: | - match: {{ data['id'] }} |
|
||||
| - match: {{ data['id'] }} | |
|
||||
+-----------------------------------+---------------------------------+
|
||||
|
||||
.. _reactor-caller:
|
||||
|
||||
Caller Reactions
|
||||
----------------
|
||||
|
||||
Caller reactions run :ref:`remote-execution functions <all-salt.modules>` on a
|
||||
minion daemon's Reactor system. To run a Reactor on the minion, it is necessary
|
||||
to configure the :mod:`Reactor Engine <salt.engines.reactor>` in the minion
|
||||
config file, and then setup your watched events in a ``reactor`` section in the
|
||||
minion config file as well.
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Both the old and new config schemas involve passing arguments under an ``args``
|
||||
parameter. However, the old config schema only supports positional arguments.
|
||||
Therefore, the new config schema is recommended if the masterless minion is
|
||||
running a supported release.
|
||||
|
||||
The below two examples are equivalent:
|
||||
|
||||
+---------------------------------+---------------------------+
|
||||
| Supported in 2017.7.2 and later | Supported in all releases |
|
||||
+=================================+===========================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| touch_file: | touch_file: |
|
||||
| caller.file.touch: | caller.file.touch: |
|
||||
| - args: | - args: |
|
||||
| - name: /tmp/foo | - /tmp/foo |
|
||||
+---------------------------------+---------------------------+
|
||||
|
||||
This reaction is equivalent to running the following Salt command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call file.touch name=/tmp/foo
|
||||
|
||||
Best Practices for Writing Reactor SLS Files
|
||||
============================================
|
||||
|
||||
The Reactor works as follows:
|
||||
|
||||
1. The Salt Reactor watches Salt's event bus for new events.
|
||||
2. Each event's tag is matched against the list of event tags configured under
|
||||
the :conf_master:`reactor` section in the Salt Master config.
|
||||
3. The SLS files for any matches are rendered into a data structure that
|
||||
represents one or more function calls.
|
||||
4. That data structure is given to a pool of worker threads for execution.
|
||||
|
||||
Matching and rendering Reactor SLS files is done sequentially in a single
|
||||
process. Complex Jinja that calls out to slow Execution or Runner modules slows
|
||||
down the rendering and causes other reactions to pile up behind the current
|
||||
one. The worker pool is designed to handle complex and long-running processes
|
||||
such as Salt Orchestrate.
|
||||
process. For that reason, reactor SLS files should contain few individual
|
||||
reactions (one, if at all possible). Also, keep in mind that reactions are
|
||||
fired asynchronously (with the exception of :ref:`caller <reactor-caller>`) and
|
||||
do *not* support :ref:`requisites <requisites>`.
|
||||
|
||||
tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process
|
||||
started by the worker threads can be long-running. Using the reactor to fire
|
||||
an orchestrate runner would be ideal.
|
||||
Complex Jinja templating that calls out to slow :ref:`remote-execution
|
||||
<all-salt.modules>` or :ref:`runner <all-salt.runners>` functions slows down
|
||||
the rendering and causes other reactions to pile up behind the current one. The
|
||||
worker pool is designed to handle complex and long-running processes like
|
||||
:ref:`orchestration <orchestrate-runner>` jobs.
|
||||
|
||||
Therefore, when complex tasks are in order, :ref:`orchestration
|
||||
<orchestrate-runner>` is a natural fit. Orchestration SLS files can be more
|
||||
complex, and use requisites. Performing a complex task using orchestration lets
|
||||
the Reactor system fire off the orchestration job and proceed with processing
|
||||
other reactions.
|
||||
|
||||
.. _reactor-jinja-context:
|
||||
|
||||
Jinja Context
|
||||
-------------
|
||||
=============
|
||||
|
||||
Reactor files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are not available. The ``salt`` object is available for calling
|
||||
Runner and Execution modules but it should be used sparingly and only for quick
|
||||
tasks for the reasons mentioned above.
|
||||
Reactor SLS files only have access to a minimal Jinja context. ``grains`` and
|
||||
``pillar`` are *not* available. The ``salt`` object is available for calling
|
||||
:ref:`remote-execution <all-salt.modules>` or :ref:`runner <all-salt.runners>`
|
||||
functions, but it should be used sparingly and only for quick tasks for the
|
||||
reasons mentioned above.
|
||||
|
||||
In addition to the ``salt`` object, the following variables are available in
|
||||
the Jinja context:
|
||||
|
||||
- ``tag`` - the tag from the event that triggered execution of the Reactor SLS
|
||||
file
|
||||
- ``data`` - the event's data dictionary
|
||||
|
||||
The ``data`` dict will contain an ``id`` key containing the minion ID, if the
|
||||
event was fired from a minion, and a ``data`` key containing the data passed to
|
||||
the event.
|
||||
|
||||
Advanced State System Capabilities
|
||||
----------------------------------
|
||||
==================================
|
||||
|
||||
Reactor SLS files, by design, do not support Requisites, ordering,
|
||||
``onlyif``/``unless`` conditionals and most other powerful constructs from
|
||||
Salt's State system.
|
||||
Reactor SLS files, by design, do not support :ref:`requisites <requisites>`,
|
||||
ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs
|
||||
from Salt's State system.
|
||||
|
||||
Complex Master-side operations are best performed by Salt's Orchestrate system
|
||||
so using the Reactor to kick off an Orchestrate run is a very common pairing.
|
||||
|
@ -166,7 +370,7 @@ For example:
|
|||
# /etc/salt/master.d/reactor.conf
|
||||
# A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"}
|
||||
reactor:
|
||||
- myco/custom/event:
|
||||
- my/custom/event:
|
||||
- /srv/reactor/some_event.sls
|
||||
|
||||
.. code-block:: jinja
|
||||
|
@ -174,15 +378,15 @@ For example:
|
|||
# /srv/reactor/some_event.sls
|
||||
invoke_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls
|
||||
- kwarg:
|
||||
pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json() }}
|
||||
- args:
|
||||
- mods: orchestrate.do_complex_thing
|
||||
- pillar:
|
||||
event_tag: {{ tag }}
|
||||
event_data: {{ data|json }}
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
# /srv/salt/_orch/do_complex_thing.sls
|
||||
# /srv/salt/orchestrate/do_complex_thing.sls
|
||||
{% set tag = salt.pillar.get('event_tag') %}
|
||||
{% set data = salt.pillar.get('event_data') %}
|
||||
|
||||
|
@ -209,7 +413,7 @@ For example:
|
|||
.. _beacons-and-reactors:
|
||||
|
||||
Beacons and Reactors
|
||||
--------------------
|
||||
====================
|
||||
|
||||
An event initiated by a beacon, when it arrives at the master will be wrapped
|
||||
inside a second event, such that the data object containing the beacon
|
||||
|
@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file,
|
|||
you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{
|
||||
data['id'] }}`` as for events initiated directly on the event bus.
|
||||
|
||||
Similarly, the data dictionary attached to the event would be located in
|
||||
``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``.
|
||||
|
||||
See the :ref:`beacon documentation <beacon-example>` for examples.
|
||||
|
||||
Fire an event
|
||||
=============
|
||||
Manually Firing an Event
|
||||
========================
|
||||
|
||||
To fire an event from a minion call ``event.send``
|
||||
From the Master
|
||||
---------------
|
||||
|
||||
Use the :py:func:`event.send <salt.runners.event.send>` runner:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.send 'foo' '{orchestrate: refresh}'
|
||||
salt-run event.send foo '{orchestrate: refresh}'
|
||||
|
||||
After this is called, any reactor sls files matching event tag ``foo`` will
|
||||
execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``.
|
||||
From the Minion
|
||||
---------------
|
||||
|
||||
See :py:mod:`salt.modules.event` for more information.
|
||||
To fire an event to the master from a minion, call :py:func:`event.send
|
||||
<salt.modules.event.send>`:
|
||||
|
||||
Knowing what event is being fired
|
||||
=================================
|
||||
.. code-block:: bash
|
||||
|
||||
The best way to see exactly what events are fired and what data is available in
|
||||
each event is to use the :py:func:`state.event runner
|
||||
salt-call event.send foo '{orchestrate: refresh}'
|
||||
|
||||
To fire an event to the minion's local event bus, call :py:func:`event.fire
|
||||
<salt.modules.event.fire>`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call event.fire '{orchestrate: refresh}' foo
|
||||
|
||||
Referencing Data Passed in Events
|
||||
---------------------------------
|
||||
|
||||
Assuming any of the above examples, any reactor SLS files triggered by watching
|
||||
the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}``
|
||||
equal to ``'refresh'``.
|
||||
|
||||
Getting Information About Events
|
||||
================================
|
||||
|
||||
The best way to see exactly what events have been fired and what data is
|
||||
available in each event is to use the :py:func:`state.event runner
|
||||
<salt.runners.state.event>`.
|
||||
|
||||
.. seealso:: :ref:`Common Salt Events <event-master_events>`
|
||||
|
@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file).
|
|||
view the result of referencing Jinja variables. If the result is empty then
|
||||
Jinja produced an empty result and the Reactor will ignore it.
|
||||
|
||||
.. _reactor-structure:
|
||||
Passing Event Data to Minions or Orchestration as Pillar
|
||||
--------------------------------------------------------
|
||||
|
||||
Understanding the Structure of Reactor Formulas
|
||||
===============================================
|
||||
|
||||
**I.e., when to use `arg` and `kwarg` and when to specify the function
|
||||
arguments directly.**
|
||||
|
||||
While the reactor system uses the same basic data structure as the state
|
||||
system, the functions that will be called using that data structure are
|
||||
different functions than are called via Salt's state system. The Reactor can
|
||||
call Runner modules using the `runner` prefix, Wheel modules using the `wheel`
|
||||
prefix, and can also cause minions to run Execution modules using the `local`
|
||||
prefix.
|
||||
|
||||
.. versionchanged:: 2014.7.0
|
||||
The ``cmd`` prefix was renamed to ``local`` for consistency with other
|
||||
parts of Salt. A backward-compatible alias was added for ``cmd``.
|
||||
|
||||
The Reactor runs on the master and calls functions that exist on the master. In
|
||||
the case of Runner and Wheel functions the Reactor can just call those
|
||||
functions directly since they exist on the master and are run on the master.
|
||||
|
||||
In the case of functions that exist on minions and are run on minions, the
|
||||
Reactor still needs to call a function on the master in order to send the
|
||||
necessary data to the minion so the minion can execute that function.
|
||||
|
||||
The Reactor calls functions exposed in :ref:`Salt's Python API documentation
|
||||
<client-apis>`. and thus the structure of Reactor files very transparently
|
||||
reflects the function signatures of those functions.
|
||||
|
||||
Calling Execution modules on Minions
|
||||
------------------------------------
|
||||
|
||||
The Reactor sends commands down to minions in the exact same way Salt's CLI
|
||||
interface does. It calls a function locally on the master that sends the name
|
||||
of the function as well as a list of any arguments and a dictionary of any
|
||||
keyword arguments that the minion should use to execute that function.
|
||||
|
||||
Specifically, the Reactor calls the async version of :py:meth:`this function
|
||||
<salt.client.LocalClient.cmd>`. You can see that function has 'arg' and 'kwarg'
|
||||
parameters which are both values that are sent down to the minion.
|
||||
|
||||
Executing remote commands maps to the :strong:`LocalClient` interface which is
|
||||
used by the :strong:`salt` command. This interface more specifically maps to
|
||||
the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This
|
||||
means that the arguments passed are being passed to the :strong:`cmd_async`
|
||||
method, not the remote method. A field starts with :strong:`local` to use the
|
||||
:strong:`LocalClient` subsystem. The result is, to execute a remote command,
|
||||
a reactor formula would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: '*'
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
The ``arg`` option takes a list of arguments as they would be presented on the
|
||||
command line, so the above declaration is the same as running this salt
|
||||
command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' cmd.run 'rm -rf /tmp/*'
|
||||
|
||||
Use the ``tgt_type`` argument to specify a matcher:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'os:Ubuntu'
|
||||
- tgt_type: grain
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
|
||||
clean_tmp:
|
||||
local.cmd.run:
|
||||
- tgt: 'G@roles:hbase_master'
|
||||
- tgt_type: compound
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
.. note::
|
||||
The ``tgt_type`` argument was named ``expr_form`` in releases prior to
|
||||
2017.7.0 (2016.11.x and earlier).
|
||||
|
||||
Any other parameters in the :py:meth:`LocalClient().cmd()
|
||||
<salt.client.LocalClient.cmd>` method can be specified as well.
|
||||
|
||||
Executing Reactors from the Minion
|
||||
----------------------------------
|
||||
|
||||
The minion can be setup to use the Reactor via a reactor engine. This just
|
||||
sets up and listens to the minion's event bus, instead of to the master's.
|
||||
|
||||
The biggest difference is that you have to use the caller method on the
|
||||
Reactor, which is the equivalent of salt-call, to run your commands.
|
||||
|
||||
:mod:`Reactor Engine setup <salt.engines.reactor>`
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clean_tmp:
|
||||
caller.cmd.run:
|
||||
- arg:
|
||||
- rm -rf /tmp/*
|
||||
|
||||
.. note:: Masterless Minions use this Reactor
|
||||
|
||||
This is the only way to run the Reactor if you use masterless minions.
|
||||
|
||||
Calling Runner modules and Wheel modules
|
||||
----------------------------------------
|
||||
|
||||
Calling Runner modules and Wheel modules from the Reactor uses a more direct
|
||||
syntax since the function is being executed locally instead of sending a
|
||||
command to a remote system to be executed there. There are no 'arg' or 'kwarg'
|
||||
parameters (unless the Runner function or Wheel function accepts a parameter
|
||||
with either of those names.)
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
clear_the_grains_cache_for_all_minions:
|
||||
runner.cache.clear_grains
|
||||
|
||||
If the :py:func:`the runner takes arguments <salt.runners.cloud.profile>` then
|
||||
they must be specified as keyword arguments.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
spin_up_more_web_machines:
|
||||
runner.cloud.profile:
|
||||
- prof: centos_6
|
||||
- instances:
|
||||
- web11 # These VM names would be generated via Jinja in a
|
||||
- web12 # real-world example.
|
||||
|
||||
To determine the proper names for the arguments, check the documentation
|
||||
or source code for the runner function you wish to call.
|
||||
|
||||
Passing event data to Minions or Orchestrate as Pillar
|
||||
------------------------------------------------------
|
||||
|
||||
An interesting trick to pass data from the Reactor script to
|
||||
An interesting trick to pass data from the Reactor SLS file to
|
||||
:py:func:`state.apply <salt.modules.state.apply_>` is to pass it as inline
|
||||
Pillar data since both functions take a keyword argument named ``pillar``.
|
||||
|
||||
|
@ -484,10 +567,9 @@ from the event to the state file via inline Pillar.
|
|||
add_new_minion_to_pool:
|
||||
local.state.apply:
|
||||
- tgt: 'haproxy*'
|
||||
- arg:
|
||||
- haproxy.refresh_pool
|
||||
- kwarg:
|
||||
pillar:
|
||||
- args:
|
||||
- mods: haproxy.refresh_pool
|
||||
- pillar:
|
||||
new_minion: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
|
@ -503,17 +585,16 @@ This works with Orchestrate files as well:
|
|||
|
||||
call_some_orchestrate_file:
|
||||
runner.state.orchestrate:
|
||||
- mods: _orch.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
- args:
|
||||
- mods: orchestrate.some_orchestrate_file
|
||||
- pillar:
|
||||
stuff: things
|
||||
|
||||
Which is equivalent to the following command at the CLI:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls.
|
||||
salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}'
|
||||
|
||||
Finally, that data is available in the state file using the normal Pillar
|
||||
lookup syntax. The following example is grabbing web server names and IP
|
||||
|
@ -564,7 +645,7 @@ includes the minion id, which we can use for matching.
|
|||
- 'salt/minion/ink*/start':
|
||||
- /srv/reactor/auth-complete.sls
|
||||
|
||||
In this sls file, we say that if the key was rejected we will delete the key on
|
||||
In this SLS file, we say that if the key was rejected we will delete the key on
|
||||
the master and then also tell the master to ssh in to the minion and tell it to
|
||||
restart the minion, since a minion process will die if the key is rejected.
|
||||
|
||||
|
@ -580,19 +661,21 @@ authentication every ten seconds by default.
|
|||
{% if not data['result'] and data['id'].startswith('ink') %}
|
||||
minion_remove:
|
||||
wheel.key.delete:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
minion_rejoin:
|
||||
local.cmd.run:
|
||||
- tgt: salt-master.domain.tld
|
||||
- arg:
|
||||
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
- args:
|
||||
- cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart'
|
||||
{% endif %}
|
||||
|
||||
{# Ink server is sending new key -- accept this key #}
|
||||
{% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %}
|
||||
minion_add:
|
||||
wheel.key.accept:
|
||||
- match: {{ data['id'] }}
|
||||
- args:
|
||||
- match: {{ data['id'] }}
|
||||
{% endif %}
|
||||
|
||||
No if statements are needed here because we already limited this action to just
|
||||
|
|
|
@ -51,6 +51,19 @@ New NaCl Renderer
|
|||
|
||||
A new renderer has been added for encrypted data.
|
||||
|
||||
New support for Cisco UCS Chassis
|
||||
---------------------------------
|
||||
|
||||
The salt proxy minion now allows for control of Cisco UCS chassis. See
|
||||
the `cimc` modules for details.
|
||||
|
||||
New salt-ssh roster
|
||||
-------------------
|
||||
|
||||
A new roster has been added that allows users to pull in a list of hosts
|
||||
for salt-ssh targeting from a ~/.ssh configuration. For full details,
|
||||
please see the `sshconfig` roster.
|
||||
|
||||
New GitFS Features
|
||||
------------------
|
||||
|
||||
|
@ -154,6 +167,14 @@ Newer PyWinRM Versions
|
|||
Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self
|
||||
signed certificates. :ref:`Here<new-pywinrm>` for more information.
|
||||
|
||||
DigitalOcean
|
||||
------------
|
||||
|
||||
The DigitalOcean driver has been renamed to conform to the company's name. The
|
||||
new driver name is ``digitalocean``. The old name ``digital_ocean`` and a
|
||||
short one ``do`` will still be supported through virtual aliases, this is mostly
|
||||
cosmetic.
|
||||
|
||||
Solaris Logical Domains In Virtual Grain
|
||||
----------------------------------------
|
||||
|
||||
|
@ -161,9 +182,15 @@ Support has been added to the ``virtual`` grain for detecting Solaris LDOMs
|
|||
running on T-Series SPARC hardware. The ``virtual_subtype`` grain is
|
||||
populated as a list of domain roles.
|
||||
|
||||
Lists of comments in state returns
|
||||
----------------------------------
|
||||
|
||||
State functions can now return a list of strings for the ``comment`` field,
|
||||
as opposed to only a single string.
|
||||
This is meant to ease writing states with multiple or multi-part comments.
|
||||
|
||||
Beacon configuration changes
|
||||
----------------------------------------
|
||||
----------------------------
|
||||
|
||||
In order to remain consistent and to align with other Salt components such as states,
|
||||
support for configuring beacons using dictionary based configuration has been deprecated
|
||||
|
@ -780,3 +807,7 @@ Other Miscellaneous Deprecations
|
|||
The ``version.py`` file had the following changes:
|
||||
|
||||
- The ``rc_info`` function was removed. Please use ``pre_info`` instead.
|
||||
|
||||
Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be
|
||||
used instead. The removal of these warnings does not have a behavior change. Only
|
||||
the warning text was removed.
|
||||
|
|
|
@ -13,7 +13,7 @@ Using Apache Libcloud for declarative and procedural multi-cloud orchestration
|
|||
|
||||
Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows
|
||||
you to manage different cloud resources through a unified and easy to use API. Apache Libcloud supports over
|
||||
60 cloud platforms, including Amazon, Microsoft Azure, Digital Ocean, Google Cloud Platform and OpenStack.
|
||||
60 cloud platforms, including Amazon, Microsoft Azure, DigitalOcean, Google Cloud Platform and OpenStack.
|
||||
|
||||
Execution and state modules are available for Compute, DNS, Storage and Load Balancer drivers from Apache Libcloud in
|
||||
SaltStack.
|
||||
|
|
|
@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
|
|||
:param bool allusers: This parameter is specific to `.msi` installations. It
|
||||
tells `msiexec` to install the software for all users. The default is True.
|
||||
|
||||
:param bool cache_dir: If true, the entire directory where the installer resides
|
||||
will be recursively cached. This is useful for installers that depend on
|
||||
other files in the same directory for installation.
|
||||
:param bool cache_dir: If true when installer URL begins with salt://, the
|
||||
entire directory where the installer resides will be recursively cached.
|
||||
This is useful for installers that depend on other files in the same
|
||||
directory for installation.
|
||||
|
||||
.. note:: Only applies to salt: installer URLs.
|
||||
:param str cache_file:
|
||||
When installer URL begins with salt://, this indicates single file to copy
|
||||
down for use with the installer. Copied to the same location as the
|
||||
installer. Use this over ``cache_dir`` if there are many files in the
|
||||
directory and you only need a specific file and don't want to cache
|
||||
additional files that may reside in the installer directory.
|
||||
|
||||
Here's an example for a software package that has dependent files:
|
||||
|
||||
|
|
|
@ -132,7 +132,7 @@ fi
|
|||
###############################################################################
|
||||
# Remove the salt from the paths.d
|
||||
###############################################################################
|
||||
if [ ! -f "/etc/paths.d/salt" ]; then
|
||||
if [ -f "/etc/paths.d/salt" ]; then
|
||||
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "/etc/paths.d/salt"
|
||||
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
|
|
@ -35,8 +35,9 @@ _salt_get_keys(){
|
|||
}
|
||||
|
||||
_salt(){
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:-"$HOME/.cache/salt-comp-cache_functions"}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:-"last hour"}
|
||||
CACHE_DIR="$HOME/.cache/salt-comp-cache_functions"
|
||||
local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR}
|
||||
local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'}
|
||||
|
||||
if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then
|
||||
mkdir -p "$(dirname ${_salt_cache_functions})"
|
||||
|
|
|
@ -89,7 +89,7 @@ if Defined x (
|
|||
if %Python%==2 (
|
||||
Set "PyDir=C:\Python27"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
)
|
||||
Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts"
|
||||
|
||||
|
|
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
|
|||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower())))
|
|||
|
||||
#==============================================================================
|
||||
# Update PIP and SetupTools
|
||||
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
|
||||
|
@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
|
|||
|
||||
#==============================================================================
|
||||
# Install pypi resources using pip
|
||||
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
|
||||
|
@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
|
|||
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
|
||||
}
|
||||
|
||||
#==============================================================================
|
||||
# Move PyWin32 DLL's to site-packages\win32
|
||||
#==============================================================================
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
|
||||
|
||||
# Remove pythonwin directory
|
||||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Install PyYAML with CLoader
|
||||
# This has to be a compiled binary to get the CLoader
|
||||
|
|
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
|
|||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
|
||||
$p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i
|
|||
|
||||
# Move DLL's to Python Root
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
|
@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"
|
|||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Fix PyCrypto
|
||||
#==============================================================================
|
||||
|
|
|
@ -56,7 +56,7 @@ if %Python%==2 (
|
|||
Set "PyVerMajor=2"
|
||||
Set "PyVerMinor=7"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
Set "PyVerMajor=3"
|
||||
Set "PyVerMinor=5"
|
||||
)
|
||||
|
|
|
@ -16,9 +16,10 @@ if %errorLevel%==0 (
|
|||
)
|
||||
echo.
|
||||
|
||||
:CheckPython2
|
||||
if exist "\Python27" goto RemovePython2
|
||||
if exist "\Program Files\Python35" goto RemovePython3
|
||||
goto eof
|
||||
|
||||
goto CheckPython3
|
||||
|
||||
:RemovePython2
|
||||
rem Uninstall Python 2.7
|
||||
|
@ -47,25 +48,30 @@ goto eof
|
|||
|
||||
goto eof
|
||||
|
||||
:CheckPython3
|
||||
if exist "\Python35" goto RemovePython3
|
||||
|
||||
goto eof
|
||||
|
||||
:RemovePython3
|
||||
echo %0 :: Uninstalling Python 3 ...
|
||||
echo ---------------------------------------------------------------------
|
||||
:: 64 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
|
||||
echo %0 :: - 3.5.3 64bit
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
|
||||
)
|
||||
|
||||
:: 32 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
|
||||
echo %0 :: - 3.5.3 32bit
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
|
||||
)
|
||||
|
||||
rem wipe the Python directory
|
||||
echo %0 :: Removing the C:\Program Files\Python35 Directory ...
|
||||
echo %0 :: Removing the C:\Python35 Directory ...
|
||||
echo ---------------------------------------------------------------------
|
||||
rd /s /q "C:\Program Files\Python35"
|
||||
rd /s /q "C:\Python35"
|
||||
if %errorLevel%==0 (
|
||||
echo Successful
|
||||
) else (
|
||||
|
|
|
@ -44,7 +44,7 @@ ${StrStrAdv}
|
|||
!define CPUARCH "x86"
|
||||
!endif
|
||||
|
||||
; Part of the Trim function for Strings
|
||||
# Part of the Trim function for Strings
|
||||
!define Trim "!insertmacro Trim"
|
||||
!macro Trim ResultVar String
|
||||
Push "${String}"
|
||||
|
@ -61,27 +61,27 @@ ${StrStrAdv}
|
|||
!define MUI_UNICON "salt.ico"
|
||||
!define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp"
|
||||
|
||||
; Welcome page
|
||||
# Welcome page
|
||||
!insertmacro MUI_PAGE_WELCOME
|
||||
|
||||
; License page
|
||||
# License page
|
||||
!insertmacro MUI_PAGE_LICENSE "LICENSE.txt"
|
||||
|
||||
; Configure Minion page
|
||||
# Configure Minion page
|
||||
Page custom pageMinionConfig pageMinionConfig_Leave
|
||||
|
||||
; Instfiles page
|
||||
# Instfiles page
|
||||
!insertmacro MUI_PAGE_INSTFILES
|
||||
|
||||
; Finish page (Customized)
|
||||
# Finish page (Customized)
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave
|
||||
!insertmacro MUI_PAGE_FINISH
|
||||
|
||||
; Uninstaller pages
|
||||
# Uninstaller pages
|
||||
!insertmacro MUI_UNPAGE_INSTFILES
|
||||
|
||||
; Language files
|
||||
# Language files
|
||||
!insertmacro MUI_LANGUAGE "English"
|
||||
|
||||
|
||||
|
@ -201,8 +201,8 @@ ShowInstDetails show
|
|||
ShowUnInstDetails show
|
||||
|
||||
|
||||
; Check and install Visual C++ redist packages
|
||||
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
# Check and install Visual C++ redist packages
|
||||
# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
Section -Prerequisites
|
||||
|
||||
Var /GLOBAL VcRedistName
|
||||
|
@ -211,12 +211,12 @@ Section -Prerequisites
|
|||
Var /Global CheckVcRedist
|
||||
StrCpy $CheckVcRedist "False"
|
||||
|
||||
; Visual C++ 2015 redist packages
|
||||
# Visual C++ 2015 redist packages
|
||||
!define PY3_VC_REDIST_NAME "VC_Redist_2015"
|
||||
!define PY3_VC_REDIST_X64_GUID "{50A2BC33-C9CD-3BF1-A8FF-53C10A0B183C}"
|
||||
!define PY3_VC_REDIST_X86_GUID "{BBF2AC74-720C-3CB3-8291-5E34039232FA}"
|
||||
|
||||
; Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
# Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
!define PY2_VC_REDIST_NAME "VC_Redist_2008_SP1_MFC"
|
||||
!define PY2_VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
|
||||
!define PY2_VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
|
||||
|
@ -239,7 +239,7 @@ Section -Prerequisites
|
|||
StrCpy $VcRedistGuid ${PY2_VC_REDIST_X86_GUID}
|
||||
${EndIf}
|
||||
|
||||
; VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
# VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
${If} ${AtMostWin2008R2}
|
||||
StrCpy $CheckVcRedist "True"
|
||||
${EndIf}
|
||||
|
@ -255,20 +255,41 @@ Section -Prerequisites
|
|||
"$VcRedistName is currently not installed. Would you like to install?" \
|
||||
/SD IDYES IDNO endVcRedist
|
||||
|
||||
ClearErrors
|
||||
; The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
# The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
SetOutPath "$INSTDIR\"
|
||||
File "..\prereqs\vcredist.exe"
|
||||
; /passive used by 2015 installer
|
||||
; /qb! used by 2008 installer
|
||||
; It just ignores the unrecognized switches...
|
||||
ExecWait "$INSTDIR\vcredist.exe /qb! /passive"
|
||||
IfErrors 0 endVcRedist
|
||||
# If an output variable is specified ($0 in the case below),
|
||||
# ExecWait sets the variable with the exit code (and only sets the
|
||||
# error flag if an error occurs; if an error occurs, the contents
|
||||
# of the user variable are undefined).
|
||||
# http://nsis.sourceforge.net/Reference/ExecWait
|
||||
# /passive used by 2015 installer
|
||||
# /qb! used by 2008 installer
|
||||
# It just ignores the unrecognized switches...
|
||||
ClearErrors
|
||||
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
|
||||
IfErrors 0 CheckVcRedistErrorCode
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed to install. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
Goto endVcRedist
|
||||
|
||||
CheckVcRedistErrorCode:
|
||||
# Check for Reboot Error Code (3010)
|
||||
${If} $0 == 3010
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName installed but requires a restart to complete." \
|
||||
/SD IDOK
|
||||
|
||||
# Check for any other errors
|
||||
${ElseIfNot} $0 == 0
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed with ErrorCode: $0. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
${EndIf}
|
||||
|
||||
endVcRedist:
|
||||
|
||||
${EndIf}
|
||||
|
||||
${EndIf}
|
||||
|
@ -294,12 +315,12 @@ Function .onInit
|
|||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
; Check for existing installation
|
||||
# Check for existing installation
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" checkOther
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
|
@ -307,12 +328,12 @@ Function .onInit
|
|||
Abort
|
||||
|
||||
checkOther:
|
||||
; Check for existing installation of full salt
|
||||
# Check for existing installation of full salt
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" skipUninstall
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME_OTHER} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
|
@ -321,22 +342,22 @@ Function .onInit
|
|||
|
||||
uninst:
|
||||
|
||||
; Get current Silent status
|
||||
# Get current Silent status
|
||||
StrCpy $R0 0
|
||||
${If} ${Silent}
|
||||
StrCpy $R0 1
|
||||
${EndIf}
|
||||
|
||||
; Turn on Silent mode
|
||||
# Turn on Silent mode
|
||||
SetSilent silent
|
||||
|
||||
; Don't remove all directories
|
||||
# Don't remove all directories
|
||||
StrCpy $DeleteInstallDir 0
|
||||
|
||||
; Uninstall silently
|
||||
# Uninstall silently
|
||||
Call uninstallSalt
|
||||
|
||||
; Set it back to Normal mode, if that's what it was before
|
||||
# Set it back to Normal mode, if that's what it was before
|
||||
${If} $R0 == 0
|
||||
SetSilent normal
|
||||
${EndIf}
|
||||
|
@ -350,7 +371,7 @@ Section -Post
|
|||
|
||||
WriteUninstaller "$INSTDIR\uninst.exe"
|
||||
|
||||
; Uninstall Registry Entries
|
||||
# Uninstall Registry Entries
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"DisplayName" "$(^Name)"
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
|
@ -366,19 +387,19 @@ Section -Post
|
|||
WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \
|
||||
"DependOnService" "nsi"
|
||||
|
||||
; Set the estimated size
|
||||
# Set the estimated size
|
||||
${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2
|
||||
IntFmt $0 "0x%08X" $0
|
||||
WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"EstimatedSize" "$0"
|
||||
|
||||
; Commandline Registry Entries
|
||||
# Commandline Registry Entries
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
|
||||
; Register the Salt-Minion Service
|
||||
# Register the Salt-Minion Service
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
|
||||
|
@ -398,12 +419,12 @@ SectionEnd
|
|||
|
||||
Function .onInstSuccess
|
||||
|
||||
; If StartMinionDelayed is 1, then set the service to start delayed
|
||||
# If StartMinionDelayed is 1, then set the service to start delayed
|
||||
${If} $StartMinionDelayed == 1
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
|
||||
${EndIf}
|
||||
|
||||
; If start-minion is 1, then start the service
|
||||
# If start-minion is 1, then start the service
|
||||
${If} $StartMinion == 1
|
||||
nsExec::Exec 'net start salt-minion'
|
||||
${EndIf}
|
||||
|
@ -413,10 +434,11 @@ FunctionEnd
|
|||
|
||||
Function un.onInit
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Uninstaller: Remove Installation Directory
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/delete-install-dir" $R1
|
||||
IfErrors delete_install_dir_not_found
|
||||
StrCpy $DeleteInstallDir 1
|
||||
|
@ -434,7 +456,7 @@ Section Uninstall
|
|||
|
||||
Call un.uninstallSalt
|
||||
|
||||
; Remove C:\salt from the Path
|
||||
# Remove C:\salt from the Path
|
||||
Push "C:\salt"
|
||||
Call un.RemoveFromPath
|
||||
|
||||
|
@ -444,27 +466,27 @@ SectionEnd
|
|||
!macro uninstallSalt un
|
||||
Function ${un}uninstallSalt
|
||||
|
||||
; Make sure we're in the right directory
|
||||
# Make sure we're in the right directory
|
||||
${If} $INSTDIR == "c:\salt\bin\Scripts"
|
||||
StrCpy $INSTDIR "C:\salt"
|
||||
${EndIf}
|
||||
|
||||
; Stop and Remove salt-minion service
|
||||
# Stop and Remove salt-minion service
|
||||
nsExec::Exec 'net stop salt-minion'
|
||||
nsExec::Exec 'sc delete salt-minion'
|
||||
|
||||
; Stop and remove the salt-master service
|
||||
# Stop and remove the salt-master service
|
||||
nsExec::Exec 'net stop salt-master'
|
||||
nsExec::Exec 'sc delete salt-master'
|
||||
|
||||
; Remove files
|
||||
# Remove files
|
||||
Delete "$INSTDIR\uninst.exe"
|
||||
Delete "$INSTDIR\nssm.exe"
|
||||
Delete "$INSTDIR\salt*"
|
||||
Delete "$INSTDIR\vcredist.exe"
|
||||
RMDir /r "$INSTDIR\bin"
|
||||
|
||||
; Remove Registry entries
|
||||
# Remove Registry entries
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}"
|
||||
|
@ -474,17 +496,17 @@ Function ${un}uninstallSalt
|
|||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}"
|
||||
|
||||
; Automatically close when finished
|
||||
# Automatically close when finished
|
||||
SetAutoClose true
|
||||
|
||||
; Prompt to remove the Installation directory
|
||||
# Prompt to remove the Installation directory
|
||||
${IfNot} $DeleteInstallDir == 1
|
||||
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
|
||||
"Would you like to completely remove $INSTDIR and all of its contents?" \
|
||||
/SD IDNO IDNO finished
|
||||
${EndIf}
|
||||
|
||||
; Make sure you're not removing Program Files
|
||||
# Make sure you're not removing Program Files
|
||||
${If} $INSTDIR != 'Program Files'
|
||||
${AndIf} $INSTDIR != 'Program Files (x86)'
|
||||
RMDir /r "$INSTDIR"
|
||||
|
@ -526,7 +548,7 @@ FunctionEnd
|
|||
|
||||
Function Trim
|
||||
|
||||
Exch $R1 ; Original string
|
||||
Exch $R1 # Original string
|
||||
Push $R2
|
||||
|
||||
Loop:
|
||||
|
@ -558,36 +580,36 @@ Function Trim
|
|||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; StrStr Function
|
||||
; - find substring in a string
|
||||
;
|
||||
; Usage:
|
||||
; Push "this is some string"
|
||||
; Push "some"
|
||||
; Call StrStr
|
||||
; Pop $0 ; "some string"
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
#
|
||||
# Usage:
|
||||
# Push "this is some string"
|
||||
# Push "some"
|
||||
# Call StrStr
|
||||
# Pop $0 ; "some string"
|
||||
#------------------------------------------------------------------------------
|
||||
!macro StrStr un
|
||||
Function ${un}StrStr
|
||||
|
||||
Exch $R1 ; $R1=substring, stack=[old$R1,string,...]
|
||||
Exch ; stack=[string,old$R1,...]
|
||||
Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 ; $R3=strlen(substring)
|
||||
Push $R4 ; $R4=count
|
||||
Push $R5 ; $R5=tmp
|
||||
StrLen $R3 $R1 ; Get the length of the Search String
|
||||
StrCpy $R4 0 ; Set the counter to 0
|
||||
Exch $R1 # $R1=substring, stack=[old$R1,string,...]
|
||||
Exch # stack=[string,old$R1,...]
|
||||
Exch $R2 # $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 # $R3=strlen(substring)
|
||||
Push $R4 # $R4=count
|
||||
Push $R5 # $R5=tmp
|
||||
StrLen $R3 $R1 # Get the length of the Search String
|
||||
StrCpy $R4 0 # Set the counter to 0
|
||||
|
||||
loop:
|
||||
StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is
|
||||
; the size of the length of the search string
|
||||
StrCmp $R5 $R1 done ; Is the contents of the window the same as
|
||||
; search string, then done
|
||||
StrCmp $R5 "" done ; Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 ; Shift the windows one character
|
||||
Goto loop ; Repeat
|
||||
StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is
|
||||
# the size of the length of the search string
|
||||
StrCmp $R5 $R1 done # Is the contents of the window the same as
|
||||
# search string, then done
|
||||
StrCmp $R5 "" done # Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 # Shift the windows one character
|
||||
Goto loop # Repeat
|
||||
|
||||
done:
|
||||
StrCpy $R1 $R2 "" $R4
|
||||
|
@ -595,7 +617,7 @@ Function ${un}StrStr
|
|||
Pop $R4
|
||||
Pop $R3
|
||||
Pop $R2
|
||||
Exch $R1 ; $R1=old$R1, stack=[result,...]
|
||||
Exch $R1 # $R1=old$R1, stack=[result,...]
|
||||
|
||||
FunctionEnd
|
||||
!macroend
|
||||
|
@ -603,74 +625,74 @@ FunctionEnd
|
|||
!insertmacro StrStr "un."
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; AddToPath Function
|
||||
; - Adds item to Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call AddToPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# AddToPath Function
|
||||
# - Adds item to Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call AddToPath
|
||||
#------------------------------------------------------------------------------
|
||||
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
Function AddToPath
|
||||
|
||||
Exch $0 ; Path to add
|
||||
Push $1 ; Current Path
|
||||
Push $2 ; Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 ; Handle to Reg / Length of Path
|
||||
Push $4 ; Result of Registry Call
|
||||
Exch $0 # Path to add
|
||||
Push $1 # Current Path
|
||||
Push $2 # Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 # Handle to Reg / Length of Path
|
||||
Push $4 # Result of Registry Call
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
"You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Check if already in PATH
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0;" ; The string to find
|
||||
# Check if already in PATH
|
||||
Push "$1;" # The string to search
|
||||
Push "$0;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result of the search
|
||||
StrCmp $2 "" 0 done ; String not found, try again with ';' at the end
|
||||
; Otherwise, it's already in the path
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0\;" ; The string to find
|
||||
Pop $2 # The result of the search
|
||||
StrCmp $2 "" 0 done # String not found, try again with ';' at the end
|
||||
# Otherwise, it's already in the path
|
||||
Push "$1;" # The string to search
|
||||
Push "$0\;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result
|
||||
StrCmp $2 "" 0 done ; String not found, continue (add)
|
||||
; Otherwise, it's already in the path
|
||||
Pop $2 # The result
|
||||
StrCmp $2 "" 0 done # String not found, continue (add)
|
||||
# Otherwise, it's already in the path
|
||||
|
||||
; Prevent NSIS string overflow
|
||||
StrLen $2 $0 ; Length of path to add ($2)
|
||||
StrLen $3 $1 ; Length of current path ($3)
|
||||
IntOp $2 $2 + $3 ; Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 ; Account for the additional ';'
|
||||
; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
# Prevent NSIS string overflow
|
||||
StrLen $2 $0 # Length of path to add ($2)
|
||||
StrLen $3 $1 # Length of current path ($3)
|
||||
IntOp $2 $2 + $3 # Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 # Account for the additional ';'
|
||||
# $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
|
||||
; Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
# Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
|
||||
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
|
@ -678,18 +700,18 @@ Function AddToPath
|
|||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; Append dir to PATH
|
||||
# Append dir to PATH
|
||||
DetailPrint "Add to PATH: $0"
|
||||
StrCpy $2 $1 1 -1 ; Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 ; Check for trailing ';'
|
||||
StrCpy $1 $1 -1 ; remove trailing ';'
|
||||
StrCmp $1 "" +2 ; Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" ; Append new path at the end ($0)
|
||||
StrCpy $2 $1 1 -1 # Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 # Check for trailing ';'
|
||||
StrCpy $1 $1 -1 # remove trailing ';'
|
||||
StrCmp $1 "" +2 # Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" # Append new path at the end ($0)
|
||||
|
||||
; We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
# We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
WriteRegExpandStr ${Environ} "PATH" $0
|
||||
|
||||
; Broadcast registry change to open programs
|
||||
# Broadcast registry change to open programs
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
|
@ -702,16 +724,16 @@ Function AddToPath
|
|||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; RemoveFromPath Function
|
||||
; - Removes item from Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call un.RemoveFromPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# RemoveFromPath Function
|
||||
# - Removes item from Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call un.RemoveFromPath
|
||||
#------------------------------------------------------------------------------
|
||||
Function un.RemoveFromPath
|
||||
|
||||
Exch $0
|
||||
|
@ -722,59 +744,59 @@ Function un.RemoveFromPath
|
|||
Push $5
|
||||
Push $6
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 ; Copy the last character of the path
|
||||
StrCmp $5 ";" +2 ; Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" ; ensure trailing ';'
|
||||
# Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 # Copy the last character of the path
|
||||
StrCmp $5 ";" +2 # Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" # ensure trailing ';'
|
||||
|
||||
; Check for our directory inside the path
|
||||
Push $1 ; String to Search
|
||||
Push "$0;" ; Dir to Find
|
||||
# Check for our directory inside the path
|
||||
Push $1 # String to Search
|
||||
Push "$0;" # Dir to Find
|
||||
Call un.StrStr
|
||||
Pop $2 ; The results of the search
|
||||
StrCmp $2 "" done ; If results are empty, we're done, otherwise continue
|
||||
Pop $2 # The results of the search
|
||||
StrCmp $2 "" done # If results are empty, we're done, otherwise continue
|
||||
|
||||
; Remove our Directory from the Path
|
||||
# Remove our Directory from the Path
|
||||
DetailPrint "Remove from PATH: $0"
|
||||
StrLen $3 "$0;" ; Get the length of our dir ($3)
|
||||
StrLen $4 $2 ; Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" ; Combine $5 and $6
|
||||
StrLen $3 "$0;" # Get the length of our dir ($3)
|
||||
StrLen $4 $2 # Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 # $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" # Combine $5 and $6
|
||||
|
||||
; Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 ; Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 ; Check for ';'
|
||||
StrCpy $3 $3 -1 ; remove trailing ';'
|
||||
# Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 # Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 # Check for ';'
|
||||
StrCpy $3 $3 -1 # remove trailing ';'
|
||||
|
||||
; Write the new path to the registry
|
||||
# Write the new path to the registry
|
||||
WriteRegExpandStr ${Environ} "PATH" $3
|
||||
|
||||
; Broadcast the change to all open applications
|
||||
# Broadcast the change to all open applications
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
|
@ -808,6 +830,7 @@ Function getMinionConfig
|
|||
confFound:
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
|
@ -838,68 +861,69 @@ FunctionEnd
|
|||
Function updateMinionConfig
|
||||
|
||||
ClearErrors
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading
|
||||
GetTempFileName $R0 ; get new temp file name
|
||||
FileOpen $1 $R0 "w" ; open temp file for writing
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
|
||||
GetTempFileName $R0 # get new temp file name
|
||||
FileOpen $1 $R0 "w" # open temp file for writing
|
||||
|
||||
loop: ; loop through each line
|
||||
FileRead $0 $2 ; read line from target file
|
||||
IfErrors done ; end if errors are encountered (end of line)
|
||||
loop: # loop through each line
|
||||
FileRead $0 $2 # read line from target file
|
||||
IfErrors done # end if errors are encountered (end of line)
|
||||
|
||||
${If} $MasterHost_State != "" ; if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MasterHost_State != "" # if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" # and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
${If} $MinionName_State != "" ; if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" ; change line
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MinionName_State != "" # if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" # change line
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
FileWrite $1 $2 ; write changed or unchanged line to temp file
|
||||
FileWrite $1 $2 # write changed or unchanged line to temp file
|
||||
Goto loop
|
||||
|
||||
done:
|
||||
FileClose $0 ; close target file
|
||||
FileClose $1 ; close temp file
|
||||
Delete "$INSTDIR\conf\minion" ; delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file
|
||||
Delete $R0 ; delete temp file
|
||||
FileClose $0 # close target file
|
||||
FileClose $1 # close temp file
|
||||
Delete "$INSTDIR\conf\minion" # delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
|
||||
Delete $R0 # delete temp file
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function parseCommandLineSwitches
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
; Check for start-minion switches
|
||||
; /start-service is to be deprecated, so we must check for both
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
${GetOptions} $R0 "/start-minion=" $R2
|
||||
|
||||
# Service: Start Salt Minion
|
||||
${IfNot} $R2 == ""
|
||||
; If start-minion was passed something, then set it
|
||||
# If start-minion was passed something, then set it
|
||||
StrCpy $StartMinion $R2
|
||||
${ElseIfNot} $R1 == ""
|
||||
; If start-service was passed something, then set StartMinion to that
|
||||
# If start-service was passed something, then set StartMinion to that
|
||||
StrCpy $StartMinion $R1
|
||||
${Else}
|
||||
; Otherwise default to 1
|
||||
# Otherwise default to 1
|
||||
StrCpy $StartMinion 1
|
||||
${EndIf}
|
||||
|
||||
# Service: Minion Startup Type Delayed
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/start-minion-delayed" $R1
|
||||
IfErrors start_minion_delayed_not_found
|
||||
StrCpy $StartMinionDelayed 1
|
||||
|
|
|
@ -19,9 +19,9 @@ Function Get-Settings {
|
|||
"Python2Dir" = "C:\Python27"
|
||||
"Scripts2Dir" = "C:\Python27\Scripts"
|
||||
"SitePkgs2Dir" = "C:\Python27\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Program Files\Python35"
|
||||
"Scripts3Dir" = "C:\Program Files\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Python35"
|
||||
"Scripts3Dir" = "C:\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Python35\Lib\site-packages"
|
||||
"DownloadDir" = "$env:Temp\DevSalt"
|
||||
}
|
||||
# The script deletes the DownLoadDir (above) for each install.
|
||||
|
|
|
@ -29,6 +29,7 @@ import salt.config
|
|||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.utils.versions
|
||||
|
@ -69,7 +70,7 @@ class LoadAuth(object):
|
|||
if fstr not in self.auth:
|
||||
return ''
|
||||
try:
|
||||
pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
|
||||
pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0]
|
||||
return load[pname_arg]
|
||||
except IndexError:
|
||||
return ''
|
||||
|
@ -216,8 +217,9 @@ class LoadAuth(object):
|
|||
acl_ret = self.__get_acl(load)
|
||||
tdata['auth_list'] = acl_ret
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
groups = self.get_groups(load)
|
||||
if groups:
|
||||
tdata['groups'] = groups
|
||||
|
||||
return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)
|
||||
|
||||
|
@ -292,29 +294,31 @@ class LoadAuth(object):
|
|||
def authenticate_key(self, load, key):
|
||||
'''
|
||||
Authenticate a user by the key passed in load.
|
||||
Return the effective user id (name) if it's differ from the specified one (for sudo).
|
||||
If the effective user id is the same as passed one return True on success or False on
|
||||
Return the effective user id (name) if it's different from the specified one (for sudo).
|
||||
If the effective user id is the same as the passed one, return True on success or False on
|
||||
failure.
|
||||
'''
|
||||
auth_key = load.pop('key')
|
||||
if not auth_key:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
error_msg = 'Authentication failure of type "user" occurred.'
|
||||
auth_key = load.pop('key', None)
|
||||
if auth_key is None:
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
|
||||
if 'user' in load:
|
||||
auth_user = AuthUser(load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return auth_user.sudo_name()
|
||||
elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root':
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_user.is_running_user():
|
||||
if auth_key != key.get(load['user']):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_key == key.get('root'):
|
||||
pass
|
||||
|
@ -322,19 +326,19 @@ class LoadAuth(object):
|
|||
if load['user'] in key:
|
||||
# User is authorised, check key and check perms
|
||||
if auth_key != key[load['user']]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return load['user']
|
||||
else:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
log.warning('Authentication failure of type "other" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_auth_list(self, load):
|
||||
def get_auth_list(self, load, token=None):
|
||||
'''
|
||||
Retrieve access list for the user specified in load.
|
||||
The list is built by eauth module or from master eauth configuration.
|
||||
|
@ -342,30 +346,37 @@ class LoadAuth(object):
|
|||
list if the user has no rights to execute anything on this master and returns non-empty list
|
||||
if user is allowed to execute particular functions.
|
||||
'''
|
||||
# Get auth list from token
|
||||
if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
return token['auth_list']
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.__get_acl(load)
|
||||
if auth_list is not None:
|
||||
return auth_list
|
||||
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
eauth = token['eauth'] if token else load['eauth']
|
||||
if eauth not in self.opts['external_auth']:
|
||||
# No matching module is allowed in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][load['eauth']]
|
||||
if groups is None or groups is False:
|
||||
if token:
|
||||
name = token['name']
|
||||
groups = token.get('groups')
|
||||
else:
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][eauth]
|
||||
if not groups:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in groups:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
if group_config.rstrip('%') in groups:
|
||||
group_auth_match = True
|
||||
break
|
||||
# If a group_auth_match is set it means only that we have a
|
||||
# user which matches at least one or more of the groups defined
|
||||
# in the configuration file.
|
||||
|
@ -405,6 +416,64 @@ class LoadAuth(object):
|
|||
|
||||
return auth_list
|
||||
|
||||
def check_authentication(self, load, auth_type, key=None, show_username=False):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Go through various checks to see if the token/eauth/user can be authenticated.
|
||||
|
||||
Returns a dictionary containing the following keys:
|
||||
|
||||
- auth_list
|
||||
- username
|
||||
- error
|
||||
|
||||
If an error is encountered, return immediately with the relevant error dictionary
|
||||
as authentication has failed. Otherwise, return the username and valid auth_list.
|
||||
'''
|
||||
auth_list = []
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
ret = {'auth_list': auth_list,
|
||||
'username': username,
|
||||
'error': {}}
|
||||
|
||||
# Authenticate
|
||||
if auth_type == 'token':
|
||||
token = self.authenticate_token(load)
|
||||
if not token:
|
||||
ret['error'] = {'name': 'TokenAuthenticationError',
|
||||
'message': 'Authentication failure of type "token" occurred.'}
|
||||
return ret
|
||||
|
||||
# Update username for token
|
||||
username = token['name']
|
||||
ret['username'] = username
|
||||
auth_list = self.get_auth_list(load, token=token)
|
||||
elif auth_type == 'eauth':
|
||||
if not self.authenticate_eauth(load):
|
||||
ret['error'] = {'name': 'EauthAuthenticationError',
|
||||
'message': 'Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.'.format(username)}
|
||||
return ret
|
||||
|
||||
auth_list = self.get_auth_list(load)
|
||||
elif auth_type == 'user':
|
||||
if not self.authenticate_key(load, key):
|
||||
if show_username:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username)
|
||||
else:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
else:
|
||||
ret['error'] = {'name': 'SaltInvocationError',
|
||||
'message': 'Authentication type not supported.'}
|
||||
return ret
|
||||
|
||||
# Authentication checks passed
|
||||
ret['auth_list'] = auth_list
|
||||
return ret
|
||||
|
||||
|
||||
class Authorize(object):
|
||||
'''
|
||||
|
@ -550,6 +619,15 @@ class Authorize(object):
|
|||
load.get('arg', None),
|
||||
load.get('tgt', None),
|
||||
load.get('tgt_type', 'glob'))
|
||||
|
||||
# Handle possible return of dict data structure from any_auth call to
|
||||
# avoid a stacktrace. As mentioned in PR #43181, this entire class is
|
||||
# dead code and is marked for removal in Salt Neon. But until then, we
|
||||
# should handle the dict return, which is an error and should return
|
||||
# False until this class is removed.
|
||||
if isinstance(good, dict):
|
||||
return False
|
||||
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load.get('fun', '') != 'saltutil.find_job':
|
||||
|
@ -562,7 +640,7 @@ class Authorize(object):
|
|||
authorization
|
||||
|
||||
Note: this will check that the user has at least one right that will let
|
||||
him execute "load", this does not deal with conflicting rules
|
||||
the user execute "load", this does not deal with conflicting rules
|
||||
'''
|
||||
|
||||
adata = self.auth_data
|
||||
|
@ -634,7 +712,7 @@ class Resolver(object):
|
|||
'not available').format(eauth))
|
||||
return ret
|
||||
|
||||
args = salt.utils.arg_lookup(self.auth[fstr])
|
||||
args = salt.utils.args.arg_lookup(self.auth[fstr])
|
||||
for arg in args['args']:
|
||||
if arg in self.opts:
|
||||
ret[arg] = self.opts[arg]
|
||||
|
|
|
@ -378,7 +378,7 @@ def groups(username, **kwargs):
|
|||
search_results = bind.search_s(search_base,
|
||||
ldap.SCOPE_SUBTREE,
|
||||
search_string,
|
||||
[_config('accountattributename'), 'cn'])
|
||||
[_config('accountattributename'), 'cn', _config('groupattribute')])
|
||||
for _, entry in search_results:
|
||||
if username in entry[_config('accountattributename')]:
|
||||
group_list.append(entry['cn'][0])
|
||||
|
@ -390,7 +390,7 @@ def groups(username, **kwargs):
|
|||
|
||||
# Only test user auth on first call for job.
|
||||
# 'show_jid' only exists on first payload so we can use that for the conditional.
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs['password'],
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs.get('password'),
|
||||
anonymous=_config('auth_by_group_membership_only', mandatory=False) and
|
||||
_config('anonymous', mandatory=False)):
|
||||
log.error('LDAP username and password do not match')
|
||||
|
|
|
@ -59,7 +59,7 @@ class Beacon(object):
|
|||
|
||||
if 'enabled' in current_beacon_config:
|
||||
if not current_beacon_config['enabled']:
|
||||
log.trace('Beacon {0} disabled'.format(mod))
|
||||
log.trace('Beacon %s disabled', mod)
|
||||
continue
|
||||
else:
|
||||
# remove 'enabled' item before processing the beacon
|
||||
|
@ -68,7 +68,7 @@ class Beacon(object):
|
|||
else:
|
||||
self._remove_list_item(config[mod], 'enabled')
|
||||
|
||||
log.trace('Beacon processing: {0}'.format(mod))
|
||||
log.trace('Beacon processing: %s', mod)
|
||||
fun_str = '{0}.beacon'.format(mod)
|
||||
validate_str = '{0}.validate'.format(mod)
|
||||
if fun_str in self.beacons:
|
||||
|
@ -77,10 +77,10 @@ class Beacon(object):
|
|||
if interval:
|
||||
b_config = self._trim_config(b_config, mod, 'interval')
|
||||
if not self._process_interval(mod, interval):
|
||||
log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
|
||||
log.trace('Skipping beacon %s. Interval not reached.', mod)
|
||||
continue
|
||||
if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
|
||||
log.trace('Evaluting if beacon {0} should be skipped due to a state run.'.format(mod))
|
||||
log.trace('Evaluting if beacon %s should be skipped due to a state run.', mod)
|
||||
b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
|
||||
is_running = False
|
||||
running_jobs = salt.utils.minion.running(self.opts)
|
||||
|
@ -90,10 +90,10 @@ class Beacon(object):
|
|||
if is_running:
|
||||
close_str = '{0}.close'.format(mod)
|
||||
if close_str in self.beacons:
|
||||
log.info('Closing beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Closing beacon %s. State run in progress.', mod)
|
||||
self.beacons[close_str](b_config[mod])
|
||||
else:
|
||||
log.info('Skipping beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Skipping beacon %s. State run in progress.', mod)
|
||||
continue
|
||||
# Update __grains__ on the beacon
|
||||
self.beacons[fun_str].__globals__['__grains__'] = grains
|
||||
|
@ -120,7 +120,7 @@ class Beacon(object):
|
|||
if runonce:
|
||||
self.disable_beacon(mod)
|
||||
else:
|
||||
log.warning('Unable to process beacon {0}'.format(mod))
|
||||
log.warning('Unable to process beacon %s', mod)
|
||||
return ret
|
||||
|
||||
def _trim_config(self, b_config, mod, key):
|
||||
|
@ -149,19 +149,19 @@ class Beacon(object):
|
|||
Process beacons with intervals
|
||||
Return True if a beacon should be run on this loop
|
||||
'''
|
||||
log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
|
||||
log.trace('Processing interval %s for beacon mod %s', interval, mod)
|
||||
loop_interval = self.opts['loop_interval']
|
||||
if mod in self.interval_map:
|
||||
log.trace('Processing interval in map')
|
||||
counter = self.interval_map[mod]
|
||||
log.trace('Interval counter: {0}'.format(counter))
|
||||
log.trace('Interval counter: %s', counter)
|
||||
if counter * loop_interval >= interval:
|
||||
self.interval_map[mod] = 1
|
||||
return True
|
||||
else:
|
||||
self.interval_map[mod] += 1
|
||||
else:
|
||||
log.trace('Interval process inserting mod: {0}'.format(mod))
|
||||
log.trace('Interval process inserting mod: %s', mod)
|
||||
self.interval_map[mod] = 1
|
||||
return False
|
||||
|
||||
|
@ -205,15 +205,50 @@ class Beacon(object):
|
|||
'''
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
b_conf = self.functions['config.merge']('beacons')
|
||||
if not isinstance(self.opts['beacons'], dict):
|
||||
self.opts['beacons'] = {}
|
||||
self.opts['beacons'].update(b_conf)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacons_list_complete')
|
||||
|
||||
return True
|
||||
|
||||
def list_available_beacons(self):
|
||||
'''
|
||||
List the available beacons
|
||||
'''
|
||||
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
|
||||
for _beacon in self.beacons if '.beacon' in _beacon]
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': _beacons},
|
||||
tag='/salt/minion/minion_beacons_list_available_complete')
|
||||
|
||||
return True
|
||||
|
||||
def validate_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Return available beacon functions
|
||||
'''
|
||||
validate_str = '{}.validate'.format(name)
|
||||
# Run the validate function if it's available,
|
||||
# otherwise there is a warning about it being missing
|
||||
if validate_str in self.beacons:
|
||||
if 'enabled' in beacon_data:
|
||||
del beacon_data['enabled']
|
||||
valid, vcomment = self.beacons[validate_str](beacon_data)
|
||||
else:
|
||||
log.info('Beacon %s does not have a validate'
|
||||
' function, skipping validation.', name)
|
||||
valid = True
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True,
|
||||
'vcomment': vcomment,
|
||||
'valid': valid},
|
||||
tag='/salt/minion/minion_beacon_validation_complete')
|
||||
|
||||
return True
|
||||
|
||||
def add_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Add a beacon item
|
||||
|
@ -224,9 +259,9 @@ class Beacon(object):
|
|||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
else:
|
||||
log.info('Added new beacon item {0}'.format(name))
|
||||
log.info('Added new beacon item %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
@ -245,7 +280,7 @@ class Beacon(object):
|
|||
data[name] = beacon_data
|
||||
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
@ -261,7 +296,7 @@ class Beacon(object):
|
|||
'''
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Deleting beacon item {0}'.format(name))
|
||||
log.info('Deleting beacon item %s', name)
|
||||
del self.opts['beacons'][name]
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
|
|
@ -10,14 +10,19 @@ Beacon to fire events at failed login of users
|
|||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'btmp'
|
||||
BTMP = '/var/log/btmp'
|
||||
|
@ -37,6 +42,15 @@ FIELDS = [
|
|||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'btmp.loc'
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(BTMP):
|
||||
|
@ -44,6 +58,20 @@ def __virtual__():
|
|||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
|
||||
'''
|
||||
Check time range
|
||||
'''
|
||||
if _TIME_SUPPORTED:
|
||||
_start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple()))
|
||||
_end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple()))
|
||||
|
||||
return bool(_start <= now <= _end)
|
||||
else:
|
||||
log.error('Dateutil is required.')
|
||||
return False
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
|
@ -60,6 +88,45 @@ def validate(config):
|
|||
if not isinstance(config, list):
|
||||
return False, ('Configuration for btmp beacon must '
|
||||
'be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
|
@ -72,8 +139,40 @@ def beacon(config):
|
|||
|
||||
beacons:
|
||||
btmp: []
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
btmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(BTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
|
@ -83,6 +182,7 @@ def beacon(config):
|
|||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
|
@ -91,7 +191,30 @@ def beacon(config):
|
|||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
|
||||
|
|
|
@ -23,7 +23,9 @@ import re
|
|||
|
||||
# Import salt libs
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
|
|
353
salt/beacons/napalm_beacon.py
Normal file
353
salt/beacons/napalm_beacon.py
Normal file
|
@ -0,0 +1,353 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
NAPALM functions
|
||||
================
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Watch NAPALM functions and fire events on specific triggers.
|
||||
|
||||
.. note::
|
||||
|
||||
The ``NAPALM`` beacon only works only when running under
|
||||
a regular Minion or a Proxy Minion, managed via NAPALM_.
|
||||
Check the documentation for the
|
||||
:mod:`NAPALM proxy module <salt.proxy.napalm>`.
|
||||
|
||||
_NAPALM: http://napalm.readthedocs.io/en/latest/index.html
|
||||
|
||||
The configuration accepts a list of Salt functions to be
|
||||
invoked, and the corresponding output hierarchy that should
|
||||
be matched against. To invoke a function with certain
|
||||
arguments, they can be specified using the ``_args`` key, or
|
||||
``_kwargs`` for more specific key-value arguments.
|
||||
|
||||
The match structure follows the output hierarchy of the NAPALM
|
||||
functions, under the ``out`` key.
|
||||
|
||||
For example, the following is normal structure returned by the
|
||||
:mod:`ntp.stats <salt.modules.napalm_ntp.stats>` execution function:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"comment": "",
|
||||
"result": true,
|
||||
"out": [
|
||||
{
|
||||
"referenceid": ".GPSs.",
|
||||
"remote": "172.17.17.1",
|
||||
"synchronized": true,
|
||||
"reachability": 377,
|
||||
"offset": 0.461,
|
||||
"when": "860",
|
||||
"delay": 143.606,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 1,
|
||||
"jitter": 0.027,
|
||||
"type": "-"
|
||||
},
|
||||
{
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.2",
|
||||
"synchronized": false,
|
||||
"reachability": 0,
|
||||
"offset": 0.0,
|
||||
"when": "-",
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"stratum": 16,
|
||||
"jitter": 4000.0,
|
||||
"type": "-"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
In order to fire events when the synchronization is lost with
|
||||
one of the NTP peers, e.g., ``172.17.17.2``, we can match it explicitly as:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ntp.stats:
|
||||
remote: 172.17.17.2
|
||||
synchronized: false
|
||||
|
||||
There is one single nesting level, as the output of ``ntp.stats`` is
|
||||
just a list of dictionaries, and this beacon will compare each dictionary
|
||||
from the list with the structure examplified above.
|
||||
|
||||
.. note::
|
||||
|
||||
When we want to match on any element at a certain level, we can
|
||||
configure ``*`` to match anything.
|
||||
|
||||
Considering a more complex structure consisting on multiple nested levels,
|
||||
e.g., the output of the :mod:`bgp.neighbors <salt.modules.napalm_bgp.neighbors>`
|
||||
execution function, to check when any neighbor from the ``global``
|
||||
routing table is down, the match structure would have the format:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
bgp.neighbors:
|
||||
global:
|
||||
'*':
|
||||
up: false
|
||||
|
||||
The match structure above will match any BGP neighbor, with
|
||||
any network (``*`` matches any AS number), under the ``global`` VRF.
|
||||
In other words, this beacon will push an event on the Salt bus
|
||||
when there's a BGP neighbor down.
|
||||
|
||||
The right operand can also accept mathematical operations
|
||||
(i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.) when comparing
|
||||
numerical values.
|
||||
|
||||
Configuration Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
beacons:
|
||||
napalm:
|
||||
- net.interfaces:
|
||||
# fire events when any interfaces is down
|
||||
'*':
|
||||
is_up: false
|
||||
- net.interfaces:
|
||||
# fire events only when the xe-0/0/0 interface is down
|
||||
'xe-0/0/0':
|
||||
is_up: false
|
||||
- ntp.stats:
|
||||
# fire when there's any NTP peer unsynchornized
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when the synchronization
|
||||
# with with the 172.17.17.2 NTP server is lost
|
||||
_args:
|
||||
- 172.17.17.2
|
||||
synchronized: false
|
||||
- ntp.stats:
|
||||
# fire only when there's a NTP peer with
|
||||
# synchronization stratum > 5
|
||||
stratum: '> 5'
|
||||
|
||||
Event structure example:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
salt/beacon/edge01.bjm01/napalm/junos/ntp.stats {
|
||||
"_stamp": "2017-09-05T09:51:09.377202",
|
||||
"args": [],
|
||||
"data": {
|
||||
"comment": "",
|
||||
"out": [
|
||||
{
|
||||
"delay": 0.0,
|
||||
"hostpoll": 1024,
|
||||
"jitter": 4000.0,
|
||||
"offset": 0.0,
|
||||
"reachability": 0,
|
||||
"referenceid": ".INIT.",
|
||||
"remote": "172.17.17.1",
|
||||
"stratum": 16,
|
||||
"synchronized": false,
|
||||
"type": "-",
|
||||
"when": "-"
|
||||
}
|
||||
],
|
||||
"result": true
|
||||
},
|
||||
"fun": "ntp.stats",
|
||||
"id": "edge01.bjm01",
|
||||
"kwargs": {},
|
||||
"match": {
|
||||
"stratum": "> 5"
|
||||
}
|
||||
}
|
||||
|
||||
The event examplified above has been fired when the device
|
||||
identified by the Minion id ``edge01.bjm01`` has been synchronized
|
||||
with a NTP server at a stratum level greater than 5.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python std lib
|
||||
import re
|
||||
import logging
|
||||
|
||||
# Import Salt modules
|
||||
from salt.ext import six
|
||||
import salt.utils.napalm
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
_numeric_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$')
|
||||
# the numeric regex will match the right operand, e.g '>= 20', '< 100', '!= 20', '< 1000.12' etc.
|
||||
_numeric_operand = {
|
||||
'<': '__lt__',
|
||||
'>': '__gt__',
|
||||
'>=': '__ge__',
|
||||
'<=': '__le__',
|
||||
'==': '__eq__',
|
||||
'!=': '__ne__',
|
||||
} # mathematical operand - private method map
|
||||
|
||||
|
||||
__virtualname__ = 'napalm'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
This beacon can only work when running under a regular or a proxy minion, managed through napalm.
|
||||
'''
|
||||
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
|
||||
|
||||
|
||||
def _compare(cur_cmp, cur_struct):
|
||||
'''
|
||||
Compares two objects and return a boolean value
|
||||
when there's a match.
|
||||
'''
|
||||
if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict):
|
||||
log.debug('Comparing dict to dict')
|
||||
for cmp_key, cmp_value in six.iteritems(cur_cmp):
|
||||
if cmp_key == '*':
|
||||
# matches any key from the source dictionary
|
||||
if isinstance(cmp_value, dict):
|
||||
found = False
|
||||
for _, cur_struct_val in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_struct_val)
|
||||
return found
|
||||
else:
|
||||
found = False
|
||||
if isinstance(cur_struct, (list, tuple)):
|
||||
for cur_ele in cur_struct:
|
||||
found |= _compare(cmp_value, cur_ele)
|
||||
elif isinstance(cur_struct, dict):
|
||||
for _, cur_ele in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_ele)
|
||||
return found
|
||||
else:
|
||||
if isinstance(cmp_value, dict):
|
||||
if cmp_key not in cur_struct:
|
||||
return False
|
||||
return _compare(cmp_value, cur_struct[cmp_key])
|
||||
if isinstance(cmp_value, list):
|
||||
found = False
|
||||
for _, cur_struct_val in six.iteritems(cur_struct):
|
||||
found |= _compare(cmp_value, cur_struct_val)
|
||||
return found
|
||||
else:
|
||||
return _compare(cmp_value, cur_struct[cmp_key])
|
||||
elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)):
|
||||
log.debug('Comparing list to list')
|
||||
found = False
|
||||
for cur_cmp_ele in cur_cmp:
|
||||
for cur_struct_ele in cur_struct:
|
||||
found |= _compare(cur_cmp_ele, cur_struct_ele)
|
||||
return found
|
||||
elif isinstance(cur_cmp, dict) and isinstance(cur_struct, (list, tuple)):
|
||||
log.debug('Comparing dict to list (of dicts?)')
|
||||
found = False
|
||||
for cur_struct_ele in cur_struct:
|
||||
found |= _compare(cur_cmp, cur_struct_ele)
|
||||
return found
|
||||
elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool):
|
||||
log.debug('Comparing booleans: %s ? %s', cur_cmp, cur_struct)
|
||||
return cur_cmp == cur_struct
|
||||
elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \
|
||||
isinstance(cur_struct, (six.string_types, six.text_type)):
|
||||
log.debug('Comparing strings (and regex?): %s ? %s', cur_cmp, cur_struct)
|
||||
# Trying literal match
|
||||
matched = re.match(cur_cmp, cur_struct, re.I)
|
||||
if matched:
|
||||
return True
|
||||
return False
|
||||
elif isinstance(cur_cmp, (six.integer_types, float)) and \
|
||||
isinstance(cur_struct, (six.integer_types, float)):
|
||||
log.debug('Comparing numeric values: %d ? %d', cur_cmp, cur_struct)
|
||||
# numeric compare
|
||||
return cur_cmp == cur_struct
|
||||
elif isinstance(cur_struct, (six.integer_types, float)) and \
|
||||
isinstance(cur_cmp, (six.string_types, six.text_type)):
|
||||
# Comapring the numerical value agains a presumably mathematical value
|
||||
log.debug('Comparing a numeric value (%d) with a string (%s)', cur_struct, cur_cmp)
|
||||
numeric_compare = _numeric_regex.match(cur_cmp)
|
||||
# determine if the value to compare agains is a mathematical operand
|
||||
if numeric_compare:
|
||||
compare_value = numeric_compare.group(2)
|
||||
return getattr(float(cur_struct), _numeric_operand[numeric_compare.group(1)])(float(compare_value))
|
||||
return False
|
||||
return False
|
||||
|
||||
|
||||
def validate(config):
    '''
    Validate the beacon configuration.

    Returns a ``(valid, message)`` tuple: ``valid`` is a boolean and
    ``message`` explains the verdict.
    '''
    # Must be a list of single-key dicts, each mapping an execution
    # function name to the match structure for its output.
    if not isinstance(config, list):
        return False, 'Configuration for napalm beacon must be a list.'
    for mod in config:
        # dict.keys()/.values() return non-subscriptable views on Python 3,
        # so grab the single entry with next(iter(...)) instead of [0].
        fun = next(iter(mod.keys()))
        fun_cfg = next(iter(mod.values()))
        if not isinstance(fun_cfg, dict):
            return False, 'The match structure for the {} execution function output must be a dictionary'.format(fun)
        if fun not in __salt__:
            return False, 'Execution function {} is not available!'.format(fun)
    return True, 'Valid configuration for the napalm beacon!'
|
||||
|
||||
|
||||
def beacon(config):
    '''
    Watch napalm function and fire events.

    For each entry in ``config`` the mapped execution function is invoked
    with the configured ``_args``/``_kwargs``; its output is compared
    (via ``_compare``) against the remaining match structure, and a
    beacon event is queued for every match.
    '''
    log.debug('Executing napalm beacon with config:')
    log.debug(config)
    ret = []
    for mod in config:
        if not mod:
            continue
        event = {}
        # dict.keys()/.values() return non-subscriptable views on Python 3,
        # so grab the single entry with next(iter(...)) instead of [0].
        fun = next(iter(mod.keys()))
        fun_cfg = next(iter(mod.values()))
        # _args/_kwargs are reserved keys carrying the call arguments; the
        # rest of fun_cfg is the match structure.
        args = fun_cfg.pop('_args', [])
        kwargs = fun_cfg.pop('_kwargs', {})
        # lazy %-args: formatting only happens if the level is enabled
        log.debug('Executing %s with %s and %s', fun, args, kwargs)
        fun_ret = __salt__[fun](*args, **kwargs)
        log.debug('Got the reply from the minion:')
        log.debug(fun_ret)
        if not fun_ret.get('result', False):
            log.error('Error whilst executing %s', fun)
            log.error(fun_ret)
            continue
        fun_ret_out = fun_ret['out']
        log.debug('Comparing to:')
        log.debug(fun_cfg)
        try:
            fun_cmp_result = _compare(fun_cfg, fun_ret_out)
        except Exception as err:
            log.error(err, exc_info=True)
            # catch any exception and continue
            # to not jeopardise the execution of the next function in the list
            continue
        log.debug('Result of comparison: %s', fun_cmp_result)
        if fun_cmp_result:
            log.info('Matched %s with %s', fun, fun_cfg)
            event['tag'] = '{os}/{fun}'.format(os=__grains__['os'], fun=fun)
            event['fun'] = fun
            event['args'] = args
            event['kwargs'] = kwargs
            event['data'] = fun_ret
            event['match'] = fun_cfg
            log.debug('Queueing event:')
            log.debug(event)
            ret.append(event)
    log.debug('NAPALM beacon generated the events:')
    log.debug(ret)
    return ret
|
|
@ -10,14 +10,19 @@ Beacon to fire events at login of users as registered in the wtmp file
|
|||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.files
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
import salt.ext.six
|
||||
# pylint: disable=import-error
|
||||
from salt.ext.six.moves import map
|
||||
# pylint: enable=import-error
|
||||
|
||||
__virtualname__ = 'wtmp'
|
||||
WTMP = '/var/log/wtmp'
|
||||
|
@ -37,9 +42,15 @@ FIELDS = [
|
|||
SIZE = struct.calcsize(FMT)
|
||||
LOC_KEY = 'wtmp.loc'
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import dateutil.parser as dateutil_parser
|
||||
_TIME_SUPPORTED = True
|
||||
except ImportError:
|
||||
_TIME_SUPPORTED = False
|
||||
|
||||
|
||||
def __virtual__():
|
||||
if os.path.isfile(WTMP):
|
||||
|
@ -47,6 +58,20 @@ def __virtual__():
|
|||
return False
|
||||
|
||||
|
||||
def _check_time_range(time_range, now):
    '''
    Return True when ``now`` (epoch seconds) falls inside the configured
    ``start``/``end`` window; False when it is outside the window or when
    dateutil is not installed.
    '''
    if not _TIME_SUPPORTED:
        log.error('Dateutil is required.')
        return False

    def _to_epoch(stamp):
        # Parse the human-readable timestamp and convert to epoch seconds.
        return int(time.mktime(dateutil_parser.parse(stamp).timetuple()))

    window_start = _to_epoch(time_range['start'])
    window_end = _to_epoch(time_range['end'])
    return window_start <= now <= window_end
|
||||
|
||||
|
||||
def _get_loc():
|
||||
'''
|
||||
return the active file location
|
||||
|
@ -62,6 +87,44 @@ def validate(config):
|
|||
# Configuration for wtmp beacon should be a list of dicts
|
||||
if not isinstance(config, list):
|
||||
return False, ('Configuration for wtmp beacon must be a list.')
|
||||
else:
|
||||
_config = {}
|
||||
list(map(_config.update, config))
|
||||
|
||||
if 'users' in _config:
|
||||
if not isinstance(_config['users'], dict):
|
||||
return False, ('User configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
for user in _config['users']:
|
||||
if _config['users'][user] and \
|
||||
'time_range' in _config['users'][user]:
|
||||
_time_range = _config['users'][user]['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
if 'defaults' in _config:
|
||||
if not isinstance(_config['defaults'], dict):
|
||||
return False, ('Defaults configuration for btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if 'time_range' in _config['defaults']:
|
||||
_time_range = _config['defaults']['time_range']
|
||||
if not isinstance(_time_range, dict):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must '
|
||||
'be a dictionary.')
|
||||
else:
|
||||
if not all(k in _time_range for k in ('start', 'end')):
|
||||
return False, ('The time_range parameter for '
|
||||
'btmp beacon must contain '
|
||||
'start & end options.')
|
||||
return True, 'Valid beacon configuration'
|
||||
|
||||
|
||||
|
@ -74,8 +137,40 @@ def beacon(config):
|
|||
|
||||
beacons:
|
||||
wtmp: []
|
||||
'''
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
|
||||
beacons:
|
||||
wtmp:
|
||||
- users:
|
||||
gareth:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
- defaults:
|
||||
time_range:
|
||||
start: '8am'
|
||||
end: '4pm'
|
||||
'''
|
||||
ret = []
|
||||
|
||||
users = None
|
||||
defaults = None
|
||||
|
||||
for config_item in config:
|
||||
if 'users' in config_item:
|
||||
users = config_item['users']
|
||||
|
||||
if 'defaults' in config_item:
|
||||
defaults = config_item['defaults']
|
||||
|
||||
with salt.utils.files.fopen(WTMP, 'rb') as fp_:
|
||||
loc = __context__.get(LOC_KEY, 0)
|
||||
if loc == 0:
|
||||
|
@ -85,6 +180,7 @@ def beacon(config):
|
|||
else:
|
||||
fp_.seek(loc)
|
||||
while True:
|
||||
now = int(time.time())
|
||||
raw = fp_.read(SIZE)
|
||||
if len(raw) != SIZE:
|
||||
return ret
|
||||
|
@ -93,7 +189,30 @@ def beacon(config):
|
|||
event = {}
|
||||
for ind, field in enumerate(FIELDS):
|
||||
event[field] = pack[ind]
|
||||
if isinstance(event[field], six.string_types):
|
||||
event[field] = event[field].strip('\x00')
|
||||
ret.append(event)
|
||||
if isinstance(event[field], salt.ext.six.string_types):
|
||||
if isinstance(event[field], bytes):
|
||||
event[field] = event[field].decode()
|
||||
event[field] = event[field].strip('b\x00')
|
||||
else:
|
||||
event[field] = event[field].strip('\x00')
|
||||
|
||||
if users:
|
||||
if event['user'] in users:
|
||||
_user = users[event['user']]
|
||||
if isinstance(_user, dict) and 'time_range' in _user:
|
||||
if _check_time_range(_user['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'],
|
||||
now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
else:
|
||||
if defaults and 'time_range' in defaults:
|
||||
if _check_time_range(defaults['time_range'], now):
|
||||
ret.append(event)
|
||||
else:
|
||||
ret.append(event)
|
||||
return ret
|
||||
|
|
2
salt/cache/__init__.py
vendored
2
salt/cache/__init__.py
vendored
|
@ -73,7 +73,7 @@ class Cache(object):
|
|||
self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
|
||||
else:
|
||||
self.cachedir = cachedir
|
||||
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
|
||||
self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
|
||||
self.serial = Serial(opts)
|
||||
self._modules = None
|
||||
self._kwargs = kwargs
|
||||
|
|
9
salt/cache/consul.py
vendored
9
salt/cache/consul.py
vendored
|
@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store.
|
|||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
:depends: python-consul >= 0.2.0
|
||||
|
||||
It is up to the system administrator to set up and configure the Consul
|
||||
infrastructure. All is needed for this plugin is a working Consul agent
|
||||
with a read-write access to the key-value store.
|
||||
|
@ -81,8 +83,11 @@ def __virtual__():
|
|||
'verify': __opts__.get('consul.verify', True),
|
||||
}
|
||||
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
try:
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
except AttributeError:
|
||||
return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed")
|
||||
|
||||
return __virtualname__
|
||||
|
||||
|
|
|
@ -157,7 +157,7 @@ class BaseCaller(object):
|
|||
'''
|
||||
ret = {}
|
||||
fun = self.opts['fun']
|
||||
ret['jid'] = salt.utils.jid.gen_jid()
|
||||
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
|
||||
proc_fn = os.path.join(
|
||||
salt.minion.get_proc_dir(self.opts['cachedir']),
|
||||
ret['jid']
|
||||
|
|
|
@ -184,9 +184,10 @@ class SaltCP(object):
|
|||
if gzip \
|
||||
else salt.utils.itertools.read_file
|
||||
|
||||
minions = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
tgt,
|
||||
tgt_type=selected_target_option or 'glob')
|
||||
minions = _res['minions']
|
||||
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ from __future__ import absolute_import
|
|||
# Import Salt libs
|
||||
import salt.spm
|
||||
import salt.utils.parsers as parsers
|
||||
from salt.utils.verify import verify_log
|
||||
from salt.utils.verify import verify_log, verify_env
|
||||
|
||||
|
||||
class SPM(parsers.SPMParser):
|
||||
|
@ -29,6 +29,10 @@ class SPM(parsers.SPMParser):
|
|||
ui = salt.spm.SPMCmdlineInterface()
|
||||
self.parse_args()
|
||||
self.setup_logfile_logger()
|
||||
v_dirs = [
|
||||
self.config['cachedir'],
|
||||
]
|
||||
verify_env(v_dirs, self.config['user'],)
|
||||
verify_log(self.config)
|
||||
client = salt.spm.SPMClient(ui, self.config)
|
||||
client.run(self.args)
|
||||
|
|
|
@ -347,7 +347,8 @@ class LocalClient(object):
|
|||
return self._check_pub_data(pub_data)
|
||||
|
||||
def gather_minions(self, tgt, expr_form):
|
||||
return salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
return _res['minions']
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def run_job_async(
|
||||
|
@ -1141,6 +1142,7 @@ class LocalClient(object):
|
|||
minion_timeouts = {}
|
||||
|
||||
found = set()
|
||||
missing = []
|
||||
# Check to see if the jid is real, if not return the empty dict
|
||||
try:
|
||||
if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}:
|
||||
|
@ -1179,6 +1181,8 @@ class LocalClient(object):
|
|||
break
|
||||
if u'minions' in raw.get(u'data', {}):
|
||||
minions.update(raw[u'data'][u'minions'])
|
||||
if u'missing' in raw.get(u'data', {}):
|
||||
missing.extend(raw[u'data'][u'missing'])
|
||||
continue
|
||||
if u'return' not in raw[u'data']:
|
||||
continue
|
||||
|
@ -1320,6 +1324,10 @@ class LocalClient(object):
|
|||
for minion in list((minions - found)):
|
||||
yield {minion: {u'failed': True}}
|
||||
|
||||
if missing:
|
||||
for minion in missing:
|
||||
yield {minion: {'failed': True}}
|
||||
|
||||
def get_returns(
|
||||
self,
|
||||
jid,
|
||||
|
|
|
@ -14,8 +14,9 @@ client applications.
|
|||
http://docs.saltstack.com/ref/clients/index.html
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
|
@ -24,9 +25,9 @@ import salt.auth
|
|||
import salt.client
|
||||
import salt.runner
|
||||
import salt.wheel
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.event
|
||||
import salt.syspaths as syspaths
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import EauthAuthenticationError
|
||||
|
||||
|
||||
|
@ -229,7 +230,7 @@ class APIClient(object):
|
|||
functions = self.wheelClient.functions
|
||||
elif client == u'runner':
|
||||
functions = self.runnerClient.functions
|
||||
result = {u'master': salt.utils.argspec_report(functions, module)}
|
||||
result = {u'master': salt.utils.args.argspec_report(functions, module)}
|
||||
return result
|
||||
|
||||
def create_token(self, creds):
|
||||
|
@ -322,4 +323,4 @@ class APIClient(object):
|
|||
Need to convert this to a master call with appropriate authentication
|
||||
|
||||
'''
|
||||
return self.event.fire_event(data, tagify(tag, u'wui'))
|
||||
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
|
||||
|
|
|
@ -16,7 +16,8 @@ import copy as pycopy
|
|||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
import salt.minion
|
||||
import salt.utils
|
||||
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.doc
|
||||
import salt.utils.error
|
||||
import salt.utils.event
|
||||
|
@ -25,6 +26,7 @@ import salt.utils.job
|
|||
import salt.utils.lazy
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.state
|
||||
import salt.utils.versions
|
||||
import salt.transport
|
||||
import salt.log.setup
|
||||
|
@ -297,7 +299,7 @@ class SyncClientMixin(object):
|
|||
# this is not to clutter the output with the module loading
|
||||
# if we have a high debug level.
|
||||
self.mminion # pylint: disable=W0104
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid())
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
|
||||
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
|
||||
|
||||
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
|
||||
|
@ -362,29 +364,19 @@ class SyncClientMixin(object):
|
|||
# packed into the top level object. The plan is to move away from
|
||||
# that since the caller knows what is an arg vs a kwarg, but while
|
||||
# we make the transition we will load "kwargs" using format_call if
|
||||
# there are no kwargs in the low object passed in
|
||||
f_call = None
|
||||
if u'arg' not in low:
|
||||
# there are no kwargs in the low object passed in.
|
||||
|
||||
if u'arg' in low and u'kwarg' in low:
|
||||
args = low[u'arg']
|
||||
kwargs = low[u'kwarg']
|
||||
else:
|
||||
f_call = salt.utils.format_call(
|
||||
self.functions[fun],
|
||||
low,
|
||||
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
|
||||
)
|
||||
args = f_call.get(u'args', ())
|
||||
else:
|
||||
args = low[u'arg']
|
||||
|
||||
if u'kwarg' not in low:
|
||||
log.critical(
|
||||
u'kwargs must be passed inside the low data within the '
|
||||
u'\'kwarg\' key. See usage of '
|
||||
u'salt.utils.args.parse_input() and '
|
||||
u'salt.minion.load_args_and_kwargs() elsewhere in the '
|
||||
u'codebase.'
|
||||
)
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = low[u'kwarg']
|
||||
kwargs = f_call.get(u'kwargs', {})
|
||||
|
||||
# Update the event data with loaded args and kwargs
|
||||
data[u'fun_args'] = list(args) + ([kwargs] if kwargs else [])
|
||||
|
@ -396,7 +388,7 @@ class SyncClientMixin(object):
|
|||
data[u'success'] = True
|
||||
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
|
||||
# some functions can return boolean values
|
||||
data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
|
||||
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
|
||||
except (Exception, SystemExit) as ex:
|
||||
if isinstance(ex, salt.exceptions.NotImplemented):
|
||||
data[u'return'] = str(ex)
|
||||
|
@ -510,7 +502,7 @@ class AsyncClientMixin(object):
|
|||
|
||||
def _gen_async_pub(self, jid=None):
|
||||
if jid is None:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
|
||||
return {u'tag': tag, u'jid': jid}
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ under the "SSH Keys" section.
|
|||
personal_access_token: xxx
|
||||
ssh_key_file: /path/to/ssh/key/file
|
||||
ssh_key_names: my-key-name,my-key-name-2
|
||||
driver: digital_ocean
|
||||
driver: digitalocean
|
||||
|
||||
:depends: requests
|
||||
'''
|
||||
|
@ -59,10 +59,11 @@ except ImportError:
|
|||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'digital_ocean'
|
||||
__virtualname__ = 'digitalocean'
|
||||
__virtual_aliases__ = ('digital_ocean', 'do')
|
||||
|
||||
|
||||
# Only load in this module if the DIGITAL_OCEAN configurations are in place
|
||||
# Only load in this module if the DIGITALOCEAN configurations are in place
|
||||
def __virtual__():
|
||||
'''
|
||||
Check for DigitalOcean configurations
|
||||
|
@ -274,7 +275,7 @@ def create(vm_):
|
|||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
if vm_['profile'] and config.is_profile_configured(__opts__,
|
||||
__active_provider_name__ or 'digital_ocean',
|
||||
__active_provider_name__ or 'digitalocean',
|
||||
vm_['profile'],
|
||||
vm_=vm_) is False:
|
||||
return False
|
||||
|
@ -441,7 +442,7 @@ def create(vm_):
|
|||
ret = create_node(kwargs)
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Error creating {0} on DIGITAL_OCEAN\n\n'
|
||||
'Error creating {0} on DIGITALOCEAN\n\n'
|
||||
'The following exception was thrown when trying to '
|
||||
'run the initial deployment: {1}'.format(
|
||||
vm_['name'],
|
||||
|
@ -716,12 +717,12 @@ def import_keypair(kwargs=None, call=None):
|
|||
with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename:
|
||||
public_key_content = public_key_filename.read()
|
||||
|
||||
digital_ocean_kwargs = {
|
||||
digitalocean_kwargs = {
|
||||
'name': kwargs['keyname'],
|
||||
'public_key': public_key_content
|
||||
}
|
||||
|
||||
created_result = create_key(digital_ocean_kwargs, call=call)
|
||||
created_result = create_key(digitalocean_kwargs, call=call)
|
||||
return created_result
|
||||
|
||||
|
||||
|
@ -938,11 +939,11 @@ def show_pricing(kwargs=None, call=None):
|
|||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'digital_ocean':
|
||||
return {'Error': 'The requested profile does not belong to Digital Ocean'}
|
||||
if len(comps) < 2 or comps[1] != 'digitalocean':
|
||||
return {'Error': 'The requested profile does not belong to DigitalOcean'}
|
||||
|
||||
raw = {}
|
||||
ret = {}
|
||||
|
@ -968,7 +969,7 @@ def list_floating_ips(call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f list_floating_ips my-digitalocean-config
|
||||
'''
|
||||
|
@ -1008,7 +1009,7 @@ def show_floating_ip(kwargs=None, call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
|
@ -1041,7 +1042,7 @@ def create_floating_ip(kwargs=None, call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2'
|
||||
|
||||
|
@ -1083,7 +1084,7 @@ def delete_floating_ip(kwargs=None, call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f delete_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
||||
|
@ -1118,7 +1119,7 @@ def assign_floating_ip(kwargs=None, call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f assign_floating_ip my-digitalocean-config droplet_id=1234567 floating_ip='45.55.96.47'
|
||||
'''
|
||||
|
@ -1151,7 +1152,7 @@ def unassign_floating_ip(kwargs=None, call=None):
|
|||
|
||||
CLI Examples:
|
||||
|
||||
... code-block:: bash
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -f unassign_floating_ip my-digitalocean-config floating_ip='45.55.96.47'
|
||||
'''
|
|
@ -2643,7 +2643,7 @@ def show_pricing(kwargs=None, call=None):
|
|||
if not profile:
|
||||
return {'Error': 'The requested profile was not found'}
|
||||
|
||||
# Make sure the profile belongs to Digital Ocean
|
||||
# Make sure the profile belongs to DigitalOcean
|
||||
provider = profile.get('provider', '0:0')
|
||||
comps = provider.split(':')
|
||||
if len(comps) < 2 or comps[1] != 'gce':
|
||||
|
|
|
@ -41,6 +41,7 @@ Example profile:
|
|||
master_port: 5506
|
||||
|
||||
Tested on:
|
||||
- Fedora 26 (libvirt 3.2.1, qemu 2.9.1)
|
||||
- Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1)
|
||||
- Fedora 23 (libvirt 1.2.18, qemu 2.4.1)
|
||||
- Centos 7 (libvirt 1.2.17, qemu 1.5.3)
|
||||
|
@ -82,9 +83,6 @@ from salt.exceptions import (
|
|||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
VIRT_STATE_NAME_MAP = {0: 'running',
|
||||
1: 'running',
|
||||
2: 'running',
|
||||
|
@ -99,6 +97,20 @@ IP_LEARNING_XML = """<filterref filter='clean-traffic'>
|
|||
|
||||
__virtualname__ = 'libvirt'
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def libvirt_error_handler(ctx, error):
|
||||
'''
|
||||
Redirect stderr prints from libvirt to salt logging.
|
||||
'''
|
||||
log.debug("libvirt error {0}".format(error))
|
||||
|
||||
|
||||
if HAS_LIBVIRT:
|
||||
libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
|
@ -280,7 +292,7 @@ def create(vm_):
|
|||
|
||||
validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True
|
||||
|
||||
log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
|
||||
|
||||
try:
|
||||
# Check for required profile parameters before sending any API calls.
|
||||
|
@ -516,7 +528,7 @@ def destroy(name, call=None):
|
|||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
@ -527,7 +539,7 @@ def destroy(name, call=None):
|
|||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
{'name': name},
|
||||
args={'name': name},
|
||||
sock_dir=__opts__['sock_dir'],
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
|
|
@ -44,9 +44,6 @@ from salt.exceptions import (
|
|||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Import Salt-Cloud Libs
|
||||
import salt.utils.cloud
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -1193,7 +1190,7 @@ def list_nodes_select(call=None):
|
|||
'''
|
||||
Return a list of the VMs that are on the provider, with select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full(), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
@ -1503,7 +1500,7 @@ def _query(action=None,
|
|||
if LASTCALL >= now:
|
||||
time.sleep(ratelimit_sleep)
|
||||
|
||||
result = salt.utils.http.query(
|
||||
result = __utils__['http.query'](
|
||||
url,
|
||||
method,
|
||||
params=args,
|
||||
|
|
|
@ -24,7 +24,6 @@ import logging
|
|||
# Import salt libs
|
||||
from salt.exceptions import SaltCloudSystemExit
|
||||
import salt.config as config
|
||||
import salt.utils.cloud as cloud
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
|
@ -136,7 +135,7 @@ def create(vm_info):
|
|||
)
|
||||
|
||||
log.debug("Going to fire event: starting create")
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'starting create',
|
||||
'salt/cloud/{0}/creating'.format(vm_info['name']),
|
||||
|
@ -151,7 +150,7 @@ def create(vm_info):
|
|||
'clone_from': vm_info['clonefrom']
|
||||
}
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'requesting instance',
|
||||
'salt/cloud/{0}/requesting'.format(vm_info['name']),
|
||||
|
@ -174,10 +173,10 @@ def create(vm_info):
|
|||
vm_info['key_filename'] = key_filename
|
||||
vm_info['ssh_host'] = ip
|
||||
|
||||
res = cloud.bootstrap(vm_info, __opts__)
|
||||
res = __utils__['cloud.bootstrap'](vm_info)
|
||||
vm_result.update(res)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'created machine',
|
||||
'salt/cloud/{0}/created'.format(vm_info['name']),
|
||||
|
@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
|
|||
"private_ips",
|
||||
"public_ips",
|
||||
]
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), attributes, call,
|
||||
)
|
||||
|
||||
|
@ -278,7 +277,7 @@ def list_nodes_select(call=None):
|
|||
"""
|
||||
Return a list of the VMs that are on the provider, with select fields
|
||||
"""
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
@ -306,7 +305,7 @@ def destroy(name, call=None):
|
|||
if not vb_machine_exists(name):
|
||||
return "{0} doesn't exist and can't be deleted".format(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
|
@ -317,7 +316,7 @@ def destroy(name, call=None):
|
|||
|
||||
vb_destroy_machine(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
|
|
|
@ -53,7 +53,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S'
|
|||
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
|
||||
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
|
||||
_DFLT_LOG_FMT_LOGFILE = (
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
|
||||
)
|
||||
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']
|
||||
|
||||
|
@ -111,9 +111,10 @@ VALID_OPTS = {
|
|||
'master_port': (six.string_types, int),
|
||||
|
||||
# The behaviour of the minion when connecting to a master. Can specify 'failover',
|
||||
# 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an
|
||||
# exec module function to run to determine the master hostname. If 'disable' is specified
|
||||
# the minion will run, but will not try to connect to a master.
|
||||
# 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be
|
||||
# set to an exec module function to run to determine the master hostname. If 'disable' is
|
||||
# specified the minion will run, but will not try to connect to a master. If 'distributed'
|
||||
# is specified the minion will try to deterministically pick a master based on its' id.
|
||||
'master_type': str,
|
||||
|
||||
# Specify the format in which the master address will be specified. Can
|
||||
|
@ -186,6 +187,16 @@ VALID_OPTS = {
|
|||
# A unique identifier for this daemon
|
||||
'id': str,
|
||||
|
||||
# Use a module function to determine the unique identifier. If this is
|
||||
# set and 'id' is not set, it will allow invocation of a module function
|
||||
# to determine the value of 'id'. For simple invocations without function
|
||||
# arguments, this may be a string that is the function name. For
|
||||
# invocations with function arguments, this may be a dictionary with the
|
||||
# key being the function name, and the value being an embedded dictionary
|
||||
# where each key is a function argument name and each value is the
|
||||
# corresponding argument value.
|
||||
'id_function': (dict, str),
|
||||
|
||||
# The directory to store all cache files.
|
||||
'cachedir': str,
|
||||
|
||||
|
@ -332,7 +343,7 @@ VALID_OPTS = {
|
|||
# Whether or not scheduled mine updates should be accompanied by a job return for the job cache
|
||||
'mine_return_job': bool,
|
||||
|
||||
# Schedule a mine update every n number of seconds
|
||||
# The number of minutes between mine updates.
|
||||
'mine_interval': int,
|
||||
|
||||
# The ipc strategy. (i.e., sockets versus tcp, etc)
|
||||
|
@ -417,6 +428,12 @@ VALID_OPTS = {
|
|||
# Tell the client to display the jid when a job is published
|
||||
'show_jid': bool,
|
||||
|
||||
# Ensure that a generated jid is always unique. If this is set, the jid
|
||||
# format is different due to an underscore and process id being appended
|
||||
# to the jid. WARNING: A change to the jid format may break external
|
||||
# applications that depend on the original format.
|
||||
'unique_jid': bool,
|
||||
|
||||
# Tells the highstate outputter to show successful states. False will omit successes.
|
||||
'state_verbose': bool,
|
||||
|
||||
|
@ -573,6 +590,23 @@ VALID_OPTS = {
|
|||
# False in 2016.3.0
|
||||
'add_proxymodule_to_opts': bool,
|
||||
|
||||
# Merge pillar data into configuration opts.
|
||||
# As multiple proxies can run on the same server, we may need different
|
||||
# configuration options for each, while there's one single configuration file.
|
||||
# The solution is merging the pillar data of each proxy minion into the opts.
|
||||
'proxy_merge_pillar_in_opts': bool,
|
||||
|
||||
# Deep merge of pillar data into configuration opts.
|
||||
# Evaluated only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_deep_merge_pillar_in_opts': bool,
|
||||
|
||||
# The strategy used when merging pillar into opts.
|
||||
# Considered only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_merge_pillar_in_opts_strategy': str,
|
||||
|
||||
# Allow enabling mine details using pillar data.
|
||||
'proxy_mines_pillar': bool,
|
||||
|
||||
# In some particular cases, always alive proxies are not beneficial.
|
||||
# This option can be used in those less dynamic environments:
|
||||
# the user can request the connection
|
||||
|
@ -908,6 +942,7 @@ VALID_OPTS = {
|
|||
'ssh_scan_timeout': float,
|
||||
'ssh_identities_only': bool,
|
||||
'ssh_log_file': str,
|
||||
'ssh_config_file': str,
|
||||
|
||||
# Enable ioflo verbose logging. Warning! Very verbose!
|
||||
'ioflo_verbose': int,
|
||||
|
@ -1079,6 +1114,11 @@ VALID_OPTS = {
|
|||
# (in other words, require that minions have 'minion_sign_messages'
|
||||
# turned on)
|
||||
'require_minion_sign_messages': bool,
|
||||
|
||||
# The list of config entries to be passed to external pillar function as
|
||||
# part of the extra_minion_data param
|
||||
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
|
||||
'pass_to_ext_pillars': (six.string_types, list),
|
||||
}
|
||||
|
||||
# default configurations
|
||||
|
@ -1102,6 +1142,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'root_dir': salt.syspaths.ROOT_DIR,
|
||||
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
|
||||
'id': '',
|
||||
'id_function': {},
|
||||
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
|
||||
'append_minionid_config_dirs': [],
|
||||
'cache_jobs': False,
|
||||
|
@ -1197,6 +1238,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'gitfs_ref_types': ['branch', 'tag', 'sha'],
|
||||
'gitfs_refspecs': _DFLT_REFSPECS,
|
||||
'gitfs_disable_saltenv_mapping': False,
|
||||
'unique_jid': False,
|
||||
'hash_type': 'sha256',
|
||||
'disable_modules': [],
|
||||
'disable_returners': [],
|
||||
|
@ -1441,6 +1483,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'hgfs_saltenv_blacklist': [],
|
||||
'show_timeout': True,
|
||||
'show_jid': False,
|
||||
'unique_jid': False,
|
||||
'svnfs_remotes': [],
|
||||
'svnfs_mountpoint': '',
|
||||
'svnfs_root': '',
|
||||
|
@ -1607,6 +1650,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'ssh_scan_timeout': 0.01,
|
||||
'ssh_identities_only': False,
|
||||
'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'),
|
||||
'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'),
|
||||
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
|
||||
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
|
||||
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
|
||||
|
@ -1673,6 +1717,12 @@ DEFAULT_PROXY_MINION_OPTS = {
|
|||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
'proxy_merge_pillar_in_opts': False,
|
||||
'proxy_deep_merge_pillar_in_opts': False,
|
||||
'proxy_merge_pillar_in_opts_strategy': 'smart',
|
||||
|
||||
'proxy_mines_pillar': True,
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
# If this option is set to False,
|
||||
# the connection with the remote dumb device
|
||||
|
@ -2671,7 +2721,7 @@ def old_to_new(opts):
|
|||
providers = (
|
||||
'AWS',
|
||||
'CLOUDSTACK',
|
||||
'DIGITAL_OCEAN',
|
||||
'DIGITALOCEAN',
|
||||
'EC2',
|
||||
'GOGRID',
|
||||
'IBMSCE',
|
||||
|
@ -3335,6 +3385,57 @@ def _cache_id(minion_id, cache_file):
|
|||
log.error('Could not cache minion ID: {0}'.format(exc))
|
||||
|
||||
|
||||
def call_id_function(opts):
|
||||
'''
|
||||
Evaluate the function that determines the ID if the 'id_function'
|
||||
option is set and return the result
|
||||
'''
|
||||
if opts.get('id'):
|
||||
return opts['id']
|
||||
|
||||
# Import 'salt.loader' here to avoid a circular dependency
|
||||
import salt.loader as loader
|
||||
|
||||
if isinstance(opts['id_function'], str):
|
||||
mod_fun = opts['id_function']
|
||||
fun_kwargs = {}
|
||||
elif isinstance(opts['id_function'], dict):
|
||||
mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
|
||||
if fun_kwargs is None:
|
||||
fun_kwargs = {}
|
||||
else:
|
||||
log.error('\'id_function\' option is neither a string nor a dictionary')
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
# split module and function and try loading the module
|
||||
mod, fun = mod_fun.split('.')
|
||||
if not opts.get('grains'):
|
||||
# Get grains for use by the module
|
||||
opts['grains'] = loader.grains(opts)
|
||||
|
||||
try:
|
||||
id_mod = loader.raw_mod(opts, mod, fun)
|
||||
if not id_mod:
|
||||
raise KeyError
|
||||
# we take whatever the module returns as the minion ID
|
||||
newid = id_mod[mod_fun](**fun_kwargs)
|
||||
if not isinstance(newid, str) or not newid:
|
||||
log.error('Function {0} returned value "{1}" of type {2} instead of string'.format(
|
||||
mod_fun, newid, type(newid))
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
log.info('Evaluated minion ID from module: {0}'.format(mod_fun))
|
||||
return newid
|
||||
except TypeError:
|
||||
log.error('Function arguments {0} are incorrect for function {1}'.format(
|
||||
fun_kwargs, mod_fun)
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
except KeyError:
|
||||
log.error('Failed to load module {0}'.format(mod_fun))
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
|
||||
def get_id(opts, cache_minion_id=False):
|
||||
'''
|
||||
Guess the id of the minion.
|
||||
|
@ -3376,13 +3477,21 @@ def get_id(opts, cache_minion_id=False):
|
|||
log.debug('Guessing ID. The id can be explicitly set in {0}'
|
||||
.format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
|
||||
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
if opts.get('id_function'):
|
||||
newid = call_id_function(opts)
|
||||
else:
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
|
||||
if opts.get('minion_id_lowercase'):
|
||||
newid = newid.lower()
|
||||
log.debug('Changed minion id {0} to lowercase.'.format(newid))
|
||||
if '__role' in opts and opts.get('__role') == 'minion':
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(newid))
|
||||
if opts.get('id_function'):
|
||||
log.debug('Found minion id from external function {0}: {1}'.format(
|
||||
opts['id_function'], newid))
|
||||
else:
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(
|
||||
newid))
|
||||
if cache_minion_id and opts.get('minion_id_caching', True):
|
||||
_cache_id(newid, id_cache)
|
||||
is_ipv4 = salt.utils.network.is_ipv4(newid)
|
||||
|
|
215
salt/config/schemas/esxcluster.py
Normal file
215
salt/config/schemas/esxcluster.py
Normal file
|
@ -0,0 +1,215 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
|
||||
salt.config.schemas.esxcluster
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
ESX Cluster configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
DefinitionsSchema,
|
||||
ComplexSchemaItem,
|
||||
DictItem,
|
||||
ArrayItem,
|
||||
IntegerItem,
|
||||
BooleanItem,
|
||||
StringItem,
|
||||
AnyOfItem)
|
||||
|
||||
|
||||
class OptionValueItem(ComplexSchemaItem):
|
||||
'''Sechma item of the OptionValue'''
|
||||
|
||||
title = 'OptionValue'
|
||||
key = StringItem(title='Key', required=True)
|
||||
value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()])
|
||||
|
||||
|
||||
class AdmissionControlPolicyItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA admission control policy
|
||||
'''
|
||||
|
||||
title = 'Admission Control Policy'
|
||||
|
||||
cpu_failover_percent = IntegerItem(
|
||||
title='CPU Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
memory_failover_percent = IntegerItem(
|
||||
title='Memory Failover Percent',
|
||||
minimum=0, maximum=100)
|
||||
|
||||
|
||||
class DefaultVmSettingsItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the HA default vm settings
|
||||
'''
|
||||
|
||||
title = 'Default VM Settings'
|
||||
|
||||
isolation_response = StringItem(
|
||||
title='Isolation Response',
|
||||
enum=['clusterIsolationResponse', 'none', 'powerOff', 'shutdown'])
|
||||
restart_priority = StringItem(
|
||||
title='Restart Priority',
|
||||
enum=['clusterRestartPriority', 'disabled', 'high', 'low', 'medium'])
|
||||
|
||||
|
||||
class HAConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of ESX cluster high availability
|
||||
'''
|
||||
|
||||
title = 'HA Configuration'
|
||||
description = 'ESX cluster HA configuration json schema item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if HA should be enabled')
|
||||
admission_control_enabled = BooleanItem(
|
||||
title='Admission Control Enabled')
|
||||
admission_control_policy = AdmissionControlPolicyItem()
|
||||
default_vm_settings = DefaultVmSettingsItem()
|
||||
hb_ds_candidate_policy = StringItem(
|
||||
title='Heartbeat Datastore Candidate Policy',
|
||||
enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference',
|
||||
'userSelectedDs'])
|
||||
host_monitoring = StringItem(title='Host Monitoring',
|
||||
choices=['enabled', 'disabled'])
|
||||
options = ArrayItem(min_items=1, items=OptionValueItem())
|
||||
vm_monitoring = StringItem(
|
||||
title='Vm Monitoring',
|
||||
choices=['vmMonitoringDisabled', 'vmAndAppMonitoring',
|
||||
'vmMonitoringOnly'])
|
||||
|
||||
|
||||
class vSANClusterConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster vSAN configuration
|
||||
'''
|
||||
|
||||
title = 'vSAN Configuration'
|
||||
description = 'ESX cluster vSAN configurationi item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if vSAN should be enabled')
|
||||
auto_claim_storage = BooleanItem(
|
||||
title='Auto Claim Storage',
|
||||
description='Specifies whether the storage of member ESXi hosts should '
|
||||
'be automatically claimed for vSAN')
|
||||
dedup_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies dedup should be enabled')
|
||||
compression_enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if compression should be enabled')
|
||||
|
||||
|
||||
class DRSConfigItem(ComplexSchemaItem):
|
||||
'''
|
||||
Schema item of the ESX cluster DRS configuration
|
||||
'''
|
||||
|
||||
title = 'DRS Configuration'
|
||||
description = 'ESX cluster DRS configuration item'
|
||||
|
||||
enabled = BooleanItem(
|
||||
title='Enabled',
|
||||
description='Specifies if DRS should be enabled')
|
||||
vmotion_rate = IntegerItem(
|
||||
title='vMotion rate',
|
||||
description='Aggressiveness to do automatic vMotions: '
|
||||
'1 (least aggressive) - 5 (most aggressive)',
|
||||
minimum=1,
|
||||
maximum=5)
|
||||
default_vm_behavior = StringItem(
|
||||
title='Default VM DRS Behavior',
|
||||
description='Specifies the default VM DRS behavior',
|
||||
enum=['fullyAutomated', 'partiallyAutomated', 'manual'])
|
||||
|
||||
|
||||
class ESXClusterConfigSchema(DefinitionsSchema):
|
||||
'''
|
||||
Schema of the ESX cluster config
|
||||
'''
|
||||
|
||||
title = 'ESX Cluster Configuration Schema'
|
||||
description = 'ESX cluster configuration schema'
|
||||
|
||||
ha = HAConfigItem()
|
||||
vsan = vSANClusterConfigItem()
|
||||
drs = DRSConfigItem()
|
||||
vm_swap_placement = StringItem(title='VM Swap Placement')
|
||||
|
||||
|
||||
class ESXClusterEntitySchema(Schema):
|
||||
'''Schema of the ESX cluster entity'''
|
||||
|
||||
title = 'ESX Cluster Entity Schema'
|
||||
description = 'ESX cluster entity schema'
|
||||
|
||||
type = StringItem(title='Type',
|
||||
description='Specifies the entity type',
|
||||
required=True,
|
||||
enum=['cluster'])
|
||||
|
||||
datacenter = StringItem(title='Datacenter',
|
||||
description='Specifies the cluster datacenter',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
cluster = StringItem(title='Cluster',
|
||||
description='Specifies the cluster name',
|
||||
required=True,
|
||||
pattern=r'\w+')
|
||||
|
||||
|
||||
class LicenseSchema(Schema):
|
||||
'''
|
||||
Schema item of the ESX cluster vSAN configuration
|
||||
'''
|
||||
|
||||
title = 'Licenses schema'
|
||||
description = 'License configuration schema'
|
||||
|
||||
licenses = DictItem(
|
||||
title='Licenses',
|
||||
description='Dictionary containing the license name to key mapping',
|
||||
required=True,
|
||||
additional_properties=StringItem(
|
||||
title='License Key',
|
||||
description='Specifies the license key',
|
||||
pattern=r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$'))
|
||||
|
||||
|
||||
class EsxclusterProxySchema(Schema):
|
||||
'''
|
||||
Schema of the esxcluster proxy input
|
||||
'''
|
||||
|
||||
title = 'Esxcluster Proxy Schema'
|
||||
description = 'Esxcluster proxy schema'
|
||||
additional_properties = False
|
||||
proxytype = StringItem(required=True,
|
||||
enum=['esxcluster'])
|
||||
vcenter = StringItem(required=True, pattern=r'[^\s]+')
|
||||
datacenter = StringItem(required=True)
|
||||
cluster = StringItem(required=True)
|
||||
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
|
||||
username = StringItem()
|
||||
passwords = ArrayItem(min_items=1,
|
||||
items=StringItem(),
|
||||
unique_items=True)
|
||||
# TODO Should be changed when anyOf is supported for schemas
|
||||
domain = StringItem()
|
||||
principal = StringItem()
|
||||
protocol = StringItem()
|
||||
port = IntegerItem(minimum=1)
|
33
salt/config/schemas/vcenter.py
Normal file
33
salt/config/schemas/vcenter.py
Normal file
|
@ -0,0 +1,33 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)`
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
salt.config.schemas.vcenter
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
VCenter configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
StringItem)
|
||||
|
||||
|
||||
class VCenterEntitySchema(Schema):
|
||||
'''
|
||||
Entity Schema for a VCenter.
|
||||
'''
|
||||
title = 'VCenter Entity Schema'
|
||||
description = 'VCenter entity schema'
|
||||
type = StringItem(title='Type',
|
||||
description='Specifies the entity type',
|
||||
required=True,
|
||||
enum=['vcenter'])
|
||||
|
||||
vcenter = StringItem(title='vCenter',
|
||||
description='Specifies the vcenter hostname',
|
||||
required=True)
|
|
@ -400,7 +400,7 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
|
|||
kind=kinds.applKinds.master))
|
||||
except gaierror as ex:
|
||||
log.warning("Unable to connect to master {0}: {1}".format(mha, ex))
|
||||
if self.opts.value.get('master_type') != 'failover':
|
||||
if self.opts.value.get(u'master_type') not in (u'failover', u'distributed'):
|
||||
raise ex
|
||||
if not stack.remotes:
|
||||
raise ex
|
||||
|
|
|
@ -550,11 +550,12 @@ class RemoteFuncs(object):
|
|||
if match_type.lower() == 'compound':
|
||||
match_type = 'compound_pillar_exact'
|
||||
checker = salt.utils.minions.CkMinions(self.opts)
|
||||
minions = checker.check_minions(
|
||||
_res = checker.check_minions(
|
||||
load['tgt'],
|
||||
match_type,
|
||||
greedy=False
|
||||
)
|
||||
minions = _res['minions']
|
||||
for minion in minions:
|
||||
fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine')
|
||||
if isinstance(fdata, dict):
|
||||
|
@ -718,7 +719,7 @@ class RemoteFuncs(object):
|
|||
Handle the return data sent from the minions
|
||||
'''
|
||||
# Generate EndTime
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts))
|
||||
# If the return data is invalid, just ignore it
|
||||
if any(key not in load for key in ('return', 'jid', 'id')):
|
||||
return False
|
||||
|
@ -872,9 +873,10 @@ class RemoteFuncs(object):
|
|||
pub_load['tgt_type'] = load['tgt_type']
|
||||
ret = {}
|
||||
ret['jid'] = self.local.cmd_async(**pub_load)
|
||||
ret['minions'] = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
pub_load['tgt_type'])
|
||||
ret['minions'] = _res['minions']
|
||||
auth_cache = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'publish_auth')
|
||||
|
@ -1011,35 +1013,33 @@ class LocalFuncs(object):
|
|||
'''
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred '
|
||||
'for user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
# All runner opts pass through eauth
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.').format(auth_type, username)))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(load, auth_type)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
username = auth_check.get('username')
|
||||
if not runner_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and 'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
|
@ -1048,56 +1048,49 @@ class LocalFuncs(object):
|
|||
username)
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return {'error': {'name': exc.__class__.__name__,
|
||||
'args': exc.args,
|
||||
'message': str(exc)}}
|
||||
|
||||
def wheel(self, load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_key(load, self.key):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "user" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(
|
||||
load,
|
||||
auth_type,
|
||||
key=key,
|
||||
show_username=True
|
||||
)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
username = auth_check.get('username')
|
||||
if auth_type != 'user':
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(auth_type, username)))
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
if not wheel_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# Authenticated. Do the job.
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = load.pop('fun')
|
||||
tag = salt.utils.event.tagify(jid, prefix='wheel')
|
||||
data = {'fun': "wheel.{0}".format(fun),
|
||||
|
@ -1114,7 +1107,7 @@ class LocalFuncs(object):
|
|||
'data': data}
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
|
||||
fun,
|
||||
exc.__class__.__name__,
|
||||
|
@ -1167,11 +1160,12 @@ class LocalFuncs(object):
|
|||
|
||||
# Retrieve the minions list
|
||||
delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res['minions']
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
|
@ -1181,12 +1175,7 @@ class LocalFuncs(object):
|
|||
return ''
|
||||
|
||||
# Get acl from eauth module.
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
extra['eauth'] = token['eauth']
|
||||
extra['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
|
@ -1383,3 +1372,18 @@ class LocalFuncs(object):
|
|||
},
|
||||
'pub': pub_load
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, load):
|
||||
key = None
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -266,6 +266,12 @@ class SaltCacheError(SaltException):
|
|||
'''
|
||||
|
||||
|
||||
class TimeoutError(SaltException):
|
||||
'''
|
||||
Thrown when an opration cannot be completet within a given time limit.
|
||||
'''
|
||||
|
||||
|
||||
class SaltReqTimeoutError(SaltException):
|
||||
'''
|
||||
Thrown when a salt master request call fails to return within the timeout
|
||||
|
@ -393,7 +399,19 @@ class TemplateError(SaltException):
|
|||
# Validation related exceptions
|
||||
class InvalidConfigError(CommandExecutionError):
|
||||
'''
|
||||
Used when the input is invalid
|
||||
Used when the config is invalid
|
||||
'''
|
||||
|
||||
|
||||
class ArgumentValueError(CommandExecutionError):
|
||||
'''
|
||||
Used when an invalid argument was passed to a command execution
|
||||
'''
|
||||
|
||||
|
||||
class InvalidEntityError(CommandExecutionError):
|
||||
'''
|
||||
Used when an entity fails validation
|
||||
'''
|
||||
|
||||
|
||||
|
|
7
salt/ext/vsan/__init__.py
Normal file
7
salt/ext/vsan/__init__.py
Normal file
|
@ -0,0 +1,7 @@
|
|||
# coding: utf-8 -*-
|
||||
'''
|
||||
This directory contains the object model and utils for the vsan VMware SDK
|
||||
extension.
|
||||
|
||||
They are governed under their respective licenses.
|
||||
'''
|
165
salt/ext/vsan/vsanapiutils.py
Normal file
165
salt/ext/vsan/vsanapiutils.py
Normal file
|
@ -0,0 +1,165 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Copyright 2016 VMware, Inc. All rights reserved.
|
||||
|
||||
This module defines basic helper functions used in the sampe codes
|
||||
"""
|
||||
|
||||
# pylint: skip-file
|
||||
__author__ = 'VMware, Inc'
|
||||
|
||||
from pyVmomi import vim, vmodl, SoapStubAdapter
|
||||
#import the VSAN API python bindings
|
||||
import vsanmgmtObjects
|
||||
|
||||
VSAN_API_VC_SERVICE_ENDPOINT = '/vsanHealth'
|
||||
VSAN_API_ESXI_SERVICE_ENDPOINT = '/vsan'
|
||||
|
||||
#Constuct a stub for VSAN API access using VC or ESXi sessions from existing
|
||||
#stubs. Correspoding VC or ESXi service endpoint is required. VC service
|
||||
#endpoint is used as default
|
||||
def _GetVsanStub(
|
||||
stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
|
||||
context=None, version='vim.version.version10'
|
||||
):
|
||||
|
||||
hostname = stub.host.split(':')[0]
|
||||
vsanStub = SoapStubAdapter(
|
||||
host=hostname,
|
||||
path=endpoint,
|
||||
version=version,
|
||||
sslContext=context
|
||||
)
|
||||
vsanStub.cookie = stub.cookie
|
||||
return vsanStub
|
||||
|
||||
#Construct a stub for access VC side VSAN APIs
|
||||
def GetVsanVcStub(stub, context=None):
|
||||
return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT,
|
||||
context=context)
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanEsxStub(stub, context=None):
|
||||
return _GetVsanStub(stub, endpoint=VSAN_API_ESXI_SERVICE_ENDPOINT,
|
||||
context=context)
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanVcMos(vcStub, context=None):
|
||||
vsanStub = GetVsanVcStub(vcStub, context)
|
||||
vcMos = {
|
||||
'vsan-disk-management-system' : vim.cluster.VsanVcDiskManagementSystem(
|
||||
'vsan-disk-management-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-stretched-cluster-system' : vim.cluster.VsanVcStretchedClusterSystem(
|
||||
'vsan-stretched-cluster-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-config-system' : vim.cluster.VsanVcClusterConfigSystem(
|
||||
'vsan-cluster-config-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-performance-manager' : vim.cluster.VsanPerformanceManager(
|
||||
'vsan-performance-manager',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-health-system' : vim.cluster.VsanVcClusterHealthSystem(
|
||||
'vsan-cluster-health-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-upgrade-systemex' : vim.VsanUpgradeSystemEx(
|
||||
'vsan-upgrade-systemex',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-cluster-space-report-system' : vim.cluster.VsanSpaceReportSystem(
|
||||
'vsan-cluster-space-report-system',
|
||||
vsanStub
|
||||
),
|
||||
|
||||
'vsan-cluster-object-system' : vim.cluster.VsanObjectSystem(
|
||||
'vsan-cluster-object-system',
|
||||
vsanStub
|
||||
),
|
||||
}
|
||||
|
||||
return vcMos
|
||||
|
||||
#Construct a stub for access ESXi side VSAN APIs
|
||||
def GetVsanEsxMos(esxStub, context=None):
|
||||
vsanStub = GetVsanEsxStub(esxStub, context)
|
||||
esxMos = {
|
||||
'vsan-performance-manager' : vim.cluster.VsanPerformanceManager(
|
||||
'vsan-performance-manager',
|
||||
vsanStub
|
||||
),
|
||||
'ha-vsan-health-system' : vim.host.VsanHealthSystem(
|
||||
'ha-vsan-health-system',
|
||||
vsanStub
|
||||
),
|
||||
'vsan-object-system' : vim.cluster.VsanObjectSystem(
|
||||
'vsan-object-system',
|
||||
vsanStub
|
||||
),
|
||||
}
|
||||
|
||||
return esxMos
|
||||
|
||||
#Convert a VSAN Task to a Task MO binding to VC service
|
||||
#@param vsanTask the VSAN Task MO
|
||||
#@param stub the stub for the VC API
|
||||
def ConvertVsanTaskToVcTask(vsanTask, vcStub):
|
||||
vcTask = vim.Task(vsanTask._moId, vcStub)
|
||||
return vcTask
|
||||
|
||||
def WaitForTasks(tasks, si):
|
||||
"""
|
||||
Given the service instance si and tasks, it returns after all the
|
||||
tasks are complete
|
||||
"""
|
||||
|
||||
pc = si.content.propertyCollector
|
||||
|
||||
taskList = [str(task) for task in tasks]
|
||||
|
||||
# Create filter
|
||||
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
|
||||
for task in tasks]
|
||||
propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
|
||||
pathSet=[], all=True)
|
||||
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
|
||||
filterSpec.objectSet = objSpecs
|
||||
filterSpec.propSet = [propSpec]
|
||||
filter = pc.CreateFilter(filterSpec, True)
|
||||
|
||||
try:
|
||||
version, state = None, None
|
||||
|
||||
# Loop looking for updates till the state moves to a completed state.
|
||||
while len(taskList):
|
||||
update = pc.WaitForUpdates(version)
|
||||
for filterSet in update.filterSet:
|
||||
for objSet in filterSet.objectSet:
|
||||
task = objSet.obj
|
||||
for change in objSet.changeSet:
|
||||
if change.name == 'info':
|
||||
state = change.val.state
|
||||
elif change.name == 'info.state':
|
||||
state = change.val
|
||||
else:
|
||||
continue
|
||||
|
||||
if not str(task) in taskList:
|
||||
continue
|
||||
|
||||
if state == vim.TaskInfo.State.success:
|
||||
# Remove task from taskList
|
||||
taskList.remove(str(task))
|
||||
elif state == vim.TaskInfo.State.error:
|
||||
raise task.info.error
|
||||
# Move to next version
|
||||
version = update.version
|
||||
finally:
|
||||
if filter:
|
||||
filter.Destroy()
|
143
salt/ext/vsan/vsanmgmtObjects.py
Normal file
143
salt/ext/vsan/vsanmgmtObjects.py
Normal file
File diff suppressed because one or more lines are too long
|
@ -185,12 +185,13 @@ class Client(object):
|
|||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def cache_file(self, path, saltenv=u'base', cachedir=None):
|
||||
def cache_file(self, path, saltenv=u'base', cachedir=None, source_hash=None):
|
||||
'''
|
||||
Pull a file down from the file server and store it in the minion
|
||||
file cache
|
||||
'''
|
||||
return self.get_url(path, u'', True, saltenv, cachedir=cachedir)
|
||||
return self.get_url(
|
||||
path, u'', True, saltenv, cachedir=cachedir, source_hash=source_hash)
|
||||
|
||||
def cache_files(self, paths, saltenv=u'base', cachedir=None):
|
||||
'''
|
||||
|
@ -470,7 +471,7 @@ class Client(object):
|
|||
return ret
|
||||
|
||||
def get_url(self, url, dest, makedirs=False, saltenv=u'base',
|
||||
no_cache=False, cachedir=None):
|
||||
no_cache=False, cachedir=None, source_hash=None):
|
||||
'''
|
||||
Get a single file from a URL.
|
||||
'''
|
||||
|
@ -525,6 +526,18 @@ class Client(object):
|
|||
return u''
|
||||
elif not no_cache:
|
||||
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
|
||||
if source_hash is not None:
|
||||
try:
|
||||
source_hash = source_hash.split('=')[-1]
|
||||
form = salt.utils.files.HASHES_REVMAP[len(source_hash)]
|
||||
if salt.utils.get_hash(dest, form) == source_hash:
|
||||
log.debug(
|
||||
'Cached copy of %s (%s) matches source_hash %s, '
|
||||
'skipping download', url, dest, source_hash
|
||||
)
|
||||
return dest
|
||||
except (AttributeError, KeyError, IOError, OSError):
|
||||
pass
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
os.makedirs(destdir)
|
||||
|
@ -532,7 +545,9 @@ class Client(object):
|
|||
if url_data.scheme == u's3':
|
||||
try:
|
||||
def s3_opt(key, default=None):
|
||||
u'''Get value of s3.<key> from Minion config or from Pillar'''
|
||||
'''
|
||||
Get value of s3.<key> from Minion config or from Pillar
|
||||
'''
|
||||
if u's3.' + key in self.opts:
|
||||
return self.opts[u's3.' + key]
|
||||
try:
|
||||
|
@ -744,12 +759,7 @@ class Client(object):
|
|||
Cache a file then process it as a template
|
||||
'''
|
||||
if u'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
u'Oxygen',
|
||||
u'Parameter \'env\' has been detected in the argument list. This '
|
||||
u'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop(u'env')
|
||||
|
||||
kwargs[u'saltenv'] = saltenv
|
||||
|
@ -790,7 +800,7 @@ class Client(object):
|
|||
|
||||
def _extrn_path(self, url, saltenv, cachedir=None):
|
||||
'''
|
||||
Return the extn_filepath for a given url
|
||||
Return the extrn_filepath for a given url
|
||||
'''
|
||||
url_data = urlparse(url)
|
||||
if salt.utils.platform.is_windows():
|
||||
|
@ -1300,10 +1310,10 @@ class RemoteClient(Client):
|
|||
hash_type = self.opts.get(u'hash_type', u'md5')
|
||||
ret[u'hsum'] = salt.utils.get_hash(path, form=hash_type)
|
||||
ret[u'hash_type'] = hash_type
|
||||
return ret, list(os.stat(path))
|
||||
return ret
|
||||
load = {u'path': path,
|
||||
u'saltenv': saltenv,
|
||||
u'cmd': u'_file_hash_and_stat'}
|
||||
u'cmd': u'_file_hash'}
|
||||
return self.channel.send(load)
|
||||
|
||||
def hash_file(self, path, saltenv=u'base'):
|
||||
|
@ -1312,14 +1322,33 @@ class RemoteClient(Client):
|
|||
master file server prepend the path with salt://<file on server>
|
||||
otherwise, prepend the file with / for a local file.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)[0]
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
|
||||
def hash_and_stat_file(self, path, saltenv=u'base'):
|
||||
'''
|
||||
The same as hash_file, but also return the file's mode, or None if no
|
||||
mode data is present.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
hash_result = self.hash_file(path, saltenv)
|
||||
try:
|
||||
path = self._check_proto(path)
|
||||
except MinionError as err:
|
||||
if not os.path.isfile(path):
|
||||
return hash_result, None
|
||||
else:
|
||||
try:
|
||||
return hash_result, list(os.stat(path))
|
||||
except Exception:
|
||||
return hash_result, None
|
||||
load = {'path': path,
|
||||
'saltenv': saltenv,
|
||||
'cmd': '_file_find'}
|
||||
fnd = self.channel.send(load)
|
||||
try:
|
||||
stat_result = fnd.get('stat')
|
||||
except AttributeError:
|
||||
stat_result = None
|
||||
return hash_result, stat_result
|
||||
|
||||
def list_env(self, saltenv=u'base'):
|
||||
'''
|
||||
|
|
|
@ -553,12 +553,7 @@ class Fileserver(object):
|
|||
kwargs[args[0]] = args[1]
|
||||
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
if 'saltenv' in kwargs:
|
||||
saltenv = kwargs.pop('saltenv')
|
||||
|
@ -583,12 +578,7 @@ class Fileserver(object):
|
|||
'dest': ''}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
|
||||
|
@ -609,13 +599,7 @@ class Fileserver(object):
|
|||
Common code for hashing and stating files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. '
|
||||
'This parameter is no longer used and has been replaced by '
|
||||
'\'saltenv\' as of Salt 2016.11.0. This warning will be removed '
|
||||
'in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
|
@ -656,12 +640,7 @@ class Fileserver(object):
|
|||
Deletes the file_lists cache files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
saltenv = load.get('saltenv', [])
|
||||
|
@ -738,12 +717,7 @@ class Fileserver(object):
|
|||
Return a list of files from the dominant environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -769,12 +743,7 @@ class Fileserver(object):
|
|||
List all emptydirs in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -800,12 +769,7 @@ class Fileserver(object):
|
|||
List all directories in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -831,12 +795,7 @@ class Fileserver(object):
|
|||
Return a list of symlinked files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
|
|
@ -736,12 +736,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -770,12 +765,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
|
@ -804,12 +794,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
|
||||
|
@ -852,12 +837,7 @@ def _get_file_list(load):
|
|||
Get a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
@ -897,12 +877,7 @@ def _get_dir_list(load):
|
|||
Get a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
|
|
@ -165,12 +165,7 @@ def file_hash(load, fnd):
|
|||
ret = {}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
@ -235,12 +230,7 @@ def file_list(load):
|
|||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
@ -319,12 +309,7 @@ def dir_list(load):
|
|||
- source-minion/absolute/path
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
|
|
@ -40,12 +40,7 @@ def find_file(path, saltenv='base', **kwargs):
|
|||
Search the environment for the relative path.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
path = os.path.normpath(path)
|
||||
|
@ -117,12 +112,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -218,12 +208,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
|
@ -298,12 +283,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files, dirs, emtydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in __opts__['file_roots']:
|
||||
|
@ -444,12 +424,7 @@ def symlink_list(load):
|
|||
Return a dict of all symlinks based on a given path on the Master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
|
|
@ -126,12 +126,7 @@ def find_file(path, saltenv='base', **kwargs):
|
|||
is missing, or if the MD5 does not match.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
fnd = {'bucket': None,
|
||||
|
@ -168,12 +163,7 @@ def file_hash(load, fnd):
|
|||
Return an MD5 file hash
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
@ -201,12 +191,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -245,12 +230,7 @@ def file_list(load):
|
|||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
|
@ -286,12 +266,7 @@ def dir_list(load):
|
|||
Return a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
|
|
|
@ -631,12 +631,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -665,12 +660,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
|
@ -723,12 +713,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
|
38
salt/grains/cimc.py
Normal file
38
salt/grains/cimc.py
Normal file
|
@ -0,0 +1,38 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Generate baseline proxy minion grains for cimc hosts.
|
||||
|
||||
'''
|
||||
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.platform
|
||||
import salt.proxy.cimc
|
||||
|
||||
__proxyenabled__ = ['cimc']
|
||||
__virtualname__ = 'cimc'
|
||||
|
||||
log = logging.getLogger(__file__)
|
||||
|
||||
GRAINS_CACHE = {'os_family': 'Cisco UCS'}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
try:
|
||||
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'cimc':
|
||||
return __virtualname__
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def cimc(proxy=None):
|
||||
if not proxy:
|
||||
return {}
|
||||
if proxy['cimc.initialized']() is False:
|
||||
return {}
|
||||
return {'cimc': proxy['cimc.grains']()}
|
|
@ -1205,6 +1205,10 @@ _OS_FAMILY_MAP = {
|
|||
'Raspbian': 'Debian',
|
||||
'Devuan': 'Debian',
|
||||
'antiX': 'Debian',
|
||||
'Kali': 'Debian',
|
||||
'neon': 'Debian',
|
||||
'Cumulus': 'Debian',
|
||||
'Deepin': 'Debian',
|
||||
'NILinuxRT': 'NILinuxRT',
|
||||
'NILinuxRT-XFCE': 'NILinuxRT',
|
||||
'KDE neon': 'Debian',
|
||||
|
@ -2425,4 +2429,46 @@ def get_master():
|
|||
# master
|
||||
return {'master': __opts__.get('master', '')}
|
||||
|
||||
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
||||
|
||||
def default_gateway():
|
||||
'''
|
||||
Populates grains which describe whether a server has a default gateway
|
||||
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
|
||||
for a `default` at the beginning of any line. Assuming the standard
|
||||
`default via <ip>` format for default gateways, it will also parse out the
|
||||
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
|
||||
|
||||
If the `ip` command is unavailable, no grains will be populated.
|
||||
|
||||
Currently does not support multiple default gateways. The grains will be
|
||||
set to the first default gateway found.
|
||||
|
||||
List of grains:
|
||||
|
||||
ip4_gw: True # ip/True/False if default ipv4 gateway
|
||||
ip6_gw: True # ip/True/False if default ipv6 gateway
|
||||
ip_gw: True # True if either of the above is True, False otherwise
|
||||
'''
|
||||
grains = {}
|
||||
if not salt.utils.path.which('ip'):
|
||||
return {}
|
||||
grains['ip_gw'] = False
|
||||
grains['ip4_gw'] = False
|
||||
grains['ip6_gw'] = False
|
||||
if __salt__['cmd.run']('ip -4 route show | grep "^default"', python_shell=True):
|
||||
grains['ip_gw'] = True
|
||||
grains['ip4_gw'] = True
|
||||
try:
|
||||
gateway_ip = __salt__['cmd.run']('ip -4 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
|
||||
grains['ip4_gw'] = gateway_ip if gateway_ip else True
|
||||
except Exception as exc:
|
||||
pass
|
||||
if __salt__['cmd.run']('ip -6 route show | grep "^default"', python_shell=True):
|
||||
grains['ip_gw'] = True
|
||||
grains['ip6_gw'] = True
|
||||
try:
|
||||
gateway_ip = __salt__['cmd.run']('ip -6 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
|
||||
grains['ip6_gw'] = gateway_ip if gateway_ip else True
|
||||
except Exception as exc:
|
||||
pass
|
||||
return grains
|
||||
|
|
|
@ -11,6 +11,7 @@ import logging
|
|||
|
||||
# Import salt libs
|
||||
import salt.utils.files
|
||||
import salt.utils.platform
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -21,7 +22,14 @@ def shell():
|
|||
'''
|
||||
# Provides:
|
||||
# shell
|
||||
return {'shell': os.environ.get('SHELL', '/bin/sh')}
|
||||
if salt.utils.platform.is_windows():
|
||||
env_var = 'COMSPEC'
|
||||
default = r'C:\Windows\system32\cmd.exe'
|
||||
else:
|
||||
env_var = 'SHELL'
|
||||
default = '/bin/sh'
|
||||
|
||||
return {'shell': os.environ.get(env_var, default)}
|
||||
|
||||
|
||||
def config():
|
||||
|
|
|
@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`.
|
|||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
|
||||
|
@ -47,16 +48,30 @@ def _search(prefix="latest/"):
|
|||
Recursively look up all grains in the metadata server
|
||||
'''
|
||||
ret = {}
|
||||
for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'):
|
||||
linedata = http.query(os.path.join(HOST, prefix))
|
||||
if 'body' not in linedata:
|
||||
return ret
|
||||
for line in linedata['body'].split('\n'):
|
||||
if line.endswith('/'):
|
||||
ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
|
||||
elif prefix == 'latest/':
|
||||
# (gtmanfred) The first level should have a forward slash since
|
||||
# they have stuff underneath. This will not be doubled up though,
|
||||
# because lines ending with a slash are checked first.
|
||||
ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
|
||||
elif line.endswith(('dynamic', 'meta-data')):
|
||||
ret[line] = _search(prefix=os.path.join(prefix, line))
|
||||
elif '=' in line:
|
||||
key, value = line.split('=')
|
||||
ret[value] = _search(prefix=os.path.join(prefix, key))
|
||||
else:
|
||||
ret[line] = http.query(os.path.join(HOST, prefix, line))['body']
|
||||
retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
|
||||
# (gtmanfred) This try except block is slightly faster than
|
||||
# checking if the string starts with a curly brace
|
||||
try:
|
||||
ret[line] = json.loads(retdata)
|
||||
except ValueError:
|
||||
ret[line] = retdata
|
||||
return ret
|
||||
|
||||
|
||||
|
|
|
@ -447,8 +447,8 @@ def optional_args(proxy=None):
|
|||
device2:
|
||||
True
|
||||
'''
|
||||
opt_args = _get_device_grain('optional_args', proxy=proxy)
|
||||
if _FORBIDDEN_OPT_ARGS:
|
||||
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
|
||||
if opt_args and _FORBIDDEN_OPT_ARGS:
|
||||
for arg in _FORBIDDEN_OPT_ARGS:
|
||||
opt_args.pop(arg, None)
|
||||
return {'optional_args': opt_args}
|
||||
|
|
37
salt/key.py
37
salt/key.py
|
@ -496,7 +496,7 @@ class Key(object):
|
|||
minions = []
|
||||
for key, val in six.iteritems(keys):
|
||||
minions.extend(val)
|
||||
if not self.opts.get(u'preserve_minion_cache', False) or not preserve_minions:
|
||||
if not self.opts.get(u'preserve_minion_cache', False):
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], self.ACC)
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
|
@ -743,7 +743,7 @@ class Key(object):
|
|||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
|
@ -781,11 +781,10 @@ class Key(object):
|
|||
salt.utils.event.tagify(prefix=u'key'))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if preserve_minions:
|
||||
preserve_minions_list = matches.get(u'minions', [])
|
||||
if self.opts.get(u'preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
preserve_minions_list = []
|
||||
self.check_minion_cache(preserve_minions=preserve_minions_list)
|
||||
self.check_minion_cache()
|
||||
if self.opts.get(u'rotate_aes_key'):
|
||||
salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user'])
|
||||
return (
|
||||
|
@ -976,16 +975,17 @@ class RaetKey(Key):
|
|||
minions.extend(val)
|
||||
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], u'minions')
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions:
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if not self.opts.get('preserve_minion_cache', False):
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
|
||||
kind = self.opts.get(u'__role', u'') # application kind
|
||||
if kind not in kinds.APPL_KINDS:
|
||||
|
@ -1227,7 +1227,7 @@ class RaetKey(Key):
|
|||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
|
@ -1258,7 +1258,10 @@ class RaetKey(Key):
|
|||
os.remove(os.path.join(self.opts[u'pki_dir'], status, key))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
if self.opts.get('preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
self.check_minion_cache()
|
||||
return (
|
||||
self.name_match(match) if match is not None
|
||||
else self.dict_match(matches)
|
||||
|
|
|
@ -270,7 +270,7 @@ def raw_mod(opts, name, functions, mod=u'modules'):
|
|||
testmod['test.ping']()
|
||||
'''
|
||||
loader = LazyLoader(
|
||||
_module_dirs(opts, mod, u'rawmodule'),
|
||||
_module_dirs(opts, mod, u'module'),
|
||||
opts,
|
||||
tag=u'rawmodule',
|
||||
virtual_enable=False,
|
||||
|
|
|
@ -11,7 +11,18 @@
|
|||
Fluent Logging Handler
|
||||
-------------------
|
||||
|
||||
In the salt configuration file:
|
||||
In the `fluent` configuration file:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
<source>
|
||||
type forward
|
||||
bind localhost
|
||||
port 24224
|
||||
</source>
|
||||
|
||||
Then, to send logs via fluent in Logstash format, add the
|
||||
following to the salt (master and/or minion) configuration file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -19,14 +30,32 @@
|
|||
host: localhost
|
||||
port: 24224
|
||||
|
||||
In the `fluent`_ configuration file:
|
||||
To send logs via fluent in the Graylog raw json format, add the
|
||||
following to the salt (master and/or minion) configuration file:
|
||||
|
||||
.. code-block:: text
|
||||
.. code-block:: yaml
|
||||
|
||||
<source>
|
||||
type forward
|
||||
port 24224
|
||||
</source>
|
||||
fluent_handler:
|
||||
host: localhost
|
||||
port: 24224
|
||||
payload_type: graylog
|
||||
tags:
|
||||
- salt_master.SALT
|
||||
|
||||
The above also illustrates the `tags` option, which allows
|
||||
one to set descriptive (or useful) tags on records being
|
||||
sent. If not provided, this defaults to the single tag:
|
||||
'salt'. Also note that, via Graylog "magic", the 'facility'
|
||||
of the logged message is set to 'SALT' (the portion of the
|
||||
tag after the first period), while the tag itself will be
|
||||
set to simply 'salt_master'. This is a feature, not a bug :)
|
||||
|
||||
Note:
|
||||
There is a third emitter, for the GELF format, but it is
|
||||
largely untested, and I don't currently have a setup supporting
|
||||
this config, so while it runs cleanly and outputs what LOOKS to
|
||||
be valid GELF, any real-world feedback on its usefulness, and
|
||||
correctness, will be appreciated.
|
||||
|
||||
Log Level
|
||||
.........
|
||||
|
@ -53,7 +82,7 @@ import time
|
|||
import datetime
|
||||
import socket
|
||||
import threading
|
||||
|
||||
import types
|
||||
|
||||
# Import salt libs
|
||||
from salt.log.setup import LOG_LEVELS
|
||||
|
@ -91,6 +120,19 @@ __virtualname__ = 'fluent'
|
|||
|
||||
_global_sender = None
|
||||
|
||||
# Python logger's idea of "level" is wildly at variance with
|
||||
# Graylog's (and, incidentally, the rest of the civilized world).
|
||||
syslog_levels = {
|
||||
'EMERG': 0,
|
||||
'ALERT': 2,
|
||||
'CRIT': 2,
|
||||
'ERR': 3,
|
||||
'WARNING': 4,
|
||||
'NOTICE': 5,
|
||||
'INFO': 6,
|
||||
'DEBUG': 7
|
||||
}
|
||||
|
||||
|
||||
def setup(tag, **kwargs):
|
||||
host = kwargs.get('host', 'localhost')
|
||||
|
@ -116,55 +158,133 @@ def __virtual__():
|
|||
|
||||
|
||||
def setup_handlers():
|
||||
host = port = address = None
|
||||
host = port = None
|
||||
|
||||
if 'fluent_handler' in __opts__:
|
||||
host = __opts__['fluent_handler'].get('host', None)
|
||||
port = __opts__['fluent_handler'].get('port', None)
|
||||
version = __opts__['fluent_handler'].get('version', 1)
|
||||
payload_type = __opts__['fluent_handler'].get('payload_type', None)
|
||||
# in general, you want the value of tag to ALSO be a member of tags
|
||||
tags = __opts__['fluent_handler'].get('tags', ['salt'])
|
||||
tag = tags[0] if len(tags) else 'salt'
|
||||
if payload_type == 'graylog':
|
||||
version = 0
|
||||
elif payload_type == 'gelf':
|
||||
# We only support version 1.1 (the latest) of GELF...
|
||||
version = 1.1
|
||||
else:
|
||||
# Default to logstash for backwards compat
|
||||
payload_type = 'logstash'
|
||||
version = __opts__['fluent_handler'].get('version', 1)
|
||||
|
||||
if host is None and port is None:
|
||||
log.debug(
|
||||
'The required \'fluent_handler\' configuration keys, '
|
||||
'\'host\' and/or \'port\', are not properly configured. Not '
|
||||
'configuring the fluent logging handler.'
|
||||
'enabling the fluent logging handler.'
|
||||
)
|
||||
else:
|
||||
logstash_formatter = LogstashFormatter(version=version)
|
||||
fluent_handler = FluentHandler('salt', host=host, port=port)
|
||||
fluent_handler.setFormatter(logstash_formatter)
|
||||
formatter = MessageFormatter(payload_type=payload_type, version=version, tags=tags)
|
||||
fluent_handler = FluentHandler(tag, host=host, port=port)
|
||||
fluent_handler.setFormatter(formatter)
|
||||
fluent_handler.setLevel(
|
||||
LOG_LEVELS[
|
||||
__opts__['fluent_handler'].get(
|
||||
'log_level',
|
||||
# Not set? Get the main salt log_level setting on the
|
||||
# configuration file
|
||||
__opts__.get(
|
||||
'log_level',
|
||||
# Also not set?! Default to 'error'
|
||||
'error'
|
||||
)
|
||||
)
|
||||
]
|
||||
LOG_LEVELS[__opts__['fluent_handler'].get('log_level', __opts__.get('log_level', 'error'))]
|
||||
)
|
||||
yield fluent_handler
|
||||
|
||||
if host is None and port is None and address is None:
|
||||
if host is None and port is None:
|
||||
yield False
|
||||
|
||||
|
||||
class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
|
||||
def __init__(self, msg_type='logstash', msg_path='logstash', version=1):
|
||||
self.msg_path = msg_path
|
||||
self.msg_type = msg_type
|
||||
class MessageFormatter(logging.Formatter, NewStyleClassMixIn):
|
||||
def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None):
|
||||
self.payload_type = payload_type
|
||||
self.version = version
|
||||
self.format = getattr(self, 'format_v{0}'.format(version))
|
||||
super(LogstashFormatter, self).__init__(fmt=None, datefmt=None)
|
||||
self.tag = tags[0] if len(tags) else 'salt' # 'salt' for backwards compat
|
||||
self.tags = tags
|
||||
self.msg_path = msg_path if msg_path else payload_type
|
||||
self.msg_type = msg_type if msg_type else payload_type
|
||||
format_func = 'format_{0}_v{1}'.format(payload_type, version).replace('.', '_')
|
||||
self.format = getattr(self, format_func)
|
||||
super(MessageFormatter, self).__init__(fmt=None, datefmt=None)
|
||||
|
||||
def formatTime(self, record, datefmt=None):
|
||||
if self.payload_type == 'gelf': # GELF uses epoch times
|
||||
return record.created
|
||||
return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z'
|
||||
|
||||
def format_v0(self, record):
|
||||
def format_graylog_v0(self, record):
|
||||
'''
|
||||
Graylog 'raw' format is essentially the raw record, minimally munged to provide
|
||||
the bare minimum that td-agent requires to accept and route the event. This is
|
||||
well suited to a config where the client td-agents log directly to Graylog.
|
||||
'''
|
||||
message_dict = {
|
||||
'message': record.getMessage(),
|
||||
'timestamp': self.formatTime(record),
|
||||
# Graylog uses syslog levels, not whatever it is Python does...
|
||||
'level': syslog_levels.get(record.levelname, 'ALERT'),
|
||||
'tag': self.tag
|
||||
}
|
||||
|
||||
if record.exc_info:
|
||||
exc_info = self.formatException(record.exc_info)
|
||||
message_dict.update({'full_message': exc_info})
|
||||
|
||||
# Add any extra attributes to the message field
|
||||
for key, value in six.iteritems(record.__dict__):
|
||||
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
|
||||
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
|
||||
'msecs', 'message', 'msg', 'relativeCreated', 'version'):
|
||||
# These are already handled above or explicitly pruned.
|
||||
continue
|
||||
|
||||
if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
|
||||
val = value
|
||||
else:
|
||||
val = repr(value)
|
||||
message_dict.update({'{0}'.format(key): val})
|
||||
return message_dict
|
||||
|
||||
def format_gelf_v1_1(self, record):
|
||||
'''
|
||||
If your agent is (or can be) configured to forward pre-formed GELF to Graylog
|
||||
with ZERO fluent processing, this function is for YOU, pal...
|
||||
'''
|
||||
message_dict = {
|
||||
'version': self.version,
|
||||
'host': salt.utils.network.get_fqhostname(),
|
||||
'short_message': record.getMessage(),
|
||||
'timestamp': self.formatTime(record),
|
||||
'level': syslog_levels.get(record.levelname, 'ALERT'),
|
||||
"_tag": self.tag
|
||||
}
|
||||
|
||||
if record.exc_info:
|
||||
exc_info = self.formatException(record.exc_info)
|
||||
message_dict.update({'full_message': exc_info})
|
||||
|
||||
# Add any extra attributes to the message field
|
||||
for key, value in six.iteritems(record.__dict__):
|
||||
if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
|
||||
'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs',
|
||||
'msecs', 'message', 'msg', 'relativeCreated', 'version'):
|
||||
# These are already handled above or explicitly avoided.
|
||||
continue
|
||||
|
||||
if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699
|
||||
val = value
|
||||
else:
|
||||
val = repr(value)
|
||||
# GELF spec require "non-standard" fields to be prefixed with '_' (underscore).
|
||||
message_dict.update({'_{0}'.format(key): val})
|
||||
|
||||
return message_dict
|
||||
|
||||
def format_logstash_v0(self, record):
|
||||
'''
|
||||
Messages are formatted in logstash's expected format.
|
||||
'''
|
||||
host = salt.utils.network.get_fqhostname()
|
||||
message_dict = {
|
||||
'@timestamp': self.formatTime(record),
|
||||
|
@ -186,7 +306,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
|
|||
),
|
||||
'@source_host': host,
|
||||
'@source_path': self.msg_path,
|
||||
'@tags': ['salt'],
|
||||
'@tags': self.tags,
|
||||
'@type': self.msg_type,
|
||||
}
|
||||
|
||||
|
@ -216,7 +336,10 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
|
|||
message_dict['@fields'][key] = repr(value)
|
||||
return message_dict
|
||||
|
||||
def format_v1(self, record):
|
||||
def format_logstash_v1(self, record):
|
||||
'''
|
||||
Messages are formatted in logstash's expected format.
|
||||
'''
|
||||
message_dict = {
|
||||
'@version': 1,
|
||||
'@timestamp': self.formatTime(record),
|
||||
|
@ -230,7 +353,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn):
|
|||
'funcName': record.funcName,
|
||||
'processName': record.processName,
|
||||
'message': record.getMessage(),
|
||||
'tags': ['salt'],
|
||||
'tags': self.tags,
|
||||
'type': self.msg_type
|
||||
}
|
||||
|
||||
|
|
175
salt/master.py
175
salt/master.py
|
@ -1311,7 +1311,8 @@ class AESFuncs(object):
|
|||
load.get(u'saltenv', load.get(u'env')),
|
||||
ext=load.get(u'ext'),
|
||||
pillar_override=load.get(u'pillar_override', {}),
|
||||
pillarenv=load.get(u'pillarenv'))
|
||||
pillarenv=load.get(u'pillarenv'),
|
||||
extra_minion_data=load.get(u'extra_minion_data'))
|
||||
data = pillar.compile_pillar()
|
||||
self.fs_.update_opts()
|
||||
if self.opts.get(u'minion_data_cache', False):
|
||||
|
@ -1667,49 +1668,36 @@ class ClearFuncs(object):
|
|||
Send a master control function back to the runner system
|
||||
'''
|
||||
# All runner ops pass through eauth
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not runner_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and u'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
clear_load.pop(u'password', None)
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
|
@ -1726,57 +1714,45 @@ class ClearFuncs(object):
|
|||
username)
|
||||
except Exception as exc:
|
||||
log.error(u'Exception occurred while introspecting %s: %s', fun, exc)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
return {u'error': {u'name': exc.__class__.__name__,
|
||||
u'args': exc.args,
|
||||
u'message': str(exc)}}
|
||||
|
||||
def wheel(self, clear_load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
username = None
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
clear_load.pop(u'password', None)
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not wheel_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and u'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
|
@ -1786,7 +1762,7 @@ class ClearFuncs(object):
|
|||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = clear_load.pop(u'fun')
|
||||
tag = tagify(jid, prefix=u'wheel')
|
||||
data = {u'fun': u"wheel.{0}".format(fun),
|
||||
|
@ -1852,11 +1828,13 @@ class ClearFuncs(object):
|
|||
|
||||
# Retrieve the minions list
|
||||
delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res.get('minions', list())
|
||||
missing = _res.get('missing', list())
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get(u'token', False):
|
||||
|
@ -1866,12 +1844,7 @@ class ClearFuncs(object):
|
|||
return u''
|
||||
|
||||
# Get acl
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
extra[u'eauth'] = token[u'eauth']
|
||||
extra[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
|
@ -1961,7 +1934,7 @@ class ClearFuncs(object):
|
|||
if jid is None:
|
||||
return {u'enc': u'clear',
|
||||
u'load': {u'error': u'Master failed to assign jid'}}
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra)
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
|
||||
|
||||
# Send it!
|
||||
self._send_pub(payload)
|
||||
|
@ -1970,10 +1943,29 @@ class ClearFuncs(object):
|
|||
u'enc': u'clear',
|
||||
u'load': {
|
||||
u'jid': clear_load[u'jid'],
|
||||
u'minions': minions
|
||||
u'minions': minions,
|
||||
u'missing': missing
|
||||
}
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, clear_load):
|
||||
sensitive_load_keys = []
|
||||
key = None
|
||||
if u'token' in clear_load:
|
||||
auth_type = u'token'
|
||||
err_name = u'TokenAuthenticationError'
|
||||
sensitive_load_keys = [u'token']
|
||||
elif u'eauth' in clear_load:
|
||||
auth_type = u'eauth'
|
||||
err_name = u'EauthAuthenticationError'
|
||||
sensitive_load_keys = [u'username', u'password']
|
||||
else:
|
||||
auth_type = u'user'
|
||||
err_name = u'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key, sensitive_load_keys
|
||||
|
||||
def _prep_jid(self, clear_load, extra):
|
||||
'''
|
||||
Return a jid for this publication
|
||||
|
@ -2007,7 +1999,7 @@ class ClearFuncs(object):
|
|||
chan = salt.transport.server.PubServerChannel.factory(opts)
|
||||
chan.publish(load)
|
||||
|
||||
def _prep_pub(self, minions, jid, clear_load, extra):
|
||||
def _prep_pub(self, minions, jid, clear_load, extra, missing):
|
||||
'''
|
||||
Take a given load and perform the necessary steps
|
||||
to prepare a publication.
|
||||
|
@ -2028,6 +2020,7 @@ class ClearFuncs(object):
|
|||
u'fun': clear_load[u'fun'],
|
||||
u'arg': clear_load[u'arg'],
|
||||
u'minions': minions,
|
||||
u'missing': missing,
|
||||
}
|
||||
|
||||
# Announce the job on the event bus
|
||||
|
|
109
salt/minion.py
109
salt/minion.py
|
@ -21,6 +21,7 @@ import multiprocessing
|
|||
from random import randint, shuffle
|
||||
from stat import S_IMODE
|
||||
import salt.serializers.msgpack
|
||||
from binascii import crc32
|
||||
|
||||
# Import Salt Libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
|
@ -102,6 +103,7 @@ import salt.defaults.exitcodes
|
|||
import salt.cli.daemons
|
||||
import salt.log.setup
|
||||
|
||||
import salt.utils.dictupdate
|
||||
from salt.config import DEFAULT_MINION_OPTS
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.utils.debug import enable_sigusr1_handler
|
||||
|
@ -443,13 +445,30 @@ class MinionBase(object):
|
|||
if opts[u'master_type'] == u'func':
|
||||
eval_master_func(opts)
|
||||
|
||||
# if failover is set, master has to be of type list
|
||||
elif opts[u'master_type'] == u'failover':
|
||||
# if failover or distributed is set, master has to be of type list
|
||||
elif opts[u'master_type'] in (u'failover', u'distributed'):
|
||||
if isinstance(opts[u'master'], list):
|
||||
log.info(
|
||||
u'Got list of available master addresses: %s',
|
||||
opts[u'master']
|
||||
)
|
||||
|
||||
if opts[u'master_type'] == u'distributed':
|
||||
master_len = len(opts[u'master'])
|
||||
if master_len > 1:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
master_idx = crc32(opts[u'id']) % master_len
|
||||
try:
|
||||
preferred_masters = opts[u'master']
|
||||
preferred_masters[0] = opts[u'master'][master_idx]
|
||||
preferred_masters[1:] = [m for m in opts[u'master'] if m != preferred_masters[0]]
|
||||
opts[u'master'] = preferred_masters
|
||||
log.info(u'Distributed to the master at \'{0}\'.'.format(opts[u'master'][0]))
|
||||
except (KeyError, AttributeError, TypeError):
|
||||
log.warning(u'Failed to distribute to a specific master.')
|
||||
else:
|
||||
log.warning(u'master_type = distributed needs more than 1 master.')
|
||||
|
||||
if opts[u'master_shuffle']:
|
||||
if opts[u'master_failback']:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
|
@ -497,7 +516,7 @@ class MinionBase(object):
|
|||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
# If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
|
||||
# See issue 21082 for details
|
||||
if opts[u'retry_dns']:
|
||||
if opts[u'retry_dns'] and opts[u'master_type'] == u'failover':
|
||||
msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
|
||||
u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
|
||||
log.critical(msg)
|
||||
|
@ -845,7 +864,7 @@ class MinionManager(MinionBase):
|
|||
Spawn all the coroutines which will sign in to masters
|
||||
'''
|
||||
masters = self.opts[u'master']
|
||||
if self.opts[u'master_type'] == u'failover' or not isinstance(self.opts[u'master'], list):
|
||||
if (self.opts[u'master_type'] in (u'failover', u'distributed')) or not isinstance(self.opts[u'master'], list):
|
||||
masters = [masters]
|
||||
|
||||
for master in masters:
|
||||
|
@ -1624,13 +1643,24 @@ class Minion(MinionBase):
|
|||
minion side execution.
|
||||
'''
|
||||
salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid']))
|
||||
ret = {
|
||||
u'return': {},
|
||||
u'retcode': {},
|
||||
u'success': {}
|
||||
}
|
||||
for ind in range(0, len(data[u'fun'])):
|
||||
ret[u'success'][data[u'fun'][ind]] = False
|
||||
multifunc_ordered = opts.get(u'multifunc_ordered', False)
|
||||
num_funcs = len(data[u'fun'])
|
||||
if multifunc_ordered:
|
||||
ret = {
|
||||
u'return': [None] * num_funcs,
|
||||
u'retcode': [None] * num_funcs,
|
||||
u'success': [False] * num_funcs
|
||||
}
|
||||
else:
|
||||
ret = {
|
||||
u'return': {},
|
||||
u'retcode': {},
|
||||
u'success': {}
|
||||
}
|
||||
|
||||
for ind in range(0, num_funcs):
|
||||
if not multifunc_ordered:
|
||||
ret[u'success'][data[u'fun'][ind]] = False
|
||||
try:
|
||||
minion_blackout_violation = False
|
||||
if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
|
||||
|
@ -1654,16 +1684,27 @@ class Minion(MinionBase):
|
|||
data[u'arg'][ind],
|
||||
data)
|
||||
minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
|
||||
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
|
||||
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
|
||||
u'retcode',
|
||||
0
|
||||
)
|
||||
ret[u'success'][data[u'fun'][ind]] = True
|
||||
if multifunc_ordered:
|
||||
ret[u'return'][ind] = func(*args, **kwargs)
|
||||
ret[u'retcode'][ind] = minion_instance.functions.pack[u'__context__'].get(
|
||||
u'retcode',
|
||||
0
|
||||
)
|
||||
ret[u'success'][ind] = True
|
||||
else:
|
||||
ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
|
||||
ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
|
||||
u'retcode',
|
||||
0
|
||||
)
|
||||
ret[u'success'][data[u'fun'][ind]] = True
|
||||
except Exception as exc:
|
||||
trb = traceback.format_exc()
|
||||
log.warning(u'The minion function caused an exception: %s', exc)
|
||||
ret[u'return'][data[u'fun'][ind]] = trb
|
||||
if multifunc_ordered:
|
||||
ret[u'return'][ind] = trb
|
||||
else:
|
||||
ret[u'return'][data[u'fun'][ind]] = trb
|
||||
ret[u'jid'] = data[u'jid']
|
||||
ret[u'fun'] = data[u'fun']
|
||||
ret[u'fun_args'] = data[u'arg']
|
||||
|
@ -1930,6 +1971,10 @@ class Minion(MinionBase):
|
|||
self.beacons.disable_beacon(name)
|
||||
elif func == u'list':
|
||||
self.beacons.list_beacons()
|
||||
elif func == u'list_available':
|
||||
self.beacons.list_available_beacons()
|
||||
elif func == u'validate_beacon':
|
||||
self.beacons.validate_beacon(name, beacon_data)
|
||||
|
||||
def environ_setenv(self, tag, data):
|
||||
'''
|
||||
|
@ -2651,6 +2696,8 @@ class SyndicManager(MinionBase):
|
|||
'''
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
successful = False
|
||||
# Call for each master
|
||||
for master, syndic_future in self.iter_master_options(master_id):
|
||||
if not syndic_future.done() or syndic_future.exception():
|
||||
log.error(
|
||||
|
@ -2661,15 +2708,15 @@ class SyndicManager(MinionBase):
|
|||
|
||||
try:
|
||||
getattr(syndic_future.result(), func)(*args, **kwargs)
|
||||
return
|
||||
successful = True
|
||||
except SaltClientError:
|
||||
log.error(
|
||||
u'Unable to call %s on %s, trying another...',
|
||||
func, master
|
||||
)
|
||||
self._mark_master_dead(master)
|
||||
continue
|
||||
log.critical(u'Unable to call %s on any masters!', func)
|
||||
if not successful:
|
||||
log.critical(u'Unable to call %s on any masters!', func)
|
||||
|
||||
def _return_pub_syndic(self, values, master_id=None):
|
||||
'''
|
||||
|
@ -3191,6 +3238,26 @@ class ProxyMinion(Minion):
|
|||
if u'proxy' not in self.opts:
|
||||
self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy']
|
||||
|
||||
if self.opts.get(u'proxy_merge_pillar_in_opts'):
|
||||
# Override proxy opts with pillar data when the user required.
|
||||
self.opts = salt.utils.dictupdate.merge(self.opts,
|
||||
self.opts[u'pillar'],
|
||||
strategy=self.opts.get(u'proxy_merge_pillar_in_opts_strategy'),
|
||||
merge_lists=self.opts.get(u'proxy_deep_merge_pillar_in_opts', False))
|
||||
elif self.opts.get(u'proxy_mines_pillar'):
|
||||
# Even when not required, some details such as mine configuration
|
||||
# should be merged anyway whenever possible.
|
||||
if u'mine_interval' in self.opts[u'pillar']:
|
||||
self.opts[u'mine_interval'] = self.opts[u'pillar'][u'mine_interval']
|
||||
if u'mine_functions' in self.opts[u'pillar']:
|
||||
general_proxy_mines = self.opts.get(u'mine_functions', [])
|
||||
specific_proxy_mines = self.opts[u'pillar'][u'mine_functions']
|
||||
try:
|
||||
self.opts[u'mine_functions'] = general_proxy_mines + specific_proxy_mines
|
||||
except TypeError as terr:
|
||||
log.error(u'Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
|
||||
self.opts[u'id']))
|
||||
|
||||
fq_proxyname = self.opts[u'proxy'][u'proxytype']
|
||||
|
||||
# Need to load the modules so they get all the dunder variables
|
||||
|
|
|
@ -12,6 +12,7 @@ import logging
|
|||
|
||||
# Import Salt libs
|
||||
import salt.utils.files
|
||||
import salt.utils.path
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
@ -241,4 +242,4 @@ def _read_link(name):
|
|||
Throws an OSError if the link does not exist
|
||||
'''
|
||||
alt_link_path = '/etc/alternatives/{0}'.format(name)
|
||||
return os.readlink(alt_link_path)
|
||||
return salt.utils.path.readlink(alt_link_path)
|
||||
|
|
|
@ -447,11 +447,15 @@ def config(name, config, edit=True):
|
|||
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
|
||||
'''
|
||||
|
||||
configs = []
|
||||
for entry in config:
|
||||
key = next(six.iterkeys(entry))
|
||||
configs = _parse_config(entry[key], key)
|
||||
if edit:
|
||||
with salt.utils.files.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configs)
|
||||
return configs
|
||||
configs.append(_parse_config(entry[key], key))
|
||||
|
||||
# Python auto-correct line endings
|
||||
configstext = "\n".join(configs)
|
||||
if edit:
|
||||
with salt.utils.files.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configstext)
|
||||
return configstext
|
||||
|
|
|
@ -97,11 +97,15 @@ __virtualname__ = 'pkg'
|
|||
|
||||
def __virtual__():
|
||||
'''
|
||||
Confirm this module is on a Debian based system
|
||||
Confirm this module is on a Debian-based system
|
||||
'''
|
||||
if __grains__.get('os_family') in ('Kali', 'Debian', 'neon'):
|
||||
return __virtualname__
|
||||
elif __grains__.get('os_family', False) == 'Cumulus':
|
||||
# If your minion is running an OS which is Debian-based but does not have
|
||||
# an "os_family" grain of Debian, then the proper fix is NOT to check for
|
||||
# the minion's "os_family" grain here in the __virtual__. The correct fix
|
||||
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
|
||||
# dict in salt/grains/core.py, so that we assign the correct "os_family"
|
||||
# grain to the minion.
|
||||
if __grains__.get('os_family') == 'Debian':
|
||||
return __virtualname__
|
||||
return (False, 'The pkg module could not be loaded: unsupported OS family')
|
||||
|
||||
|
|
|
@ -60,7 +60,8 @@ def list_(name,
|
|||
strip_components=None,
|
||||
clean=False,
|
||||
verbose=False,
|
||||
saltenv='base'):
|
||||
saltenv='base',
|
||||
source_hash=None):
|
||||
'''
|
||||
.. versionadded:: 2016.11.0
|
||||
.. versionchanged:: 2016.11.2
|
||||
|
@ -149,6 +150,14 @@ def list_(name,
|
|||
``archive``. This is only applicable when ``archive`` is a file from
|
||||
the ``salt://`` fileserver.
|
||||
|
||||
source_hash
|
||||
If ``name`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
.. _tarfile: https://docs.python.org/2/library/tarfile.html
|
||||
.. _xz: http://tukaani.org/xz/
|
||||
|
||||
|
@ -160,6 +169,7 @@ def list_(name,
|
|||
salt '*' archive.list /path/to/myfile.tar.gz strip_components=1
|
||||
salt '*' archive.list salt://foo.tar.gz
|
||||
salt '*' archive.list https://domain.tld/myfile.zip
|
||||
salt '*' archive.list https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
|
||||
salt '*' archive.list ftp://10.1.2.3/foo.rar
|
||||
'''
|
||||
def _list_tar(name, cached, decompress_cmd, failhard=False):
|
||||
|
@ -309,7 +319,7 @@ def list_(name,
|
|||
)
|
||||
return dirs, files, []
|
||||
|
||||
cached = __salt__['cp.cache_file'](name, saltenv)
|
||||
cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
|
||||
if not cached:
|
||||
raise CommandExecutionError('Failed to cache {0}'.format(name))
|
||||
|
||||
|
@ -1094,7 +1104,7 @@ def unzip(zip_file,
|
|||
return _trim_files(cleaned_files, trim_output)
|
||||
|
||||
|
||||
def is_encrypted(name, clean=False, saltenv='base'):
|
||||
def is_encrypted(name, clean=False, saltenv='base', source_hash=None):
|
||||
'''
|
||||
.. versionadded:: 2016.11.0
|
||||
|
||||
|
@ -1113,6 +1123,18 @@ def is_encrypted(name, clean=False, saltenv='base'):
|
|||
If there is an error listing the archive's contents, the cached
|
||||
file will not be removed, to allow for troubleshooting.
|
||||
|
||||
saltenv : base
|
||||
Specifies the fileserver environment from which to retrieve
|
||||
``archive``. This is only applicable when ``archive`` is a file from
|
||||
the ``salt://`` fileserver.
|
||||
|
||||
source_hash
|
||||
If ``name`` is an http(s)/ftp URL and the file exists in the minion's
|
||||
file cache, this option can be passed to keep the minion from
|
||||
re-downloading the archive if the cached copy matches the specified
|
||||
hash.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Examples:
|
||||
|
||||
|
@ -1122,9 +1144,10 @@ def is_encrypted(name, clean=False, saltenv='base'):
|
|||
salt '*' archive.is_encrypted salt://foo.zip
|
||||
salt '*' archive.is_encrypted salt://foo.zip saltenv=dev
|
||||
salt '*' archive.is_encrypted https://domain.tld/myfile.zip clean=True
|
||||
salt '*' archive.is_encrypted https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15
|
||||
salt '*' archive.is_encrypted ftp://10.1.2.3/foo.zip
|
||||
'''
|
||||
cached = __salt__['cp.cache_file'](name, saltenv)
|
||||
cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash)
|
||||
if not cached:
|
||||
raise CommandExecutionError('Failed to cache {0}'.format(name))
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue