mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Merge branch 'develop' into exec-code-all-fix
This commit is contained in:
commit
5f0bdd620e
151 changed files with 6192 additions and 2844 deletions
4
.github/stale.yml
vendored
4
.github/stale.yml
vendored
|
@ -1,8 +1,8 @@
|
|||
# Probot Stale configuration file
|
||||
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
# 1115 is approximately 3 years and 1 month
|
||||
daysUntilStale: 1115
|
||||
# 1100 is approximately 3 years
|
||||
daysUntilStale: 1100
|
||||
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
|
|
11
conf/cloud
11
conf/cloud
|
@ -97,3 +97,14 @@
|
|||
#
|
||||
#delete_sshkeys: False
|
||||
|
||||
# Whether or not to include grains information in the /etc/salt/minion file
|
||||
# which is generated when the minion is provisioned. For example...
|
||||
# grains:
|
||||
# salt-cloud:
|
||||
# driver: ec2
|
||||
# provider: my_ec2:ec2
|
||||
# profile: micro_ec2
|
||||
#
|
||||
# Default: 'True'
|
||||
#
|
||||
#enable_cloud_grains: 'True'
|
||||
|
|
|
@ -533,6 +533,9 @@
|
|||
# Add any additional locations to look for master runners:
|
||||
#runner_dirs: []
|
||||
|
||||
# Add any additional locations to look for master utils:
|
||||
#utils_dirs: []
|
||||
|
||||
# Enable Cython for master side modules:
|
||||
#cython_enable: False
|
||||
|
||||
|
|
|
@ -39,6 +39,13 @@ specified target expression.
|
|||
desitination will be assumed to be a directory. Finally, recursion is now
|
||||
supported, allowing for entire directories to be copied.
|
||||
|
||||
.. versionchanged:: 2016.11.7,2017.7.2
|
||||
Reverted back to the old copy mode to preserve backward compatibility. The
|
||||
new functionality added in 2016.6.6 and 2017.7.0 is now available using the
|
||||
``-C`` or ``--chunked`` CLI arguments. Note that compression, recursive
|
||||
copying, and support for copying large files is only available in chunked
|
||||
mode.
|
||||
|
||||
Options
|
||||
=======
|
||||
|
||||
|
@ -56,9 +63,16 @@ Options
|
|||
.. include:: _includes/target-selection.rst
|
||||
|
||||
|
||||
.. option:: -C, --chunked
|
||||
|
||||
Use new chunked mode to copy files. This mode supports large files, recursive
|
||||
directories copying and compression.
|
||||
|
||||
.. versionadded:: 2016.11.7,2017.7.2
|
||||
|
||||
.. option:: -n, --no-compression
|
||||
|
||||
Disable gzip compression.
|
||||
Disable gzip compression in chunked mode.
|
||||
|
||||
.. versionadded:: 2016.3.7,2016.11.6,2017.7.0
|
||||
|
||||
|
|
6
doc/ref/clouds/all/salt.cloud.clouds.oneandone.rst
Normal file
6
doc/ref/clouds/all/salt.cloud.clouds.oneandone.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
===========================
|
||||
salt.cloud.clouds.oneandone
|
||||
===========================
|
||||
|
||||
.. automodule:: salt.cloud.clouds.oneandone
|
||||
:members:
|
|
@ -234,6 +234,7 @@ Valid options:
|
|||
- clouds
|
||||
- tops
|
||||
- roster
|
||||
- tokens
|
||||
|
||||
.. conf_master:: module_dirs
|
||||
|
||||
|
@ -1730,6 +1731,22 @@ Set additional directories to search for runner modules.
|
|||
runner_dirs:
|
||||
- /var/lib/salt/runners
|
||||
|
||||
.. conf_master:: utils_dirs
|
||||
|
||||
``utils_dirs``
|
||||
---------------
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Default: ``[]``
|
||||
|
||||
Set additional directories to search for util modules.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
utils_dirs:
|
||||
- /var/lib/salt/utils
|
||||
|
||||
.. conf_master:: cython_enable
|
||||
|
||||
``cython_enable``
|
||||
|
@ -3770,7 +3787,7 @@ they were created by a different master.
|
|||
Default: ``True``
|
||||
|
||||
Normally, when processing :ref:`git_pillar remotes
|
||||
<git-pillar-2015-8-0-and-later>`, if more than one repo under the same ``git``
|
||||
<git-pillar-configuration>`, if more than one repo under the same ``git``
|
||||
section in the ``ext_pillar`` configuration refers to the same pillar
|
||||
environment, then each repo in a given environment will have access to the
|
||||
other repos' files to be referenced in their top files. However, it may be
|
||||
|
|
|
@ -13,7 +13,7 @@ salt.modules.kernelpkg
|
|||
Execution Module Used for
|
||||
============================================ ========================================
|
||||
:py:mod:`~salt.modules.kernelpkg_linux_apt` Debian/Ubuntu-based distros which use
|
||||
``apt-get(8)`` for package management
|
||||
``apt-get`` for package management
|
||||
:py:mod:`~salt.modules.kernelpkg_linux_yum` RedHat-based distros and derivatives
|
||||
using ``yum(8)`` or ``dnf(8)``
|
||||
using ``yum`` or ``dnf``
|
||||
============================================ ========================================
|
||||
|
|
|
@ -21,7 +21,7 @@ Or you may specify a map which includes all VMs to perform the action on:
|
|||
|
||||
$ salt-cloud -a reboot -m /path/to/mapfile
|
||||
|
||||
The following is a list of actions currently supported by salt-cloud:
|
||||
The following is an example list of actions currently supported by ``salt-cloud``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -36,5 +36,5 @@ The following is a list of actions currently supported by salt-cloud:
|
|||
- start
|
||||
- stop
|
||||
|
||||
Another useful reference for viewing more salt-cloud actions is the
|
||||
:ref:Salt Cloud Feature Matrix <salt-cloud-feature-matrix>
|
||||
Another useful reference for viewing more ``salt-cloud`` actions is the
|
||||
:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.
|
||||
|
|
|
@ -56,6 +56,24 @@ settings can be placed in the provider or profile:
|
|||
sls_list:
|
||||
- web
|
||||
|
||||
|
||||
When salt cloud creates a new minon, it can automatically add grain information
|
||||
to the minion configuration file identifying the sources originally used
|
||||
to define it.
|
||||
|
||||
The generated grain information will appear similar to:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
grains:
|
||||
salt-cloud:
|
||||
driver: ec2
|
||||
provider: my_ec2:ec2
|
||||
profile: ec2-web
|
||||
|
||||
The generation of the salt-cloud grain can be surpressed by the
|
||||
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
|
||||
|
||||
Cloud Configuration Syntax
|
||||
==========================
|
||||
|
||||
|
|
|
@ -26,5 +26,5 @@ gathering information about instances on a provider basis:
|
|||
$ salt-cloud -f list_nodes_full linode
|
||||
$ salt-cloud -f list_nodes_select linode
|
||||
|
||||
Another useful reference for viewing salt-cloud functions is the
|
||||
Another useful reference for viewing ``salt-cloud`` functions is the
|
||||
:ref:`Salt Cloud Feature Matrix <salt-cloud-feature-matrix>`.
|
||||
|
|
|
@ -119,6 +119,7 @@ Cloud Provider Specifics
|
|||
Getting Started With Libvirt <libvirt>
|
||||
Getting Started With Linode <linode>
|
||||
Getting Started With LXC <lxc>
|
||||
Getting Started With OneAndOne <oneandone>
|
||||
Getting Started With OpenNebula <opennebula>
|
||||
Getting Started With OpenStack <openstack>
|
||||
Getting Started With Parallels <parallels>
|
||||
|
|
|
@ -406,4 +406,22 @@ configuration file. For example:
|
|||
- whoami
|
||||
- echo 'hello world!'
|
||||
|
||||
These commands will run in sequence **before** the bootstrap script is executed.
|
||||
These commands will run in sequence **before** the bootstrap script is executed.
|
||||
|
||||
Force Minion Config
|
||||
===================
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
The ``force_minion_config`` option requests the bootstrap process to overwrite
|
||||
an existing minion configuration file and public/private key files.
|
||||
Default: False
|
||||
|
||||
This might be important for drivers (such as ``saltify``) which are expected to
|
||||
take over a connection from a former salt master.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my_saltify_provider:
|
||||
driver: saltify
|
||||
force_minion_config: true
|
||||
|
|
146
doc/topics/cloud/oneandone.rst
Normal file
146
doc/topics/cloud/oneandone.rst
Normal file
|
@ -0,0 +1,146 @@
|
|||
==========================
|
||||
Getting Started With 1and1
|
||||
==========================
|
||||
|
||||
1&1 is one of the world’s leading Web hosting providers. 1&1 currently offers
|
||||
a wide range of Web hosting products, including email solutions and high-end
|
||||
servers in 10 different countries including Germany, Spain, Great Britain
|
||||
and the United States. From domains to 1&1 MyWebsite to eBusiness solutions
|
||||
like Cloud Hosting and Web servers for complex tasks, 1&1 is well placed to deliver
|
||||
a high quality service to its customers. All 1&1 products are hosted in
|
||||
1&1‘s high-performance, green data centers in the USA and Europe.
|
||||
|
||||
Dependencies
|
||||
============
|
||||
|
||||
* 1and1 >= 1.2.0
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
* Using the new format, set up the cloud configuration at
|
||||
``/etc/salt/cloud.providers`` or
|
||||
``/etc/salt/cloud.providers.d/oneandone.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-oneandone-config:
|
||||
driver: oneandone
|
||||
|
||||
# Set the location of the salt-master
|
||||
#
|
||||
minion:
|
||||
master: saltmaster.example.com
|
||||
|
||||
# Configure oneandone authentication credentials
|
||||
#
|
||||
api_token: <api_token>
|
||||
ssh_private_key: /path/to/id_rsa
|
||||
ssh_public_key: /path/to/id_rsa.pub
|
||||
|
||||
Authentication
|
||||
==============
|
||||
|
||||
The ``api_key`` is used for API authorization. This token can be obtained
|
||||
from the CloudPanel in the Management section below Users.
|
||||
|
||||
Profiles
|
||||
========
|
||||
|
||||
Here is an example of a profile:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
oneandone_fixed_size:
|
||||
provider: my-oneandone-config
|
||||
description: Small instance size server
|
||||
fixed_instance_size: S
|
||||
appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA
|
||||
|
||||
oneandone_custom_size:
|
||||
provider: my-oneandone-config
|
||||
description: Custom size server
|
||||
vcore: 2
|
||||
cores_per_processor: 2
|
||||
ram: 8
|
||||
appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA
|
||||
hdds:
|
||||
-
|
||||
is_main: true
|
||||
size: 20
|
||||
-
|
||||
is_main: false
|
||||
size: 20
|
||||
|
||||
The following list explains some of the important properties.
|
||||
|
||||
fixed_instance_size_id
|
||||
When creating a server, either ``fixed_instance_size_id`` or custom hardware params
|
||||
containing ``vcore``, ``cores_per_processor``, ``ram``, and ``hdds`` must be provided.
|
||||
Can be one of the IDs listed among the output of the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-sizes oneandone
|
||||
|
||||
vcore
|
||||
Total amount of processors.
|
||||
|
||||
cores_per_processor
|
||||
Number of cores per processor.
|
||||
|
||||
ram
|
||||
RAM memory size in GB.
|
||||
|
||||
hdds
|
||||
Hard disks.
|
||||
|
||||
appliance_id
|
||||
ID of the image that will be installed on server.
|
||||
Can be one of the IDs listed in the output of the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-images oneandone
|
||||
|
||||
datacenter_id
|
||||
ID of the datacenter where the server will be created.
|
||||
Can be one of the IDs listed in the output of the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-locations oneandone
|
||||
|
||||
description
|
||||
Description of the server.
|
||||
|
||||
password
|
||||
Password of the server. Password must contain more than 8 characters
|
||||
using uppercase letters, numbers and other special symbols.
|
||||
|
||||
power_on
|
||||
Power on server after creation. Default is set to true.
|
||||
|
||||
firewall_policy_id
|
||||
Firewall policy ID. If it is not provided, the server will assign
|
||||
the best firewall policy, creating a new one if necessary. If the parameter
|
||||
is sent with a 0 value, the server will be created with all ports blocked.
|
||||
|
||||
ip_id
|
||||
IP address ID.
|
||||
|
||||
load_balancer_id
|
||||
Load balancer ID.
|
||||
|
||||
monitoring_policy_id
|
||||
Monitoring policy ID.
|
||||
|
||||
deploy
|
||||
Set to False if Salt should not be installed on the node.
|
||||
|
||||
wait_for_timeout
|
||||
The timeout to wait in seconds for provisioning resources such as servers.
|
||||
The default wait_for_timeout is 15 minutes.
|
||||
|
||||
For more information concerning cloud profiles, see :ref:`here
|
||||
<salt-cloud-profiles>`.
|
|
@ -16,7 +16,7 @@ The Saltify driver has no external dependencies.
|
|||
Configuration
|
||||
=============
|
||||
|
||||
Because the Saltify driver does not use an actual cloud provider host, it has a
|
||||
Because the Saltify driver does not use an actual cloud provider host, it can have a
|
||||
simple provider configuration. The only thing that is required to be set is the
|
||||
driver name, and any other potentially useful information, like the location of
|
||||
the salt-master:
|
||||
|
@ -31,6 +31,12 @@ the salt-master:
|
|||
master: 111.222.333.444
|
||||
provider: saltify
|
||||
|
||||
However, if you wish to use the more advanced capabilities of salt-cloud, such as
|
||||
rebooting, listing, and disconnecting machines, then the salt master must fill
|
||||
the role usually performed by a vendor's cloud management system. In order to do
|
||||
that, you must configure your salt master as a salt-api server, and supply credentials
|
||||
to use it. (See ``salt-api setup`` below.)
|
||||
|
||||
|
||||
Profiles
|
||||
========
|
||||
|
@ -72,6 +78,30 @@ to it can be verified with Salt:
|
|||
salt my-machine test.ping
|
||||
|
||||
|
||||
Destroy Options
|
||||
---------------
|
||||
|
||||
For obvious reasons, the ``destroy`` action does not actually vaporize hardware.
|
||||
If the salt master is connected using salt-api, it can tear down parts of
|
||||
the client machines. It will remove the client's key from the salt master,
|
||||
and will attempt the following options:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- remove_config_on_destroy: true
|
||||
# default: true
|
||||
# Deactivate salt-minion on reboot and
|
||||
# delete the minion config and key files from its ``/etc/salt`` directory,
|
||||
# NOTE: If deactivation is unsuccessful (older Ubuntu machines) then when
|
||||
# salt-minion restarts it will automatically create a new, unwanted, set
|
||||
# of key files. The ``force_minion_config`` option must be used in that case.
|
||||
|
||||
- shutdown_on_destroy: false
|
||||
# default: false
|
||||
# send a ``shutdown`` command to the client.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Using Map Files
|
||||
---------------
|
||||
The settings explained in the section above may also be set in a map file. An
|
||||
|
@ -135,3 +165,67 @@ Return values:
|
|||
- ``True``: Credential verification succeeded
|
||||
- ``False``: Credential verification succeeded
|
||||
- ``None``: Credential verification was not attempted.
|
||||
|
||||
Provisioning salt-api
|
||||
=====================
|
||||
|
||||
In order to query or control minions it created, saltify needs to send commands
|
||||
to the salt master. It does that using the network interface to salt-api.
|
||||
|
||||
The salt-api is not enabled by default. The following example will provide a
|
||||
simple installation.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/cloud.profiles.d/my_saltify_profiles.conf
|
||||
hw_41: # a theoretical example hardware machine
|
||||
ssh_host: 10.100.9.41 # the hard address of your target
|
||||
ssh_username: vagrant # a user name which has passwordless sudo
|
||||
password: vagrant # on your target machine
|
||||
provider: my_saltify_provider
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/cloud.providers.d/saltify_provider.conf
|
||||
my_saltify_provider:
|
||||
driver: saltify
|
||||
eauth: pam
|
||||
username: vagrant # supply some sudo-group-member's name
|
||||
password: vagrant # and password on the salt master
|
||||
minion:
|
||||
master: 10.100.9.5 # the hard address of the master
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/master.d/auth.conf
|
||||
# using salt-api ... members of the 'sudo' group can do anything ...
|
||||
external_auth:
|
||||
pam:
|
||||
sudo%:
|
||||
- .*
|
||||
- '@wheel'
|
||||
- '@runner'
|
||||
- '@jobs'
|
||||
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# file /etc/salt/master.d/api.conf
|
||||
# see https://docs.saltstack.com/en/latest/ref/netapi/all/salt.netapi.rest_cherrypy.html
|
||||
rest_cherrypy:
|
||||
host: localhost
|
||||
port: 8000
|
||||
ssl_crt: /etc/pki/tls/certs/localhost.crt
|
||||
ssl_key: /etc/pki/tls/certs/localhost.key
|
||||
thread_pool: 30
|
||||
socket_queue_size: 10
|
||||
|
||||
|
||||
Start your target machine as a Salt minion named "node41" by:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ sudo salt-cloud -p hw_41 node41
|
||||
|
||||
|
|
|
@ -93,6 +93,26 @@ By user, by minion:
|
|||
<minion compound target>:
|
||||
- <regex to match function>
|
||||
|
||||
By user, by runner/wheel:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
external_auth:
|
||||
<eauth backend>:
|
||||
<user or group%>:
|
||||
<@runner or @wheel>:
|
||||
- <regex to match function>
|
||||
|
||||
By user, by runner+wheel module:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
external_auth:
|
||||
<eauth backend>:
|
||||
<user or group%>:
|
||||
<@module_name>:
|
||||
- <regex to match function without module_name>
|
||||
|
||||
Groups
|
||||
------
|
||||
|
||||
|
@ -127,6 +147,14 @@ Positional arguments or keyword arguments to functions can also be whitelisted.
|
|||
kwargs:
|
||||
'kwa': 'kwa.*'
|
||||
'kwb': 'kwb'
|
||||
- '@runner':
|
||||
- 'runner_mod.*':
|
||||
args:
|
||||
- 'a.*'
|
||||
- 'b.*'
|
||||
kwargs:
|
||||
'kwa': 'kwa.*'
|
||||
'kwb': 'kwb'
|
||||
|
||||
The rules:
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ bringing with it the ability to access authenticated repositories.
|
|||
|
||||
Using the new features will require updates to the git ext_pillar
|
||||
configuration, further details can be found in the :ref:`pillar.git_pillar
|
||||
<git-pillar-2015-8-0-and-later>` docs.
|
||||
<git-pillar-configuration>` docs.
|
||||
|
||||
.. _pygit2: https://github.com/libgit2/pygit2
|
||||
|
||||
|
|
|
@ -3,3 +3,13 @@ Salt 2016.11.7 Release Notes
|
|||
============================
|
||||
|
||||
Version 2016.11.7 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`.
|
||||
|
||||
Changes for v2016.11.6..v2016.11.7
|
||||
----------------------------------
|
||||
|
||||
Security Fix
|
||||
============
|
||||
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
||||
|
|
|
@ -4,23 +4,12 @@ Salt 2016.3.7 Release Notes
|
|||
|
||||
Version 2016.3.7 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the msater.
|
||||
Changes for v2016.3.6..v2016.3.7
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
Security Fix
|
||||
============
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
||||
|
|
29
doc/topics/releases/2016.3.8.rst
Normal file
29
doc/topics/releases/2016.3.8.rst
Normal file
|
@ -0,0 +1,29 @@
|
|||
===========================
|
||||
Salt 2016.3.8 Release Notes
|
||||
===========================
|
||||
|
||||
Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 <release-2016-3-0>`.
|
||||
|
||||
Changes for v2016.3.7..v2016.3.8
|
||||
--------------------------------
|
||||
|
||||
New master configuration option `allow_minion_key_revoke`, defaults to True. This option
|
||||
controls whether a minion can request that the master revoke its key. When True, a minion
|
||||
can request a key revocation and the master will comply. If it is False, the key will not
|
||||
be revoked by the msater.
|
||||
|
||||
New master configuration option `require_minion_sign_messages`
|
||||
This requires that minions cryptographically sign the messages they
|
||||
publish to the master. If minions are not signing, then log this information
|
||||
at loglevel 'INFO' and drop the message without acting on it.
|
||||
|
||||
New master configuration option `drop_messages_signature_fail`
|
||||
Drop messages from minions when their signatures do not validate.
|
||||
Note that when this option is False but `require_minion_sign_messages` is True
|
||||
minions MUST sign their messages but the validity of their signatures
|
||||
is ignored.
|
||||
|
||||
New minion configuration option `minion_sign_messages`
|
||||
Causes the minion to cryptographically sign the payload of messages it places
|
||||
on the event bus for the master. The payloads are signed with the minion's
|
||||
private key so the master can verify the signature with its public key.
|
|
@ -4,6 +4,13 @@ Salt 2017.7.1 Release Notes
|
|||
|
||||
Version 2017.7.1 is a bugfix release for :ref:`2017.7.0 <release-2017-7-0>`.
|
||||
|
||||
Security Fix
|
||||
============
|
||||
|
||||
CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master
|
||||
|
||||
Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com
|
||||
|
||||
Changes for v2017.7.0..v2017.7.1
|
||||
--------------------------------
|
||||
|
||||
|
|
|
@ -46,6 +46,11 @@ noon PST so the Stormpath external authentication module has been removed.
|
|||
|
||||
https://stormpath.com/oktaplusstormpath
|
||||
|
||||
New NaCl Renderer
|
||||
-----------------
|
||||
|
||||
A new renderer has been added for encrypted data.
|
||||
|
||||
New GitFS Features
|
||||
------------------
|
||||
|
||||
|
@ -691,6 +696,25 @@ For ``smartos`` some grains have been deprecated. These grains will be removed i
|
|||
- The ``hypervisor_uuid`` has been replaced with ``mdata:sdc:server_uuid`` grain.
|
||||
- The ``datacenter`` has been replaced with ``mdata:sdc:datacenter_name`` grain.
|
||||
|
||||
Minion Blackout
|
||||
---------------
|
||||
|
||||
During a blackout, minions will not execute any remote execution commands,
|
||||
except for :mod:`saltutil.refresh_pillar <salt.modules.saltutil.refresh_pillar>`.
|
||||
Previously, support was added so that blackouts are enabled using a special
|
||||
pillar key, ``minion_blackout`` set to ``True`` and an optional pillar key
|
||||
``minion_blackout_whitelist`` to specify additional functions that are permitted
|
||||
during blackout. This release adds support for using this feature in the grains
|
||||
as well, by using special grains keys ``minion_blackout`` and
|
||||
``minion_blackout_whitelist``.
|
||||
|
||||
Pillar Deprecations
|
||||
-------------------
|
||||
|
||||
The legacy configuration for ``git_pillar`` has been removed. Please use the new
|
||||
configuration for ``git_pillar``, which is documented in the external pillar module
|
||||
for :mod:`git_pillar <salt.pillar.git_pillar>`.
|
||||
|
||||
Utils Deprecations
|
||||
==================
|
||||
|
||||
|
|
|
@ -1110,15 +1110,8 @@ Using Git as an External Pillar Source
|
|||
The git external pillar (a.k.a. git_pillar) has been rewritten for the 2015.8.0
|
||||
release. This rewrite brings with it pygit2_ support (allowing for access to
|
||||
authenticated repositories), as well as more granular support for per-remote
|
||||
configuration.
|
||||
|
||||
To make use of the new features, changes to the git ext_pillar configuration
|
||||
must be made. The new configuration schema is detailed :ref:`here
|
||||
<git-pillar-2015-8-0-and-later>`.
|
||||
|
||||
For Salt releases before 2015.8.0, click :ref:`here <git-pillar-pre-2015-8-0>`
|
||||
for documentation.
|
||||
|
||||
configuration. This configuration schema is detailed :ref:`here
|
||||
<git-pillar-configuration>`.
|
||||
|
||||
.. _faq-gitfs-bug:
|
||||
|
||||
|
|
|
@ -81,6 +81,10 @@ the ``foo`` utility module with a ``__virtual__`` function.
|
|||
def bar():
|
||||
return 'baz'
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
Instantiating objects from classes declared in util modules works with
|
||||
Master side modules, such as Runners, Outputters, etc.
|
||||
|
||||
Also you could even write your utility modules in object oriented fashion:
|
||||
|
||||
.. code-block:: python
|
||||
|
|
|
@ -15,91 +15,119 @@
|
|||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Post install started on:" > /tmp/postinstall.txt
|
||||
date >> /tmp/postinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Check for existing minion config, copy if it doesn't exist
|
||||
###############################################################################
|
||||
if [ ! -f /etc/salt/minion ]; then
|
||||
echo "Config copy: Started..." >> /tmp/postinstall.txt
|
||||
cp /etc/salt/minion.dist /etc/salt/minion
|
||||
echo "Config copy: Successful" >> /tmp/postinstall.txt
|
||||
if [ ! -f "$CONFIG_DIR/minion" ]; then
|
||||
echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt"
|
||||
cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion"
|
||||
echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Create symlink to salt-config.sh
|
||||
###############################################################################
|
||||
# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/usr/local/sbin" ]; then
|
||||
mkdir /usr/local/sbin
|
||||
if [ ! -d "$SBIN_DIR" ]; then
|
||||
echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir "$SBIN_DIR"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config
|
||||
echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt"
|
||||
ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config"
|
||||
echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Add salt to paths.d
|
||||
###############################################################################
|
||||
# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt
|
||||
if [ ! -d "/etc/paths.d" ]; then
|
||||
echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt"
|
||||
mkdir /etc/paths.d
|
||||
echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt'
|
||||
sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt'
|
||||
echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt"
|
||||
sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt"
|
||||
sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt"
|
||||
echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Register Salt as a service
|
||||
###############################################################################
|
||||
setup_services_maverick() {
|
||||
echo "Using old (< 10.10) launchctl interface" >> /tmp/postinstall.txt
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop running service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi;
|
||||
echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service start: Successful" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
setup_services_yosemite_and_later() {
|
||||
echo "Using new (>= 10.10) launchctl interface" >> /tmp/postinstall.txt
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt"
|
||||
echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl enable system/com.saltstack.salt.minion
|
||||
echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Service is running" >> /tmp/postinstall.txt
|
||||
echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt"
|
||||
else
|
||||
echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl kickstart -kp system/com.saltstack.salt.minion
|
||||
echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Service start: Successful" >> /tmp/postinstall.txt
|
||||
|
||||
echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt
|
||||
echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
|
||||
echo "Service start: Enabling service..." >> /tmp/postinstall.txt
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
setup_services_maverick;
|
||||
|
@ -108,7 +136,9 @@ case $MINOR in
|
|||
setup_services_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
echo "Post install completed successfully" >> /tmp/postinstall.txt
|
||||
echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
|
|
@ -6,7 +6,8 @@
|
|||
# Date: December 2015
|
||||
#
|
||||
# Description: This script stops the salt minion service before attempting to
|
||||
# install Salt on macOS
|
||||
# install Salt on macOS. It also removes the /opt/salt/bin
|
||||
# directory, symlink to salt-config, and salt from paths.d.
|
||||
#
|
||||
# Requirements:
|
||||
# - None
|
||||
|
@ -15,12 +16,29 @@
|
|||
# This script is run as a part of the macOS Salt Installation
|
||||
#
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > /tmp/preinstall.txt
|
||||
date >> /tmp/preinstall.txt
|
||||
|
||||
###############################################################################
|
||||
# Define Variables
|
||||
###############################################################################
|
||||
# Get Minor Version
|
||||
OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]')
|
||||
MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
||||
# Path Variables
|
||||
INSTALL_DIR="/opt/salt"
|
||||
BIN_DIR="$INSTALL_DIR/bin"
|
||||
CONFIG_DIR="/etc/salt"
|
||||
TEMP_DIR="/tmp"
|
||||
SBIN_DIR="/usr/local/sbin"
|
||||
|
||||
###############################################################################
|
||||
# Set up logging and error handling
|
||||
###############################################################################
|
||||
echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
trap 'quit_on_error $LINENO $BASH_COMMAND' ERR
|
||||
|
||||
quit_on_error() {
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt
|
||||
echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt"
|
||||
exit -1
|
||||
}
|
||||
|
||||
|
@ -31,24 +49,58 @@ MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.)
|
|||
# Stop the service
|
||||
###############################################################################
|
||||
stop_service_maverick() {
|
||||
echo "Using old (< 10.10) launchctl interface" >> /tmp/preinstall.txt
|
||||
echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop service: Started..." >> /tmp/preinstall.txt
|
||||
echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Stop service: Successful" >> /tmp/preinstall.txt
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service_yosemite_and_later() {
|
||||
echo "Using new (>= 10.10) launchctl interface" >> /tmp/preinstall.txt
|
||||
echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt"
|
||||
if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then
|
||||
echo "Stop service: Started..." >> /tmp/preinstall.txt
|
||||
echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.minion
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist
|
||||
echo "Stop service: Successful" >> /tmp/preinstall.txt
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then
|
||||
echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.master
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then
|
||||
echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.syndic
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then
|
||||
echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt"
|
||||
launchctl disable system/com.saltstack.salt.api
|
||||
launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist
|
||||
echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt"
|
||||
case $MINOR in
|
||||
9 )
|
||||
stop_service_maverick;
|
||||
|
@ -57,6 +109,36 @@ case $MINOR in
|
|||
stop_service_yosemite_and_later;
|
||||
;;
|
||||
esac
|
||||
echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt
|
||||
echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
###############################################################################
|
||||
# Remove the Symlink to salt-config.sh
|
||||
###############################################################################
|
||||
if [ -L "$SBIN_DIR/salt-config" ]; then
|
||||
echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "$SBIN_DIR/salt-config"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Remove the $INSTALL_DIR directory
|
||||
###############################################################################
|
||||
if [ -d "$INSTALL_DIR" ]; then
|
||||
echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt"
|
||||
rm -rf "$INSTALL_DIR"
|
||||
echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
###############################################################################
|
||||
# Remove the salt from the paths.d
|
||||
###############################################################################
|
||||
if [ ! -f "/etc/paths.d/salt" ]; then
|
||||
echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
|
||||
rm "/etc/paths.d/salt"
|
||||
echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
|
||||
fi
|
||||
|
||||
echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt"
|
||||
date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt"
|
||||
|
||||
exit 0
|
||||
|
|
|
@ -383,8 +383,8 @@ Section -Post
|
|||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1"
|
||||
|
||||
RMDir /R "$INSTDIR\var\cache\salt" ; removing cache from old version
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"
|
||||
|
||||
Call updateMinionConfig
|
||||
|
||||
|
|
|
@ -17,9 +17,7 @@ from __future__ import absolute_import
|
|||
|
||||
# Import python libs
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import collections
|
||||
import hashlib
|
||||
import time
|
||||
import logging
|
||||
import random
|
||||
|
@ -56,6 +54,7 @@ class LoadAuth(object):
|
|||
self.max_fail = 1.0
|
||||
self.serial = salt.payload.Serial(opts)
|
||||
self.auth = salt.loader.auth(opts)
|
||||
self.tokens = salt.loader.eauth_tokens(opts)
|
||||
self.ckminions = ckminions or salt.utils.minions.CkMinions(opts)
|
||||
|
||||
def load_name(self, load):
|
||||
|
@ -200,13 +199,6 @@ class LoadAuth(object):
|
|||
'''
|
||||
if not self.authenticate_eauth(load):
|
||||
return {}
|
||||
fstr = '{0}.auth'.format(load['eauth'])
|
||||
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
|
||||
tok = str(hash_type(os.urandom(512)).hexdigest())
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
while os.path.isfile(t_path):
|
||||
tok = str(hash_type(os.urandom(512)).hexdigest())
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
|
||||
if self._allow_custom_expire(load):
|
||||
token_expire = load.pop('token_expire', self.opts['token_expire'])
|
||||
|
@ -217,8 +209,7 @@ class LoadAuth(object):
|
|||
tdata = {'start': time.time(),
|
||||
'expire': time.time() + token_expire,
|
||||
'name': self.load_name(load),
|
||||
'eauth': load['eauth'],
|
||||
'token': tok}
|
||||
'eauth': load['eauth']}
|
||||
|
||||
if self.opts['keep_acl_in_token']:
|
||||
acl_ret = self.__get_acl(load)
|
||||
|
@ -227,29 +218,17 @@ class LoadAuth(object):
|
|||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
|
||||
try:
|
||||
with salt.utils.files.set_umask(0o177):
|
||||
with salt.utils.files.fopen(t_path, 'w+b') as fp_:
|
||||
fp_.write(self.serial.dumps(tdata))
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not write token file "{0}".'.format(t_path))
|
||||
return {}
|
||||
return tdata
|
||||
return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)
|
||||
|
||||
def get_tok(self, tok):
|
||||
'''
|
||||
Return the name associated with the token, or False if the token is
|
||||
not valid
|
||||
'''
|
||||
t_path = os.path.join(self.opts['token_dir'], tok)
|
||||
if not os.path.isfile(t_path):
|
||||
return {}
|
||||
try:
|
||||
with salt.utils.files.fopen(t_path, 'rb') as fp_:
|
||||
tdata = self.serial.loads(fp_.read())
|
||||
except (IOError, OSError):
|
||||
log.warning('Authentication failure: can not read token file "{0}".'.format(t_path))
|
||||
tdata = self.tokens["{0}.get_token".format(self.opts['eauth_tokens'])](self.opts, tok)
|
||||
if not tdata:
|
||||
return {}
|
||||
|
||||
rm_tok = False
|
||||
if 'expire' not in tdata:
|
||||
# invalid token, delete it!
|
||||
|
@ -257,13 +236,22 @@ class LoadAuth(object):
|
|||
if tdata.get('expire', '0') < time.time():
|
||||
rm_tok = True
|
||||
if rm_tok:
|
||||
try:
|
||||
os.remove(t_path)
|
||||
return {}
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
self.rm_token(tok)
|
||||
|
||||
return tdata
|
||||
|
||||
def list_tokens(self):
|
||||
'''
|
||||
List all tokens in eauth_tokn storage.
|
||||
'''
|
||||
return self.tokens["{0}.list_tokens".format(self.opts['eauth_tokens'])](self.opts)
|
||||
|
||||
def rm_token(self, tok):
|
||||
'''
|
||||
Remove the given token from token storage.
|
||||
'''
|
||||
self.tokens["{0}.rm_token".format(self.opts['eauth_tokens'])](self.opts, tok)
|
||||
|
||||
def authenticate_token(self, load):
|
||||
'''
|
||||
Authenticate a user by the token specified in load.
|
||||
|
|
6
salt/cache/__init__.py
vendored
6
salt/cache/__init__.py
vendored
|
@ -224,7 +224,7 @@ class Cache(object):
|
|||
fun = '{0}.flush'.format(self.driver)
|
||||
return self.modules[fun](bank, key=key, **self._kwargs)
|
||||
|
||||
def ls(self, bank):
|
||||
def list(self, bank):
|
||||
'''
|
||||
Lists entries stored in the specified bank.
|
||||
|
||||
|
@ -240,11 +240,9 @@ class Cache(object):
|
|||
Raises an exception if cache driver detected an error accessing data
|
||||
in the cache backend (auth, permissions, etc).
|
||||
'''
|
||||
fun = '{0}.ls'.format(self.driver)
|
||||
fun = '{0}.list'.format(self.driver)
|
||||
return self.modules[fun](bank, **self._kwargs)
|
||||
|
||||
list = ls
|
||||
|
||||
def contains(self, bank, key=None):
|
||||
'''
|
||||
Checks if the specified bank contains the specified key.
|
||||
|
|
4
salt/cache/consul.py
vendored
4
salt/cache/consul.py
vendored
|
@ -61,7 +61,7 @@ api = None
|
|||
# Define the module's virtual name
|
||||
__virtualname__ = 'consul'
|
||||
|
||||
__func_alias__ = {'list': 'ls'}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
|
@ -139,7 +139,7 @@ def flush(bank, key=None):
|
|||
)
|
||||
|
||||
|
||||
def ls(bank):
|
||||
def list_(bank):
|
||||
'''
|
||||
Return an iterable object containing all entries stored in the specified bank.
|
||||
'''
|
||||
|
|
4
salt/cache/localfs.py
vendored
4
salt/cache/localfs.py
vendored
|
@ -24,7 +24,7 @@ import salt.utils.files
|
|||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__func_alias__ = {'list': 'ls'}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
|
||||
def __cachedir(kwargs=None):
|
||||
|
@ -144,7 +144,7 @@ def flush(bank, key=None, cachedir=None):
|
|||
return True
|
||||
|
||||
|
||||
def ls(bank, cachedir):
|
||||
def list_(bank, cachedir):
|
||||
'''
|
||||
Return an iterable object containing all entries stored in the specified bank.
|
||||
'''
|
||||
|
|
7
salt/cache/redis_cache.py
vendored
7
salt/cache/redis_cache.py
vendored
|
@ -161,9 +161,7 @@ from salt.exceptions import SaltCacheError
|
|||
# -----------------------------------------------------------------------------
|
||||
|
||||
__virtualname__ = 'redis'
|
||||
__func_alias__ = {
|
||||
'list_': 'list'
|
||||
}
|
||||
__func_alias__ = {'list_': 'list'}
|
||||
|
||||
log = logging.getLogger(__file__)
|
||||
|
||||
|
@ -196,6 +194,9 @@ def __virtual__():
|
|||
# helper functions -- will not be exported
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def init_kwargs(kwargs):
|
||||
return {}
|
||||
|
||||
|
||||
def _get_redis_cache_opts():
|
||||
'''
|
||||
|
|
|
@ -18,14 +18,15 @@ import sys
|
|||
|
||||
# Import salt libs
|
||||
import salt.client
|
||||
import salt.output
|
||||
import salt.utils
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.itertools
|
||||
import salt.utils.minions
|
||||
import salt.utils.parsers
|
||||
import salt.utils.platform
|
||||
import salt.utils.stringutils
|
||||
from salt.utils.verify import verify_log
|
||||
import salt.output
|
||||
import salt.utils.verify
|
||||
|
||||
# Import 3rd party libs
|
||||
from salt.ext import six
|
||||
|
@ -46,7 +47,7 @@ class SaltCPCli(salt.utils.parsers.SaltCPOptionParser):
|
|||
|
||||
# Setup file logging!
|
||||
self.setup_logfile_logger()
|
||||
verify_log(self.config)
|
||||
salt.utils.verify.verify_log(self.config)
|
||||
|
||||
cp_ = SaltCP(self.config)
|
||||
cp_.run()
|
||||
|
@ -103,10 +104,70 @@ class SaltCP(object):
|
|||
empty_dirs.update(empty_dirs_)
|
||||
return files, sorted(empty_dirs)
|
||||
|
||||
def _file_dict(self, fn_):
|
||||
'''
|
||||
Take a path and return the contents of the file as a string
|
||||
'''
|
||||
if not os.path.isfile(fn_):
|
||||
err = 'The referenced file, {0} is not available.'.format(fn_)
|
||||
sys.stderr.write(err + '\n')
|
||||
sys.exit(42)
|
||||
with salt.utils.fopen(fn_, 'r') as fp_:
|
||||
data = fp_.read()
|
||||
return {fn_: data}
|
||||
|
||||
def _load_files(self):
|
||||
'''
|
||||
Parse the files indicated in opts['src'] and load them into a python
|
||||
object for transport
|
||||
'''
|
||||
files = {}
|
||||
for fn_ in self.opts['src']:
|
||||
if os.path.isfile(fn_):
|
||||
files.update(self._file_dict(fn_))
|
||||
elif os.path.isdir(fn_):
|
||||
salt.utils.print_cli(fn_ + ' is a directory, only files are supported '
|
||||
'in non-chunked mode. Use "--chunked" command '
|
||||
'line argument.')
|
||||
sys.exit(1)
|
||||
return files
|
||||
|
||||
def run(self):
|
||||
'''
|
||||
Make the salt client call
|
||||
'''
|
||||
if self.opts['chunked']:
|
||||
ret = self.run_chunked()
|
||||
else:
|
||||
ret = self.run_oldstyle()
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
|
||||
def run_oldstyle(self):
|
||||
'''
|
||||
Make the salt client call in old-style all-in-one call method
|
||||
'''
|
||||
arg = [self._load_files(), self.opts['dest']]
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
args = [self.opts['tgt'],
|
||||
'cp.recv',
|
||||
arg,
|
||||
self.opts['timeout'],
|
||||
]
|
||||
|
||||
selected_target_option = self.opts.get('selected_target_option', None)
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
return local.cmd(*args)
|
||||
|
||||
def run_chunked(self):
|
||||
'''
|
||||
Make the salt client call in the new fasion chunked multi-call way
|
||||
'''
|
||||
files, empty_dirs = self._list_files()
|
||||
dest = self.opts['dest']
|
||||
gzip = self.opts['gzip']
|
||||
|
@ -168,7 +229,7 @@ class SaltCP(object):
|
|||
)
|
||||
args = [
|
||||
tgt,
|
||||
'cp.recv',
|
||||
'cp.recv_chunked',
|
||||
[remote_path, chunk, append, gzip, mode],
|
||||
timeout,
|
||||
]
|
||||
|
@ -214,14 +275,11 @@ class SaltCP(object):
|
|||
else '',
|
||||
tgt,
|
||||
)
|
||||
args = [tgt, 'cp.recv', [remote_path, None], timeout]
|
||||
args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout]
|
||||
if selected_target_option is not None:
|
||||
args.append(selected_target_option)
|
||||
|
||||
for minion_id, minion_ret in six.iteritems(local.cmd(*args)):
|
||||
ret.setdefault(minion_id, {})[remote_path] = minion_ret
|
||||
|
||||
salt.output.display_output(
|
||||
ret,
|
||||
self.opts.get('output', 'nested'),
|
||||
self.opts)
|
||||
return ret
|
||||
|
|
|
@ -189,15 +189,14 @@ class LocalClient(object):
|
|||
key_user = key_user.replace(u'\\', u'_')
|
||||
keyfile = os.path.join(self.opts[u'cachedir'],
|
||||
u'.{0}_key'.format(key_user))
|
||||
# Make sure all key parent directories are accessible
|
||||
salt.utils.verify.check_path_traversal(self.opts[u'cachedir'],
|
||||
key_user,
|
||||
self.skip_perm_errors)
|
||||
|
||||
try:
|
||||
# Make sure all key parent directories are accessible
|
||||
salt.utils.verify.check_path_traversal(self.opts[u'cachedir'],
|
||||
key_user,
|
||||
self.skip_perm_errors)
|
||||
with salt.utils.files.fopen(keyfile, u'r') as key:
|
||||
return key.read()
|
||||
except (OSError, IOError):
|
||||
except (OSError, IOError, SaltClientError):
|
||||
# Fall back to eauth
|
||||
return u''
|
||||
|
||||
|
|
|
@ -445,6 +445,7 @@ class SyncClientMixin(object):
|
|||
_use_fnmatch = True
|
||||
else:
|
||||
target_mod = arg + u'.' if not arg.endswith(u'.') else arg
|
||||
_use_fnmatch = False
|
||||
if _use_fnmatch:
|
||||
docs = [(fun, self.functions[fun].__doc__)
|
||||
for fun in fnmatch.filter(self.functions, target_mod)]
|
||||
|
|
|
@ -1049,8 +1049,7 @@ class Single(object):
|
|||
opts_pkg[u'id'],
|
||||
opts_pkg.get(u'environment', u'base')
|
||||
)
|
||||
pillar_dirs = {}
|
||||
pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
pillar_data = pillar.compile_pillar()
|
||||
|
||||
# TODO: cache minion opts in datap in master.py
|
||||
data = {u'opts': opts_pkg,
|
||||
|
|
|
@ -3432,34 +3432,7 @@ def list_nodes_full(location=None, call=None):
|
|||
'or --function.'
|
||||
)
|
||||
|
||||
if not location:
|
||||
ret = {}
|
||||
locations = set(
|
||||
get_location(vm_) for vm_ in six.itervalues(__opts__['profiles'])
|
||||
if _vm_provider_driver(vm_)
|
||||
)
|
||||
|
||||
# If there aren't any profiles defined for EC2, check
|
||||
# the provider config file, or use the default location.
|
||||
if not locations:
|
||||
locations = [get_location()]
|
||||
|
||||
for loc in locations:
|
||||
ret.update(_list_nodes_full(loc))
|
||||
return ret
|
||||
|
||||
return _list_nodes_full(location)
|
||||
|
||||
|
||||
def _vm_provider_driver(vm_):
|
||||
alias, driver = vm_['driver'].split(':')
|
||||
if alias not in __opts__['providers']:
|
||||
return None
|
||||
|
||||
if driver not in __opts__['providers'][alias]:
|
||||
return None
|
||||
|
||||
return driver == 'ec2'
|
||||
return _list_nodes_full(location or get_location())
|
||||
|
||||
|
||||
def _extract_name_tag(item):
|
||||
|
|
|
@ -728,12 +728,18 @@ def request_instance(vm_=None, call=None):
|
|||
|
||||
else:
|
||||
pool = floating_ip_conf.get('pool', 'public')
|
||||
for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
|
||||
if opts['fixed_ip'] is None and opts['pool'] == pool:
|
||||
floating_ip = fl_ip
|
||||
break
|
||||
if floating_ip is None:
|
||||
try:
|
||||
floating_ip = conn.floating_ip_create(pool)['ip']
|
||||
except Exception:
|
||||
log.info('A new IP address was unable to be allocated. '
|
||||
'An IP address will be pulled from the already allocated list, '
|
||||
'This will cause a race condition when building in parallel.')
|
||||
for fl_ip, opts in six.iteritems(conn.floating_ip_list()):
|
||||
if opts['fixed_ip'] is None and opts['pool'] == pool:
|
||||
floating_ip = fl_ip
|
||||
break
|
||||
if floating_ip is None:
|
||||
log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name']))
|
||||
|
||||
def __query_node_data(vm_):
|
||||
try:
|
||||
|
|
849
salt/cloud/clouds/oneandone.py
Normal file
849
salt/cloud/clouds/oneandone.py
Normal file
|
@ -0,0 +1,849 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
1&1 Cloud Server Module
|
||||
=======================
|
||||
|
||||
=======
|
||||
The 1&1 SaltStack cloud module allows a 1&1 server to
|
||||
be automatically deployed and bootstrapped with Salt.
|
||||
|
||||
:depends: 1and1 >= 1.2.0
|
||||
|
||||
The module requires the 1&1 api_token to be provided.
|
||||
The server should also be assigned a public LAN, a private LAN,
|
||||
or both along with SSH key pairs.
|
||||
...
|
||||
|
||||
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
|
||||
``/etc/salt/cloud.providers.d/oneandone.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-oneandone-config:
|
||||
driver: oneandone
|
||||
# The 1&1 api token
|
||||
api_token: <your-token>
|
||||
# SSH private key filename
|
||||
ssh_private_key: /path/to/private_key
|
||||
# SSH public key filename
|
||||
ssh_public_key: /path/to/public_key
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-oneandone-profile:
|
||||
provider: my-oneandone-config
|
||||
# Either provide fixed_instance_size_id or vcore, cores_per_processor, ram, and hdds.
|
||||
# Size of the ID desired for the server
|
||||
fixed_instance_size: S
|
||||
# Total amount of processors
|
||||
vcore: 2
|
||||
# Number of cores per processor
|
||||
cores_per_processor: 2
|
||||
# RAM memory size in GB
|
||||
ram: 4
|
||||
# Hard disks
|
||||
hdds:
|
||||
-
|
||||
is_main: true
|
||||
size: 20
|
||||
-
|
||||
is_main: false
|
||||
size: 20
|
||||
# ID of the appliance image that will be installed on server
|
||||
appliance_id: <ID>
|
||||
# ID of the datacenter where the server will be created
|
||||
datacenter_id: <ID>
|
||||
# Description of the server
|
||||
description: My server description
|
||||
# Password of the server. Password must contain more than 8 characters
|
||||
# using uppercase letters, numbers and other special symbols.
|
||||
password: P4$$w0rD
|
||||
# Power on server after creation - default True
|
||||
power_on: true
|
||||
# Firewall policy ID. If it is not provided, the server will assign
|
||||
# the best firewall policy, creating a new one if necessary.
|
||||
# If the parameter is sent with a 0 value, the server will be created with all ports blocked.
|
||||
firewall_policy_id: <ID>
|
||||
# IP address ID
|
||||
ip_id: <ID>
|
||||
# Load balancer ID
|
||||
load_balancer_id: <ID>
|
||||
# Monitoring policy ID
|
||||
monitoring_policy_id: <ID>
|
||||
|
||||
Set ``deploy`` to False if Salt should not be installed on the node.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-oneandone-profile:
|
||||
deploy: False
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import pprint
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.config as config
|
||||
from salt.exceptions import (
|
||||
SaltCloudConfigError,
|
||||
SaltCloudNotFound,
|
||||
SaltCloudExecutionFailure,
|
||||
SaltCloudExecutionTimeout,
|
||||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Import salt.cloud libs
|
||||
import salt.utils.cloud
|
||||
from salt.ext import six
|
||||
|
||||
try:
|
||||
from oneandone.client import (
|
||||
OneAndOneService, Server, Hdd
|
||||
)
|
||||
HAS_ONEANDONE = True
|
||||
except ImportError:
|
||||
HAS_ONEANDONE = False
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'oneandone'
|
||||
|
||||
|
||||
# Only load in this module if the 1&1 configurations are in place
|
||||
def __virtual__():
|
||||
'''
|
||||
Check for 1&1 configurations.
|
||||
'''
|
||||
if get_configured_provider() is False:
|
||||
return False
|
||||
|
||||
if get_dependencies() is False:
|
||||
return False
|
||||
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def get_configured_provider():
|
||||
'''
|
||||
Return the first configured instance.
|
||||
'''
|
||||
return config.is_provider_configured(
|
||||
__opts__,
|
||||
__active_provider_name__ or __virtualname__,
|
||||
('api_token',)
|
||||
)
|
||||
|
||||
|
||||
def get_dependencies():
|
||||
'''
|
||||
Warn if dependencies are not met.
|
||||
'''
|
||||
return config.check_driver_dependencies(
|
||||
__virtualname__,
|
||||
{'oneandone': HAS_ONEANDONE}
|
||||
)
|
||||
|
||||
|
||||
def get_conn():
|
||||
'''
|
||||
Return a conn object for the passed VM data
|
||||
'''
|
||||
return OneAndOneService(
|
||||
api_token=config.get_cloud_config_value(
|
||||
'api_token',
|
||||
get_configured_provider(),
|
||||
__opts__,
|
||||
search_global=False
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def get_size(vm_):
|
||||
'''
|
||||
Return the VM's size object
|
||||
'''
|
||||
vm_size = config.get_cloud_config_value(
|
||||
'fixed_instance_size', vm_, __opts__, default=None,
|
||||
search_global=False
|
||||
)
|
||||
sizes = avail_sizes()
|
||||
|
||||
if not vm_size:
|
||||
size = next((item for item in sizes if item['name'] == 'S'), None)
|
||||
return size
|
||||
|
||||
size = next((item for item in sizes if item['name'] == vm_size or item['id'] == vm_size), None)
|
||||
if size:
|
||||
return size
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
'The specified size, \'{0}\', could not be found.'.format(vm_size)
|
||||
)
|
||||
|
||||
|
||||
def get_image(vm_):
|
||||
'''
|
||||
Return the image object to use
|
||||
'''
|
||||
vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
|
||||
'ascii', 'salt-cloud-force-ascii'
|
||||
)
|
||||
|
||||
images = avail_images()
|
||||
for key, value in six.iteritems(images):
|
||||
if vm_image and vm_image in (images[key]['id'], images[key]['name']):
|
||||
return images[key]
|
||||
|
||||
raise SaltCloudNotFound(
|
||||
'The specified image, \'{0}\', could not be found.'.format(vm_image)
|
||||
)
|
||||
|
||||
|
||||
def avail_locations(conn=None, call=None):
|
||||
'''
|
||||
List available locations/datacenters for 1&1
|
||||
'''
|
||||
if call == 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
'The avail_locations function must be called with '
|
||||
'-f or --function, or with the --list-locations option'
|
||||
)
|
||||
|
||||
datacenters = []
|
||||
|
||||
if not conn:
|
||||
conn = get_conn()
|
||||
|
||||
for datacenter in conn.list_datacenters():
|
||||
datacenters.append({datacenter['country_code']: datacenter})
|
||||
|
||||
return {'Locations': datacenters}
|
||||
|
||||
|
||||
def avail_images(conn=None, call=None):
|
||||
'''
|
||||
Return a list of the server appliances that are on the provider
|
||||
'''
|
||||
if call == 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
'The avail_images function must be called with '
|
||||
'-f or --function, or with the --list-images option'
|
||||
)
|
||||
|
||||
if not conn:
|
||||
conn = get_conn()
|
||||
|
||||
ret = {}
|
||||
|
||||
for appliance in conn.list_appliances():
|
||||
ret[appliance['name']] = appliance
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def avail_sizes(call=None):
|
||||
'''
|
||||
Return a dict of all available VM sizes on the cloud provider with
|
||||
relevant data.
|
||||
'''
|
||||
if call == 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
'The avail_sizes function must be called with '
|
||||
'-f or --function, or with the --list-sizes option'
|
||||
)
|
||||
|
||||
conn = get_conn()
|
||||
|
||||
sizes = conn.fixed_server_flavors()
|
||||
|
||||
return sizes
|
||||
|
||||
|
||||
def script(vm_):
|
||||
'''
|
||||
Return the script deployment object
|
||||
'''
|
||||
return salt.utils.cloud.os_script(
|
||||
config.get_cloud_config_value('script', vm_, __opts__),
|
||||
vm_,
|
||||
__opts__,
|
||||
salt.utils.cloud.salt_config_to_yaml(
|
||||
salt.utils.cloud.minion_config(__opts__, vm_)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def list_nodes(conn=None, call=None):
|
||||
'''
|
||||
Return a list of VMs that are on the provider
|
||||
'''
|
||||
if call == 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
'The list_nodes function must be called with -f or --function.'
|
||||
)
|
||||
|
||||
if not conn:
|
||||
conn = get_conn()
|
||||
|
||||
ret = {}
|
||||
nodes = conn.list_servers()
|
||||
|
||||
for node in nodes:
|
||||
public_ips = []
|
||||
private_ips = []
|
||||
ret = {}
|
||||
|
||||
size = node.get('hardware').get('fixed_instance_size_id', 'Custom size')
|
||||
|
||||
if node.get('private_networks') and len(node['private_networks']) > 0:
|
||||
for private_ip in node['private_networks']:
|
||||
private_ips.append(private_ip)
|
||||
|
||||
if node.get('ips') and len(node['ips']) > 0:
|
||||
for public_ip in node['ips']:
|
||||
public_ips.append(public_ip['ip'])
|
||||
|
||||
server = {
|
||||
'id': node['id'],
|
||||
'image': node['image']['id'],
|
||||
'size': size,
|
||||
'state': node['status']['state'],
|
||||
'private_ips': private_ips,
|
||||
'public_ips': public_ips
|
||||
}
|
||||
ret[node['name']] = server
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def list_nodes_full(conn=None, call=None):
|
||||
'''
|
||||
Return a list of the VMs that are on the provider, with all fields
|
||||
'''
|
||||
if call == 'action':
|
||||
raise SaltCloudSystemExit(
|
||||
'The list_nodes_full function must be called with -f or '
|
||||
'--function.'
|
||||
)
|
||||
|
||||
if not conn:
|
||||
conn = get_conn()
|
||||
|
||||
ret = {}
|
||||
nodes = conn.list_servers()
|
||||
|
||||
for node in nodes:
|
||||
ret[node['name']] = node
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def list_nodes_select(conn=None, call=None):
|
||||
'''
|
||||
Return a list of the VMs that are on the provider, with select fields
|
||||
'''
|
||||
if not conn:
|
||||
conn = get_conn()
|
||||
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
list_nodes_full(conn, 'function'),
|
||||
__opts__['query.selection'],
|
||||
call,
|
||||
)
|
||||
|
||||
|
||||
def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance

    name
        The name of the VM to look up.
    call
        Must be ``action``; invoked via ``salt-cloud -a show_instance``.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    nodes = list_nodes_full()
    if name not in nodes:
        # Fail with a clear message instead of a bare KeyError when the
        # requested VM does not exist on the provider.
        raise SaltCloudSystemExit(
            'The specified instance {0} was not found.'.format(name)
        )

    __utils__['cloud.cache_node'](
        nodes[name],
        __active_provider_name__,
        __opts__
    )
    return nodes[name]
||||
def _get_server(vm_):
    '''
    Construct a 1&1 ``Server`` instance from the cloud profile config.

    The profile must define either ``fixed_instance_size`` or the full
    custom-hardware quartet (``vcore``, ``cores_per_processor``, ``ram``
    and ``hdds``); otherwise a ``SaltCloudConfigError`` is raised.
    '''
    def _opt(name, default=None):
        # All values come from the profile itself; never search globally.
        return config.get_cloud_config_value(
            name, vm_, __opts__, default=default, search_global=False
        )

    description = _opt('description')
    ssh_key = load_public_key(vm_)

    vcore = None
    cores_per_processor = None
    ram = None
    fixed_instance_size_id = None

    if 'fixed_instance_size' in vm_:
        fixed_instance_size = get_size(vm_)
        fixed_instance_size_id = fixed_instance_size['id']
    # Use .get() so missing keys fall through to the config error below
    # instead of raising a bare KeyError.
    elif (vm_.get('vcore') and vm_.get('cores_per_processor') and
          vm_.get('ram') and vm_.get('hdds')):
        vcore = _opt('vcore')
        cores_per_processor = _opt('cores_per_processor')
        ram = _opt('ram')
    else:
        raise SaltCloudConfigError(
            "'fixed_instance_size' or 'vcore', 'cores_per_processor', "
            "'ram', and 'hdds' must be provided."
        )

    # Construct the server object
    return Server(
        name=vm_['name'],
        description=description,
        fixed_instance_size_id=fixed_instance_size_id,
        vcore=vcore,
        cores_per_processor=cores_per_processor,
        ram=ram,
        appliance_id=_opt('appliance_id'),
        password=_opt('password'),
        power_on=_opt('power_on', default=True),
        firewall_policy_id=_opt('firewall_policy_id'),
        ip_id=_opt('ip_id'),
        load_balancer_id=_opt('load_balancer_id'),
        monitoring_policy_id=_opt('monitoring_policy_id'),
        datacenter_id=_opt('datacenter_id'),
        rsa_key=ssh_key,
        private_network_id=_opt('private_network_id')
    )
||||
def _get_hdds(vm_):
    '''
    Construct VM hdds from cloud profile config

    Returns a list of ``Hdd`` objects; an empty list when the profile
    defines no ``hdds`` (previously a missing key crashed with a
    ``TypeError`` because ``None`` is not iterable).
    '''
    _hdds = config.get_cloud_config_value(
        'hdds', vm_, __opts__, default=None,
        search_global=False
    )

    return [
        Hdd(
            size=hdd['size'],
            is_main=hdd['is_main']
        )
        for hdd in (_hdds or [])
    ]
||||
def create(vm_):
    '''
    Create a single VM from a data dict

    vm_
        The profile/VM data dict assembled by salt-cloud.

    Returns the bootstrap result dict on success, ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_['profile'] and
           config.is_profile_configured(__opts__,
                                        (__active_provider_name__ or
                                         'oneandone'),
                                        vm_['profile']) is False):
            return False
    except AttributeError:
        pass

    data = None
    conn = get_conn()
    hdds = []

    # Assemble the composite server object.
    server = _get_server(vm_)

    if not bool(server.specs['hardware']['fixed_instance_size_id']):
        # Custom hardware profile: assemble the hdds object as well.
        hdds = _get_hdds(vm_)

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={'name': vm_['name']},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    try:
        data = conn.create_server(server=server, hdds=hdds)

        # Block until the provider reports the server powered on.
        _wait_for_completion(conn,
                             get_wait_timeout(vm_),
                             data['id'])
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Error creating {0} on 1and1\n\n'
            'The following exception was thrown by the 1and1 library '
            'when trying to run the initial deployment: \n{1}'.format(
                vm_['name'], exc
            ),
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    vm_['server_id'] = data['id']
    password = data['first_password']

    def __query_node_data(vm_, data):
        '''
        Query node data until node becomes available.
        '''
        running = False
        try:
            data = show_instance(vm_['name'], 'action')
            if not data:
                return False
            log.debug(
                'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
                    vm_['name'],
                    pprint.pformat(data['name']),
                    data['status']['state']
                )
            )
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(
                    err
                ),
                # Show the trackback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            # Trigger a failure in the wait for IP function
            return False

        running = data['status']['state'].lower() == 'powered_on'
        if not running:
            # Still not running, trigger another iteration
            return

        vm_['ssh_host'] = data['ips'][0]['ip']

        return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # 'exc.message' does not exist on Python 3 exceptions;
            # str(exc) works on both Python 2 and 3.
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')
    # Log only the VM name: the full vm_ dict contains sensitive values
    # (e.g. the freshly-assigned root password) and must not hit the logs.
    log.info('Created Cloud VM {0}'.format(vm_['name']))
    log.debug(
        '{0} VM creation details:\n{1}'.format(
            vm_['name'], pprint.pformat(data)
        )
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args={
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if 'ssh_host' in vm_:
        vm_['password'] = password
        vm_['key_filename'] = get_key_filename(vm_)
        ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        ret.update(data)
        return ret
    else:
        raise SaltCloudSystemExit('A valid IP address was not found.')
||||
def destroy(name, call=None):
    '''
    destroy a server by name

    :param name: name given to the server
    :param call: call value in this case is 'action'
    :return: array of booleans , true if successfully stopped and true if
             successfully removed

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name

    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    fire_event = __utils__['cloud.fire_event']
    fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Look the server up by name and ask the provider to delete it.
    conn = get_conn()
    node = get_node(conn, name)
    conn.delete_server(server_id=node['id'])

    fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion's cached data when the master keeps a cache dir.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name,
            __active_provider_name__.split(':')[0],
            __opts__
        )

    return True
||||
def reboot(name, call=None):
    '''
    reboot a server by name
    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: true if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)
    if node is None:
        # get_node returns None for an unknown name; fail clearly instead
        # of crashing with a TypeError on node['id'].
        raise SaltCloudSystemExit(
            'Unable to locate the server {0}.'.format(name)
        )

    conn.modify_server_status(server_id=node['id'], action='REBOOT')

    return True
||||
def stop(name, call=None):
    '''
    stop a server by name
    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: true if successful

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)
    if node is None:
        # get_node returns None for an unknown name; fail clearly instead
        # of crashing with a TypeError on node['id'].
        raise SaltCloudSystemExit(
            'Unable to locate the server {0}.'.format(name)
        )

    conn.stop_server(server_id=node['id'])

    return True
||||
def start(name, call=None):
    '''
    start a server by name
    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: true if successful


    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    conn = get_conn()
    node = get_node(conn, name)
    if node is None:
        # get_node returns None for an unknown name; fail clearly instead
        # of crashing with a TypeError on node['id'].
        raise SaltCloudSystemExit(
            'Unable to locate the server {0}.'.format(name)
        )

    conn.start_server(server_id=node['id'])

    return True
||||
def get_node(conn, name):
    '''
    Return a node for the named VM

    Returns ``None`` when no server with that name exists.
    '''
    matches = (
        server for server in conn.list_servers(per_page=1000)
        if server['name'] == name
    )
    return next(matches, None)
||||
def get_key_filename(vm_):
    '''
    Check SSH private key file and return absolute path if exists.

    Returns ``None`` when the profile defines no ``ssh_private_key``;
    raises ``SaltCloudConfigError`` when the configured path is missing.
    '''
    key_filename = config.get_cloud_config_value(
        'ssh_private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is None:
        return None

    key_filename = os.path.expanduser(key_filename)
    if not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_private_key \'{0}\' does not exist'.format(
                key_filename
            )
        )
    return key_filename
||||
def load_public_key(vm_):
    '''
    Load the public key file if exists.

    Returns the key material with newlines stripped, or ``None`` when no
    ``ssh_public_key`` is configured; raises ``SaltCloudConfigError`` when
    the configured file is missing.
    '''
    public_key_filename = config.get_cloud_config_value(
        'ssh_public_key', vm_, __opts__, search_global=False, default=None
    )
    if public_key_filename is None:
        return None

    public_key_filename = os.path.expanduser(public_key_filename)
    if not os.path.isfile(public_key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_public_key \'{0}\' does not exist'.format(
                public_key_filename
            )
        )

    with salt.utils.fopen(public_key_filename, 'r') as public_key:
        return public_key.read().replace('\n', '')
|
||||
def get_wait_timeout(vm_):
    '''
    Return the wait_for_timeout for resource provisioning.
    '''
    # Fifteen minutes unless the profile overrides it.
    default_timeout = 15 * 60
    return config.get_cloud_config_value(
        'wait_for_timeout', vm_, __opts__, default=default_timeout,
        search_global=False
    )
||||
def _wait_for_completion(conn, wait_timeout, server_id):
|
||||
'''
|
||||
Poll request status until resource is provisioned.
|
||||
'''
|
||||
wait_timeout = time.time() + wait_timeout
|
||||
while wait_timeout > time.time():
|
||||
time.sleep(5)
|
||||
|
||||
server = conn.get_server(server_id)
|
||||
server_state = server['status']['state'].lower()
|
||||
|
||||
if server_state == "powered_on":
|
||||
return
|
||||
elif server_state == 'failed':
|
||||
raise Exception('Server creation failed for {0}'.format(server_id))
|
||||
elif server_state in ('active',
|
||||
'enabled',
|
||||
'deploying',
|
||||
'configuring'):
|
||||
continue
|
||||
else:
|
||||
raise Exception(
|
||||
'Unknown server state {0}'.format(server_state))
|
||||
raise Exception(
|
||||
'Timed out waiting for server create completion for {0}'.format(server_id)
|
||||
)
|
|
@ -17,8 +17,16 @@ from __future__ import absolute_import
|
|||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.config as config
|
||||
from salt.exceptions import SaltCloudException
|
||||
import salt.netapi
|
||||
import salt.ext.six as six
|
||||
if six.PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -47,28 +55,188 @@ def __virtual__():
|
|||
return True
|
||||
|
||||
|
||||
def list_nodes():
|
||||
def _get_connection_info():
|
||||
'''
|
||||
Because this module is not specific to any cloud providers, there will be
|
||||
no nodes to list.
|
||||
Return connection information for the passed VM data
|
||||
'''
|
||||
vm_ = get_configured_provider()
|
||||
|
||||
try:
|
||||
ret = {'username': vm_['username'],
|
||||
'password': vm_['password'],
|
||||
'eauth': vm_['eauth'],
|
||||
'vm': vm_,
|
||||
}
|
||||
except KeyError:
|
||||
raise SaltCloudException(
|
||||
'Configuration must define salt-api "username", "password" and "eauth"')
|
||||
return ret
|
||||
|
||||
|
||||
def avail_locations(call=None):
|
||||
'''
|
||||
This function returns a list of locations available.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-locations my-cloud-provider
|
||||
|
||||
[ saltify will always returns an empty dictionary ]
|
||||
'''
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def avail_images(call=None):
|
||||
'''
|
||||
This function returns a list of images available for this cloud provider.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-images saltify
|
||||
|
||||
returns a list of available profiles.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
|
||||
'''
|
||||
vm_ = get_configured_provider()
|
||||
return {'Profiles': [profile for profile in vm_['profiles']]}
|
||||
|
||||
|
||||
def avail_sizes(call=None):
|
||||
'''
|
||||
This function returns a list of sizes available for this cloud provider.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-sizes saltify
|
||||
|
||||
[ saltify always returns an empty dictionary ]
|
||||
'''
|
||||
return {}
|
||||
|
||||
|
||||
def list_nodes_full():
|
||||
def list_nodes(call=None):
|
||||
'''
|
||||
Because this module is not specific to any cloud providers, there will be
|
||||
no nodes to list.
|
||||
List the nodes which have salt-cloud:driver:saltify grains.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -Q
|
||||
|
||||
returns a list of dictionaries of defined standard fields.
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
|
||||
'''
|
||||
return {}
|
||||
nodes = _list_nodes_full(call)
|
||||
return _build_required_items(nodes)
|
||||
|
||||
|
||||
def list_nodes_select():
|
||||
def _build_required_items(nodes):
|
||||
ret = {}
|
||||
for name, grains in nodes.items():
|
||||
if grains:
|
||||
private_ips = []
|
||||
public_ips = []
|
||||
ips = grains['ipv4'] + grains['ipv6']
|
||||
for adrs in ips:
|
||||
ip_ = ipaddress.ip_address(adrs)
|
||||
if not ip_.is_loopback:
|
||||
if ip_.is_private:
|
||||
private_ips.append(adrs)
|
||||
else:
|
||||
public_ips.append(adrs)
|
||||
|
||||
ret[name] = {
|
||||
'id': grains['id'],
|
||||
'image': grains['salt-cloud']['profile'],
|
||||
'private_ips': private_ips,
|
||||
'public_ips': public_ips,
|
||||
'size': '',
|
||||
'state': 'running'
|
||||
}
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def list_nodes_full(call=None):
|
||||
'''
|
||||
Because this module is not specific to any cloud providers, there will be
|
||||
no nodes to list.
|
||||
Lists complete information for all nodes.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -F
|
||||
|
||||
returns a list of dictionaries.
|
||||
for 'saltify' minions, returns dict of grains (enhanced).
|
||||
salt-api setup required for operation.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
'''
|
||||
return {}
|
||||
|
||||
ret = _list_nodes_full(call)
|
||||
|
||||
for key, grains in ret.items(): # clean up some hyperverbose grains -- everything is too much
|
||||
try:
|
||||
del grains['cpu_flags'], grains['disks'], grains['pythonpath'], grains['dns'], grains['gpus']
|
||||
except KeyError:
|
||||
pass # ignore absence of things we are eliminating
|
||||
except TypeError:
|
||||
del ret[key] # eliminate all reference to unexpected (None) values.
|
||||
|
||||
reqs = _build_required_items(ret)
|
||||
|
||||
for name in ret:
|
||||
ret[name].update(reqs[name])
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def _list_nodes_full(call=None):
|
||||
'''
|
||||
List the nodes, ask all 'saltify' minions, return dict of grains.
|
||||
'''
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': 'salt-cloud:driver:saltify',
|
||||
'fun': 'grains.items',
|
||||
'arg': '',
|
||||
'tgt_type': 'grain',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
|
||||
return local.run(cmd)
|
||||
|
||||
|
||||
def list_nodes_select(call=None):
|
||||
'''
|
||||
Return a list of the minions that have salt-cloud grains, with
|
||||
select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
||||
def show_instance(name, call=None):
|
||||
'''
|
||||
List the a single node, return dict of grains.
|
||||
'''
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': 'name',
|
||||
'fun': 'grains.items',
|
||||
'arg': '',
|
||||
'tgt_type': 'glob',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
ret = local.run(cmd)
|
||||
ret.update(_build_required_items(ret))
|
||||
return ret
|
||||
|
||||
|
||||
def create(vm_):
|
||||
|
@ -190,3 +358,130 @@ def _verify(vm_):
|
|||
except SaltCloudException as exc:
|
||||
log.error('Exception: %s', exc)
|
||||
return False
|
||||
|
||||
|
||||
def destroy(name, call=None):
|
||||
''' Destroy a node.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --destroy mymachine
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
'''
|
||||
if call == 'function':
|
||||
raise SaltCloudSystemExit(
|
||||
'The destroy action must be called with -d, --destroy, '
|
||||
'-a, or --action.'
|
||||
)
|
||||
|
||||
opts = __opts__
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=opts['sock_dir'],
|
||||
transport=opts['transport']
|
||||
)
|
||||
|
||||
local = salt.netapi.NetapiClient(opts)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': name,
|
||||
'fun': 'grains.get',
|
||||
'arg': ['salt-cloud'],
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
vm_ = cmd['vm']
|
||||
my_info = local.run(cmd)
|
||||
try:
|
||||
vm_.update(my_info[name]) # get profile name to get config value
|
||||
except (IndexError, TypeError):
|
||||
pass
|
||||
if config.get_cloud_config_value(
|
||||
'remove_config_on_destroy', vm_, opts, default=True
|
||||
):
|
||||
cmd.update({'fun': 'service.disable', 'arg': ['salt-minion']})
|
||||
ret = local.run(cmd) # prevent generating new keys on restart
|
||||
if ret and ret[name]:
|
||||
log.info('disabled salt-minion service on %s', name)
|
||||
cmd.update({'fun': 'config.get', 'arg': ['conf_file']})
|
||||
ret = local.run(cmd)
|
||||
if ret and ret[name]:
|
||||
confile = ret[name]
|
||||
cmd.update({'fun': 'file.remove', 'arg': [confile]})
|
||||
ret = local.run(cmd)
|
||||
if ret and ret[name]:
|
||||
log.info('removed minion %s configuration file %s',
|
||||
name, confile)
|
||||
cmd.update({'fun': 'config.get', 'arg': ['pki_dir']})
|
||||
ret = local.run(cmd)
|
||||
if ret and ret[name]:
|
||||
pki_dir = ret[name]
|
||||
cmd.update({'fun': 'file.remove', 'arg': [pki_dir]})
|
||||
ret = local.run(cmd)
|
||||
if ret and ret[name]:
|
||||
log.info(
|
||||
'removed minion %s key files in %s',
|
||||
name,
|
||||
pki_dir)
|
||||
|
||||
if config.get_cloud_config_value(
|
||||
'shutdown_on_destroy', vm_, opts, default=False
|
||||
):
|
||||
cmd.update({'fun': 'system.shutdown', 'arg': ''})
|
||||
ret = local.run(cmd)
|
||||
if ret and ret[name]:
|
||||
log.info('system.shutdown for minion %s successful', name)
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=opts['sock_dir'],
|
||||
transport=opts['transport']
|
||||
)
|
||||
|
||||
return {'Destroyed': '{0} was destroyed.'.format(name)}
|
||||
|
||||
|
||||
def reboot(name, call=None):
|
||||
'''
|
||||
Reboot a saltify minion.
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
|
||||
name
|
||||
The name of the VM to reboot.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -a reboot vm_name
|
||||
'''
|
||||
|
||||
if call != 'action':
|
||||
raise SaltCloudException(
|
||||
'The reboot action must be called with -a or --action.'
|
||||
)
|
||||
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': name,
|
||||
'fun': 'system.reboot',
|
||||
'arg': '',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
ret = local.run(cmd)
|
||||
|
||||
return ret
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -719,6 +719,10 @@ VALID_OPTS = {
|
|||
# same module used for external authentication.
|
||||
'eauth_acl_module': str,
|
||||
|
||||
# Subsystem to use to maintain eauth tokens. By default, tokens are stored on the local
|
||||
# filesystem
|
||||
'eauth_tokens': str,
|
||||
|
||||
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
|
||||
# higher than the system default in order to account for the way zeromq consumes file handles.
|
||||
'max_open_files': int,
|
||||
|
@ -1469,8 +1473,9 @@ DEFAULT_MASTER_OPTS = {
|
|||
'syndic_forward_all_events': False,
|
||||
'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
|
||||
'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
|
||||
'runner_dirs': [],
|
||||
'outputter_dirs': [],
|
||||
'runner_dirs': [],
|
||||
'utils_dirs': [],
|
||||
'client_acl_verify': True,
|
||||
'publisher_acl': {},
|
||||
'publisher_acl_blacklist': {},
|
||||
|
@ -1480,6 +1485,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'token_expire_user_override': False,
|
||||
'keep_acl_in_token': False,
|
||||
'eauth_acl_module': '',
|
||||
'eauth_tokens': 'localfs',
|
||||
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
|
||||
'file_recv': False,
|
||||
'file_recv_max_size': 100,
|
||||
|
@ -1663,7 +1669,8 @@ DEFAULT_PROXY_MINION_OPTS = {
|
|||
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'proxy'),
|
||||
'add_proxymodule_to_opts': False,
|
||||
'proxy_merge_grains_in_module': True,
|
||||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include'],
|
||||
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'proxy', 'extmods'),
|
||||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
|
@ -3225,12 +3232,12 @@ def is_profile_configured(opts, provider, profile_name, vm_=None):
|
|||
alias, driver = provider.split(':')
|
||||
|
||||
# Most drivers need an image to be specified, but some do not.
|
||||
non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer']
|
||||
non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone']
|
||||
|
||||
# Most drivers need a size, but some do not.
|
||||
non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',
|
||||
'softlayer', 'softlayer_hw', 'vmware', 'vsphere',
|
||||
'virtualbox', 'profitbricks', 'libvirt']
|
||||
'virtualbox', 'profitbricks', 'libvirt', 'oneandone']
|
||||
|
||||
provider_key = opts['providers'][alias][driver]
|
||||
profile_key = opts['providers'][alias][driver]['profiles'][profile_name]
|
||||
|
@ -3595,12 +3602,23 @@ def apply_master_config(overrides=None, defaults=None):
|
|||
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
|
||||
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
|
||||
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Make sure ext_mods gets set if it is an untrue value
|
||||
# (here to catch older bad configs)
|
||||
opts['extension_modules'] = (
|
||||
opts.get('extension_modules') or
|
||||
os.path.join(opts['cachedir'], 'extmods')
|
||||
)
|
||||
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
|
||||
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
|
||||
# Set up the utils_dirs location from the extension_modules location
|
||||
opts['utils_dirs'] = (
|
||||
opts.get('utils_dirs') or
|
||||
[os.path.join(opts['extension_modules'], 'utils')]
|
||||
)
|
||||
|
||||
# Insert all 'utils_dirs' directories to the system path
|
||||
insert_system_path(opts, opts['utils_dirs'])
|
||||
|
||||
if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic':
|
||||
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
|
||||
if 'ipc_write_buffer' not in overrides:
|
||||
|
|
|
@ -12,7 +12,6 @@ import os
|
|||
import re
|
||||
import time
|
||||
import stat
|
||||
import msgpack
|
||||
|
||||
# Import salt libs
|
||||
import salt.crypt
|
||||
|
@ -33,6 +32,8 @@ import salt.utils.atomicfile
|
|||
import salt.utils.event
|
||||
import salt.utils.files
|
||||
import salt.utils.gitfs
|
||||
import salt.utils.verify
|
||||
import salt.utils.minions
|
||||
import salt.utils.gzip_util
|
||||
import salt.utils.jid
|
||||
import salt.utils.minions
|
||||
|
@ -65,44 +66,19 @@ def init_git_pillar(opts):
|
|||
ret = []
|
||||
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
|
||||
if 'git' in opts_dict:
|
||||
if isinstance(opts_dict['git'], six.string_types):
|
||||
# Legacy git pillar code
|
||||
try:
|
||||
import git
|
||||
except ImportError:
|
||||
return ret
|
||||
parts = opts_dict['git'].strip().split()
|
||||
try:
|
||||
br = parts[0]
|
||||
loc = parts[1]
|
||||
except IndexError:
|
||||
log.critical(
|
||||
'Unable to extract external pillar data: {0}'
|
||||
.format(opts_dict['git'])
|
||||
)
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
raise
|
||||
else:
|
||||
ret.append(
|
||||
git_pillar._LegacyGitPillar(
|
||||
br,
|
||||
loc,
|
||||
opts
|
||||
)
|
||||
)
|
||||
else:
|
||||
# New git_pillar code
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
raise
|
||||
else:
|
||||
log.critical('Could not initialize git_pillar')
|
||||
log.critical('Could not initialize git_pillar')
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -152,22 +128,11 @@ def clean_expired_tokens(opts):
|
|||
'''
|
||||
Clean expired tokens from the master
|
||||
'''
|
||||
serializer = salt.payload.Serial(opts)
|
||||
for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):
|
||||
for token in filenames:
|
||||
token_path = os.path.join(dirpath, token)
|
||||
with salt.utils.files.fopen(token_path, 'rb') as token_file:
|
||||
try:
|
||||
token_data = serializer.loads(token_file.read())
|
||||
except msgpack.UnpackValueError:
|
||||
# Bad token file or empty. Remove.
|
||||
os.remove(token_path)
|
||||
return
|
||||
if 'expire' not in token_data or token_data.get('expire', 0) < time.time():
|
||||
try:
|
||||
os.remove(token_path)
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
loadauth = salt.auth.LoadAuth(opts)
|
||||
for tok in loadauth.list_tokens():
|
||||
token_data = loadauth.get_tok(tok)
|
||||
if 'expire' not in token_data or token_data.get('expire', 0) < time.time():
|
||||
loadauth.rm_token(tok)
|
||||
|
||||
|
||||
def clean_pub_auth(opts):
|
||||
|
@ -717,8 +682,7 @@ class RemoteFuncs(object):
|
|||
load.get('ext'),
|
||||
self.mminion.functions,
|
||||
pillar_override=load.get('pillar_override', {}))
|
||||
pillar_dirs = {}
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
data = pillar.compile_pillar()
|
||||
if self.opts.get('minion_data_cache', False):
|
||||
self.cache.store('minions/{0}'.format(load['id']),
|
||||
'data',
|
||||
|
@ -1071,7 +1035,7 @@ class LocalFuncs(object):
|
|||
'for user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, load['fun']):
|
||||
if not self.ckminions.runner_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.').format(auth_type, username)))
|
||||
|
@ -1127,7 +1091,7 @@ class LocalFuncs(object):
|
|||
'user {0}.').format(username)))
|
||||
|
||||
if auth_type != 'user':
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun']):
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(auth_type, username)))
|
||||
|
|
|
@ -640,6 +640,13 @@ class Client(object):
|
|||
|
||||
def on_header(hdr):
|
||||
if write_body[1] is not False and write_body[2] is None:
|
||||
if not hdr.strip() and 'Content-Type' not in write_body[1]:
|
||||
# We've reached the end of the headers and not yet
|
||||
# found the Content-Type. Reset the values we're
|
||||
# tracking so that we properly follow the redirect.
|
||||
write_body[0] = None
|
||||
write_body[1] = False
|
||||
return
|
||||
# Try to find out what content type encoding is used if
|
||||
# this is a text file
|
||||
write_body[1].parse_line(hdr) # pylint: disable=no-member
|
||||
|
|
|
@ -503,7 +503,7 @@ class Key(object):
|
|||
if minion not in minions and minion not in preserve_minions:
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.ls(self.ACC)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
|
@ -981,7 +981,7 @@ class RaetKey(Key):
|
|||
if minion not in minions:
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.ls(self.ACC)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
|
|
|
@ -82,14 +82,11 @@ else:
|
|||
# which simplifies code readability, it adds some unsupported functions into
|
||||
# the driver's module scope.
|
||||
# We list un-supported functions here. These will be removed from the loaded.
|
||||
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
|
||||
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
|
||||
u'parallels.avail_sizes',
|
||||
u'parallels.avail_locations',
|
||||
u'proxmox.avail_sizes',
|
||||
u'saltify.destroy',
|
||||
u'saltify.avail_sizes',
|
||||
u'saltify.avail_images',
|
||||
u'saltify.avail_locations',
|
||||
u'rackspace.reboot',
|
||||
u'openstack.list_locations',
|
||||
u'rackspace.list_locations'
|
||||
|
@ -418,6 +415,19 @@ def serializers(opts):
|
|||
)
|
||||
|
||||
|
||||
def eauth_tokens(opts):
|
||||
'''
|
||||
Returns the tokens modules
|
||||
:param dict opts: The Salt options dictionary
|
||||
:returns: LazyLoader instance, with only token backends present in the keyspace
|
||||
'''
|
||||
return LazyLoader(
|
||||
_module_dirs(opts, 'tokens'),
|
||||
opts,
|
||||
tag='tokens',
|
||||
)
|
||||
|
||||
|
||||
def auth(opts, whitelist=None):
|
||||
'''
|
||||
Returns the auth modules
|
||||
|
|
|
@ -315,7 +315,7 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
|
|||
'''
|
||||
try:
|
||||
for pillar in self.git_pillar:
|
||||
pillar.update()
|
||||
pillar.fetch_remotes()
|
||||
except Exception as exc:
|
||||
log.error(u'Exception caught while updating git_pillar',
|
||||
exc_info=True)
|
||||
|
@ -471,18 +471,18 @@ class Master(SMaster):
|
|||
pass
|
||||
|
||||
if self.opts.get(u'git_pillar_verify_config', True):
|
||||
non_legacy_git_pillars = [
|
||||
git_pillars = [
|
||||
x for x in self.opts.get(u'ext_pillar', [])
|
||||
if u'git' in x
|
||||
and not isinstance(x[u'git'], six.string_types)
|
||||
]
|
||||
if non_legacy_git_pillars:
|
||||
if git_pillars:
|
||||
try:
|
||||
new_opts = copy.deepcopy(self.opts)
|
||||
from salt.pillar.git_pillar \
|
||||
import PER_REMOTE_OVERRIDES as per_remote_overrides, \
|
||||
PER_REMOTE_ONLY as per_remote_only
|
||||
for repo in non_legacy_git_pillars:
|
||||
for repo in git_pillars:
|
||||
new_opts[u'ext_pillar'] = [repo]
|
||||
try:
|
||||
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
|
||||
|
@ -1304,7 +1304,6 @@ class AESFuncs(object):
|
|||
return False
|
||||
load[u'grains'][u'id'] = load[u'id']
|
||||
|
||||
pillar_dirs = {}
|
||||
pillar = salt.pillar.get_pillar(
|
||||
self.opts,
|
||||
load[u'grains'],
|
||||
|
@ -1313,7 +1312,7 @@ class AESFuncs(object):
|
|||
ext=load.get(u'ext'),
|
||||
pillar_override=load.get(u'pillar_override', {}),
|
||||
pillarenv=load.get(u'pillarenv'))
|
||||
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
|
||||
data = pillar.compile_pillar()
|
||||
self.fs_.update_opts()
|
||||
if self.opts.get(u'minion_data_cache', False):
|
||||
self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']),
|
||||
|
@ -1684,7 +1683,7 @@ class ClearFuncs(object):
|
|||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun']):
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
|
@ -1697,7 +1696,7 @@ class ClearFuncs(object):
|
|||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun']):
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
@ -1751,7 +1750,7 @@ class ClearFuncs(object):
|
|||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun']):
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
|
@ -1764,7 +1763,7 @@ class ClearFuncs(object):
|
|||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun']):
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
|
|
@ -1275,7 +1275,7 @@ class Minion(MinionBase):
|
|||
ret = yield channel.send(load, timeout=timeout)
|
||||
raise tornado.gen.Return(ret)
|
||||
|
||||
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
|
||||
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
|
||||
'''
|
||||
Fire an event on the master, or drop message if unable to send.
|
||||
'''
|
||||
|
@ -1294,10 +1294,6 @@ class Minion(MinionBase):
|
|||
else:
|
||||
return
|
||||
|
||||
def timeout_handler(*_):
|
||||
log.info(u'fire_master failed: master could not be contacted. Request timed out.')
|
||||
return True
|
||||
|
||||
if sync:
|
||||
try:
|
||||
self._send_req_sync(load, timeout)
|
||||
|
@ -1308,6 +1304,12 @@ class Minion(MinionBase):
|
|||
log.info(u'fire_master failed: %s', traceback.format_exc())
|
||||
return False
|
||||
else:
|
||||
if timeout_handler is None:
|
||||
def handle_timeout(*_):
|
||||
log.info(u'fire_master failed: master could not be contacted. Request timed out.')
|
||||
return True
|
||||
timeout_handler = handle_timeout
|
||||
|
||||
with tornado.stack_context.ExceptionStackContext(timeout_handler):
|
||||
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
|
||||
return True
|
||||
|
@ -1453,13 +1455,21 @@ class Minion(MinionBase):
|
|||
function_name = data[u'fun']
|
||||
if function_name in minion_instance.functions:
|
||||
try:
|
||||
minion_blackout_violation = False
|
||||
if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
|
||||
# this minion is blacked out. Only allow saltutil.refresh_pillar
|
||||
if function_name != u'saltutil.refresh_pillar' and \
|
||||
function_name not in minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []):
|
||||
raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
|
||||
u'to False in pillar to resume operations. Only '
|
||||
u'saltutil.refresh_pillar allowed in blackout mode.')
|
||||
whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', [])
|
||||
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
|
||||
if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist:
|
||||
minion_blackout_violation = True
|
||||
elif minion_instance.opts[u'grains'].get(u'minion_blackout', False):
|
||||
whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', [])
|
||||
if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist:
|
||||
minion_blackout_violation = True
|
||||
if minion_blackout_violation:
|
||||
raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
|
||||
u'to False in pillar or grains to resume operations. Only '
|
||||
u'saltutil.refresh_pillar allowed in blackout mode.')
|
||||
|
||||
func = minion_instance.functions[function_name]
|
||||
args, kwargs = load_args_and_kwargs(
|
||||
func,
|
||||
|
@ -1622,14 +1632,23 @@ class Minion(MinionBase):
|
|||
for ind in range(0, len(data[u'fun'])):
|
||||
ret[u'success'][data[u'fun'][ind]] = False
|
||||
try:
|
||||
minion_blackout_violation = False
|
||||
if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False):
|
||||
# this minion is blacked out. Only allow saltutil.refresh_pillar
|
||||
if data[u'fun'][ind] != u'saltutil.refresh_pillar' and \
|
||||
data[u'fun'][ind] not in minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []):
|
||||
raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
|
||||
u'to False in pillar to resume operations. Only '
|
||||
u'saltutil.refresh_pillar allowed in blackout mode.')
|
||||
whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', [])
|
||||
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
|
||||
if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist:
|
||||
minion_blackout_violation = True
|
||||
elif minion_instance.opts[u'grains'].get(u'minion_blackout', False):
|
||||
whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', [])
|
||||
if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist:
|
||||
minion_blackout_violation = True
|
||||
if minion_blackout_violation:
|
||||
raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' '
|
||||
u'to False in pillar or grains to resume operations. Only '
|
||||
u'saltutil.refresh_pillar allowed in blackout mode.')
|
||||
|
||||
func = minion_instance.functions[data[u'fun'][ind]]
|
||||
|
||||
args, kwargs = load_args_and_kwargs(
|
||||
func,
|
||||
data[u'arg'][ind],
|
||||
|
@ -2010,8 +2029,9 @@ class Minion(MinionBase):
|
|||
elif tag.startswith(u'_minion_mine'):
|
||||
self._mine_send(tag, data)
|
||||
elif tag.startswith(u'fire_master'):
|
||||
log.debug(u'Forwarding master event tag=%s', data[u'tag'])
|
||||
self._fire_master(data[u'data'], data[u'tag'], data[u'events'], data[u'pretag'])
|
||||
if self.connected:
|
||||
log.debug(u'Forwarding master event tag=%s', data[u'tag'])
|
||||
self._fire_master(data[u'data'], data[u'tag'], data[u'events'], data[u'pretag'])
|
||||
elif tag.startswith(master_event(type=u'disconnected')) or tag.startswith(master_event(type=u'failback')):
|
||||
# if the master disconnect event is for a different master, raise an exception
|
||||
if tag.startswith(master_event(type=u'disconnected')) and data[u'master'] != self.opts[u'master']:
|
||||
|
@ -2232,13 +2252,15 @@ class Minion(MinionBase):
|
|||
if ping_interval > 0 and self.connected:
|
||||
def ping_master():
|
||||
try:
|
||||
if not self._fire_master(u'ping', u'minion_ping'):
|
||||
def ping_timeout_handler(*_):
|
||||
if not self.opts.get(u'auth_safemode', True):
|
||||
log.error(u'** Master Ping failed. Attempting to restart minion**')
|
||||
delay = self.opts.get(u'random_reauth_delay', 5)
|
||||
log.info(u'delaying random_reauth_delay %ss', delay)
|
||||
# regular sys.exit raises an exception -- which isn't sufficient in a thread
|
||||
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
|
||||
|
||||
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
|
||||
except Exception:
|
||||
log.warning(u'Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
|
||||
self.periodic_callbacks[u'ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
|
||||
|
@ -2253,7 +2275,7 @@ class Minion(MinionBase):
|
|||
except Exception:
|
||||
log.critical(u'The beacon errored: ', exc_info=True)
|
||||
if beacons and self.connected:
|
||||
self._fire_master(events=beacons)
|
||||
self._fire_master(events=beacons, sync=False)
|
||||
|
||||
self.periodic_callbacks[u'beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
|
||||
|
||||
|
|
|
@ -2699,11 +2699,11 @@ def request_vpc_peering_connection(requester_vpc_id=None, requester_vpc_name=Non
|
|||
Name tag of the requesting VPC. Exclusive with requester_vpc_id.
|
||||
|
||||
peer_vpc_id
|
||||
ID of the VPC tp crete VPC peering connection with. This can be a VPC in
|
||||
ID of the VPC to create VPC peering connection with. This can be a VPC in
|
||||
another account. Exclusive with peer_vpc_name.
|
||||
|
||||
peer_vpc_name
|
||||
Name tag of the VPC tp crete VPC peering connection with. This can only
|
||||
Name tag of the VPC to create VPC peering connection with. This can only
|
||||
be a VPC in the same account, else resolving it into a vpc ID will almost
|
||||
certainly fail. Exclusive with peer_vpc_id.
|
||||
|
||||
|
|
|
@ -60,7 +60,36 @@ def _gather_pillar(pillarenv, pillar_override):
|
|||
return ret
|
||||
|
||||
|
||||
def recv(dest, chunk, append=False, compressed=True, mode=None):
|
||||
def recv(files, dest):
|
||||
'''
|
||||
Used with salt-cp, pass the files dict, and the destination.
|
||||
|
||||
This function receives small fast copy files from the master via salt-cp.
|
||||
It does not work via the CLI.
|
||||
'''
|
||||
ret = {}
|
||||
for path, data in six.iteritems(files):
|
||||
if os.path.basename(path) == os.path.basename(dest) \
|
||||
and not os.path.isdir(dest):
|
||||
final = dest
|
||||
elif os.path.isdir(dest):
|
||||
final = os.path.join(dest, os.path.basename(path))
|
||||
elif os.path.isdir(os.path.dirname(dest)):
|
||||
final = dest
|
||||
else:
|
||||
return 'Destination unavailable'
|
||||
|
||||
try:
|
||||
with salt.utils.fopen(final, 'w+') as fp_:
|
||||
fp_.write(data)
|
||||
ret[final] = True
|
||||
except IOError:
|
||||
ret[final] = False
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def recv_chunked(dest, chunk, append=False, compressed=True, mode=None):
|
||||
'''
|
||||
This function receives files copied to the minion using ``salt-cp`` and is
|
||||
not intended to be used directly on the CLI.
|
||||
|
|
|
@ -801,7 +801,7 @@ def get_client_args():
|
|||
|
||||
salt myminion docker.get_client_args
|
||||
'''
|
||||
return salt.utils.docker.get_client_args()
|
||||
return __utils__['docker.get_client_args']()
|
||||
|
||||
|
||||
def _get_create_kwargs(image,
|
||||
|
@ -3848,7 +3848,6 @@ def save(name,
|
|||
if os.path.exists(path) and not overwrite:
|
||||
raise CommandExecutionError('{0} already exists'.format(path))
|
||||
|
||||
compression = kwargs.get('compression')
|
||||
if compression is None:
|
||||
if path.endswith('.tar.gz') or path.endswith('.tgz'):
|
||||
compression = 'gzip'
|
||||
|
@ -3954,7 +3953,7 @@ def save(name,
|
|||
ret['Size_Human'] = _size_fmt(ret['Size'])
|
||||
|
||||
# Process push
|
||||
if kwargs.get(push, False):
|
||||
if kwargs.get('push', False):
|
||||
ret['Push'] = __salt__['cp.push'](path)
|
||||
|
||||
return ret
|
||||
|
|
|
@ -73,7 +73,7 @@ def __virtual__():
|
|||
'python etcd library not available.')
|
||||
|
||||
|
||||
def get_(key, recurse=False, profile=None):
|
||||
def get_(key, recurse=False, profile=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -86,15 +86,16 @@ def get_(key, recurse=False, profile=None):
|
|||
salt myminion etcd.get /path/to/key
|
||||
salt myminion etcd.get /path/to/key profile=my_etcd_config
|
||||
salt myminion etcd.get /path/to/key recurse=True profile=my_etcd_config
|
||||
salt myminion etcd.get /path/to/key host=127.0.0.1 port=2379
|
||||
'''
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
if recurse:
|
||||
return client.tree(key)
|
||||
else:
|
||||
return client.get(key, recurse=recurse)
|
||||
|
||||
|
||||
def set_(key, value, profile=None, ttl=None, directory=False):
|
||||
def set_(key, value, profile=None, ttl=None, directory=False, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -107,15 +108,16 @@ def set_(key, value, profile=None, ttl=None, directory=False):
|
|||
|
||||
salt myminion etcd.set /path/to/key value
|
||||
salt myminion etcd.set /path/to/key value profile=my_etcd_config
|
||||
salt myminion etcd.set /path/to/key value host=127.0.0.1 port=2379
|
||||
salt myminion etcd.set /path/to/dir '' directory=True
|
||||
salt myminion etcd.set /path/to/key value ttl=5
|
||||
'''
|
||||
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.set(key, value, ttl=ttl, directory=directory)
|
||||
|
||||
|
||||
def update(fields, path='', profile=None):
|
||||
def update(fields, path='', profile=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
|
@ -162,13 +164,14 @@ def update(fields, path='', profile=None):
|
|||
|
||||
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}"
|
||||
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" profile=my_etcd_config
|
||||
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" host=127.0.0.1 port=2379
|
||||
salt myminion etcd.update "{'/path/to/key': 'baz', '/another/key': 'bar'}" path='/some/root'
|
||||
'''
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.update(fields, path)
|
||||
|
||||
|
||||
def watch(key, recurse=False, profile=None, timeout=0, index=None):
|
||||
def watch(key, recurse=False, profile=None, timeout=0, index=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
|
@ -186,13 +189,14 @@ def watch(key, recurse=False, profile=None, timeout=0, index=None):
|
|||
salt myminion etcd.watch /path/to/key
|
||||
salt myminion etcd.watch /path/to/key timeout=10
|
||||
salt myminion etcd.watch /patch/to/key profile=my_etcd_config index=10
|
||||
salt myminion etcd.watch /patch/to/key host=127.0.0.1 port=2379
|
||||
'''
|
||||
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.watch(key, recurse=recurse, timeout=timeout, index=index)
|
||||
|
||||
|
||||
def ls_(path='/', profile=None):
|
||||
def ls_(path='/', profile=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -206,12 +210,13 @@ def ls_(path='/', profile=None):
|
|||
|
||||
salt myminion etcd.ls /path/to/dir/
|
||||
salt myminion etcd.ls /path/to/dir/ profile=my_etcd_config
|
||||
salt myminion etcd.ls /path/to/dir/ host=127.0.0.1 port=2379
|
||||
'''
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.ls(path)
|
||||
|
||||
|
||||
def rm_(key, recurse=False, profile=None):
|
||||
def rm_(key, recurse=False, profile=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -225,13 +230,14 @@ def rm_(key, recurse=False, profile=None):
|
|||
|
||||
salt myminion etcd.rm /path/to/key
|
||||
salt myminion etcd.rm /path/to/key profile=my_etcd_config
|
||||
salt myminion etcd.rm /path/to/key host=127.0.0.1 port=2379
|
||||
salt myminion etcd.rm /path/to/dir recurse=True profile=my_etcd_config
|
||||
'''
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.rm(key, recurse=recurse)
|
||||
|
||||
|
||||
def tree(path='/', profile=None):
|
||||
def tree(path='/', profile=None, **kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
|
||||
|
@ -244,7 +250,8 @@ def tree(path='/', profile=None):
|
|||
|
||||
salt myminion etcd.tree
|
||||
salt myminion etcd.tree profile=my_etcd_config
|
||||
salt myminion etcd.tree host=127.0.0.1 port=2379
|
||||
salt myminion etcd.tree /path/to/keys profile=my_etcd_config
|
||||
'''
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile)
|
||||
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
|
||||
return client.tree(path)
|
||||
|
|
|
@ -501,8 +501,11 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None,
|
|||
after_jump.append('--{0} {1}'.format(after_jump_argument, value))
|
||||
del kwargs[after_jump_argument]
|
||||
|
||||
for key, value in kwargs.items():
|
||||
for key in kwargs:
|
||||
negation = maybe_add_negation(key)
|
||||
# don't use .items() since maybe_add_negation removes the prefix from
|
||||
# the value in the kwargs, thus we need to fetch it after that has run
|
||||
value = kwargs[key]
|
||||
flag = '-' if len(key) == 1 else '--'
|
||||
value = '' if value in (None, '') else ' {0}'.format(value)
|
||||
rule.append('{0}{1}{2}{3}'.format(negation, flag, key, value))
|
||||
|
|
|
@ -37,7 +37,7 @@ import salt.utils.files
|
|||
|
||||
# Import 3rd-party libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
from salt.exceptions import SaltInvocationError
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
# pylint: enable=import-error,no-name-in-module
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -89,9 +89,22 @@ def _connect():
|
|||
password=jenkins_password)
|
||||
|
||||
|
||||
def _retrieve_config_xml(config_xml, saltenv):
|
||||
'''
|
||||
Helper to cache the config XML and raise a CommandExecutionError if we fail
|
||||
to do so. If we successfully cache the file, return the cached path.
|
||||
'''
|
||||
ret = __salt__['cp.cache_file'](config_xml, saltenv)
|
||||
|
||||
if not ret:
|
||||
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def run(script):
|
||||
'''
|
||||
.. versionadded:: Carbon
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
Execute a groovy script on the jenkins master
|
||||
|
||||
|
@ -166,7 +179,7 @@ def job_exists(name=None):
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
if server.job_exists(name):
|
||||
|
@ -190,12 +203,12 @@ def get_job_info(name=None):
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exist.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
job_info = server.get_job_info(name)
|
||||
if job_info:
|
||||
|
@ -219,17 +232,19 @@ def build_job(name=None, parameters=None):
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exist.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
|
||||
|
||||
try:
|
||||
server.build_job(name, parameters)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error building job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -254,15 +269,15 @@ def create_job(name=None,
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
if job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` already exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' already exists'.format(name))
|
||||
|
||||
if not config_xml:
|
||||
config_xml = jenkins.EMPTY_CONFIG_XML
|
||||
else:
|
||||
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv)
|
||||
config_xml_file = _retrieve_config_xml(config_xml, saltenv)
|
||||
|
||||
with salt.utils.files.fopen(config_xml_file) as _fp:
|
||||
config_xml = _fp.read()
|
||||
|
@ -271,7 +286,9 @@ def create_job(name=None,
|
|||
try:
|
||||
server.create_job(name, config_xml)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error creating job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return config_xml
|
||||
|
||||
|
||||
|
@ -296,12 +313,12 @@ def update_job(name=None,
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
if not config_xml:
|
||||
config_xml = jenkins.EMPTY_CONFIG_XML
|
||||
else:
|
||||
config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv)
|
||||
config_xml_file = _retrieve_config_xml(config_xml, saltenv)
|
||||
|
||||
with salt.utils.files.fopen(config_xml_file) as _fp:
|
||||
config_xml = _fp.read()
|
||||
|
@ -310,7 +327,9 @@ def update_job(name=None,
|
|||
try:
|
||||
server.reconfig_job(name, config_xml)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error updating job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return config_xml
|
||||
|
||||
|
||||
|
@ -329,17 +348,19 @@ def delete_job(name=None):
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.delete_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error deleting job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -358,17 +379,19 @@ def enable_job(name=None):
|
|||
|
||||
'''
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.enable_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error enabling job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -388,17 +411,19 @@ def disable_job(name=None):
|
|||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
try:
|
||||
server.disable_job(name)
|
||||
except jenkins.JenkinsException as err:
|
||||
raise SaltInvocationError('Something went wrong {0}.'.format(err))
|
||||
raise CommandExecutionError(
|
||||
'Encountered error disabling job \'{0}\': {1}'.format(name, err)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
|
@ -418,12 +443,12 @@ def job_status(name=None):
|
|||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
return server.get_job_info('empty')['buildable']
|
||||
|
||||
|
@ -444,12 +469,12 @@ def get_job_config(name=None):
|
|||
'''
|
||||
|
||||
if not name:
|
||||
raise SaltInvocationError('Required parameter `name` is missing.')
|
||||
raise SaltInvocationError('Required parameter \'name\' is missing')
|
||||
|
||||
server = _connect()
|
||||
|
||||
if not job_exists(name):
|
||||
raise SaltInvocationError('Job `{0}` does not exists.'.format(name))
|
||||
raise CommandExecutionError('Job \'{0}\' does not exist'.format(name))
|
||||
|
||||
job_info = server.get_job_config(name)
|
||||
return job_info
|
||||
|
|
|
@ -7,12 +7,12 @@ import functools
|
|||
import logging
|
||||
import re
|
||||
|
||||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
|
||||
try:
|
||||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
from salt.utils.versions import LooseVersion as _LooseVersion
|
||||
from salt.ext.six.moves import filter # pylint: disable=import-error,redefined-builtin
|
||||
from salt.exceptions import CommandExecutionError
|
||||
HAS_REQUIRED_LIBS = True
|
||||
except ImportError:
|
||||
HAS_REQUIRED_LIBS = False
|
||||
|
@ -116,9 +116,9 @@ def latest_installed():
|
|||
.. note::
|
||||
|
||||
This function may not return the same value as
|
||||
:py:func:`~salt.modules.kernelpkg.active` if a new kernel
|
||||
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
|
||||
has been installed and the system has not yet been rebooted.
|
||||
The :py:func:`~salt.modules.kernelpkg.needs_reboot` function
|
||||
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
|
||||
exists to detect this condition.
|
||||
'''
|
||||
pkgs = list_installed()
|
||||
|
@ -200,6 +200,69 @@ def upgrade_available():
|
|||
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
|
||||
|
||||
|
||||
def remove(release):
|
||||
'''
|
||||
Remove a specific version of the kernel.
|
||||
|
||||
release
|
||||
The release number of an installed kernel. This must be the entire release
|
||||
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
|
||||
not the package name.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' kernelpkg.remove 4.4.0-70-generic
|
||||
'''
|
||||
if release not in list_installed():
|
||||
raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release))
|
||||
|
||||
if release == active():
|
||||
raise CommandExecutionError('Active kernel cannot be removed')
|
||||
|
||||
target = '{0}-{1}'.format(_package_prefix(), release)
|
||||
log.info('Removing kernel package {0}'.format(target))
|
||||
|
||||
__salt__['pkg.purge'](target)
|
||||
|
||||
return {'removed': [target]}
|
||||
|
||||
|
||||
def cleanup(keep_latest=True):
|
||||
'''
|
||||
Remove all unused kernel packages from the system.
|
||||
|
||||
keep_latest : True
|
||||
In the event that the active kernel is not the latest one installed, setting this to True
|
||||
will retain the latest kernel package, in addition to the active one. If False, all kernel
|
||||
packages other than the active one will be removed.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' kernelpkg.cleanup
|
||||
'''
|
||||
removed = []
|
||||
|
||||
# Loop over all installed kernel packages
|
||||
for kernel in list_installed():
|
||||
|
||||
# Keep the active kernel package
|
||||
if kernel == active():
|
||||
continue
|
||||
|
||||
# Optionally keep the latest kernel package
|
||||
if keep_latest and kernel == latest_installed():
|
||||
continue
|
||||
|
||||
# Remove the kernel package
|
||||
removed.extend(remove(kernel)['removed'])
|
||||
|
||||
return {'removed': removed}
|
||||
|
||||
|
||||
def _package_prefix():
|
||||
'''
|
||||
Return static string for the package prefix
|
||||
|
@ -218,11 +281,11 @@ def _cmp_version(item1, item2):
|
|||
'''
|
||||
Compare function for package version sorting
|
||||
'''
|
||||
v1 = _LooseVersion(item1)
|
||||
v2 = _LooseVersion(item2)
|
||||
vers1 = _LooseVersion(item1)
|
||||
vers2 = _LooseVersion(item2)
|
||||
|
||||
if v1 < v2:
|
||||
if vers1 < vers2:
|
||||
return -1
|
||||
if v1 > v2:
|
||||
if vers1 > vers2:
|
||||
return 1
|
||||
return 0
|
||||
|
|
|
@ -6,11 +6,13 @@ from __future__ import absolute_import
|
|||
import functools
|
||||
import logging
|
||||
|
||||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
|
||||
try:
|
||||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
from salt.utils.versions import LooseVersion as _LooseVersion
|
||||
from salt.exceptions import CommandExecutionError
|
||||
import salt.utils.systemd
|
||||
import salt.modules.yumpkg
|
||||
HAS_REQUIRED_LIBS = True
|
||||
except ImportError:
|
||||
HAS_REQUIRED_LIBS = False
|
||||
|
@ -20,6 +22,9 @@ log = logging.getLogger(__name__)
|
|||
# Define the module's virtual name
|
||||
__virtualname__ = 'kernelpkg'
|
||||
|
||||
# Import functions from yumpkg
|
||||
_yum = salt.utils.namespaced_function(salt.modules.yumpkg._yum, globals()) # pylint: disable=invalid-name, protected-access
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
|
@ -103,9 +108,9 @@ def latest_installed():
|
|||
.. note::
|
||||
|
||||
This function may not return the same value as
|
||||
:py:func:`~salt.modules.kernelpkg.active` if a new kernel
|
||||
:py:func:`~salt.modules.kernelpkg_linux_yum.active` if a new kernel
|
||||
has been installed and the system has not yet been rebooted.
|
||||
The :py:func:`~salt.modules.kernelpkg.needs_reboot` function
|
||||
The :py:func:`~salt.modules.kernelpkg_linux_yum.needs_reboot` function
|
||||
exists to detect this condition.
|
||||
'''
|
||||
pkgs = list_installed()
|
||||
|
@ -186,6 +191,94 @@ def upgrade_available():
|
|||
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
|
||||
|
||||
|
||||
def remove(release):
|
||||
'''
|
||||
Remove a specific version of the kernel.
|
||||
|
||||
release
|
||||
The release number of an installed kernel. This must be the entire release
|
||||
number as returned by :py:func:`~salt.modules.kernelpkg_linux_yum.list_installed`,
|
||||
not the package name.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' kernelpkg.remove 3.10.0-327.el7
|
||||
'''
|
||||
if release not in list_installed():
|
||||
raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release))
|
||||
|
||||
if release == active():
|
||||
raise CommandExecutionError('Active kernel cannot be removed')
|
||||
|
||||
target = '{0}-{1}'.format(_package_name(), release)
|
||||
log.info('Removing kernel package {0}'.format(target))
|
||||
old = __salt__['pkg.list_pkgs']()
|
||||
|
||||
# Build the command string
|
||||
cmd = []
|
||||
if salt.utils.systemd.has_scope(__context__) \
|
||||
and __salt__['config.get']('systemd.scope', True):
|
||||
cmd.extend(['systemd-run', '--scope'])
|
||||
cmd.extend([_yum(), '-y', 'remove', target])
|
||||
|
||||
# Execute the command
|
||||
out = __salt__['cmd.run_all'](
|
||||
cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
|
||||
# Look for the changes in installed packages
|
||||
__context__.pop('pkg.list_pkgs', None)
|
||||
new = __salt__['pkg.list_pkgs']()
|
||||
ret = salt.utils.compare_dicts(old, new)
|
||||
|
||||
# Look for command execution errors
|
||||
if out['retcode'] != 0:
|
||||
raise CommandExecutionError(
|
||||
'Error occurred removing package(s)',
|
||||
info={'errors': [out['stderr']], 'changes': ret}
|
||||
)
|
||||
|
||||
return {'removed': [target]}
|
||||
|
||||
|
||||
def cleanup(keep_latest=True):
|
||||
'''
|
||||
Remove all unused kernel packages from the system.
|
||||
|
||||
keep_latest : True
|
||||
In the event that the active kernel is not the latest one installed, setting this to True
|
||||
will retain the latest kernel package, in addition to the active one. If False, all kernel
|
||||
packages other than the active one will be removed.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' kernelpkg.cleanup
|
||||
'''
|
||||
removed = []
|
||||
|
||||
# Loop over all installed kernel packages
|
||||
for kernel in list_installed():
|
||||
|
||||
# Keep the active kernel package
|
||||
if kernel == active():
|
||||
continue
|
||||
|
||||
# Optionally keep the latest kernel package
|
||||
if keep_latest and kernel == latest_installed():
|
||||
continue
|
||||
|
||||
# Remove the kernel package
|
||||
removed.extend(remove(kernel)['removed'])
|
||||
|
||||
return {'removed': removed}
|
||||
|
||||
|
||||
def _package_name():
|
||||
'''
|
||||
Return static string for the package name
|
||||
|
@ -197,11 +290,11 @@ def _cmp_version(item1, item2):
|
|||
'''
|
||||
Compare function for package version sorting
|
||||
'''
|
||||
v1 = _LooseVersion(item1)
|
||||
v2 = _LooseVersion(item2)
|
||||
vers1 = _LooseVersion(item1)
|
||||
vers2 = _LooseVersion(item2)
|
||||
|
||||
if v1 < v2:
|
||||
if vers1 < vers2:
|
||||
return -1
|
||||
if v1 > v2:
|
||||
if vers1 > vers2:
|
||||
return 1
|
||||
return 0
|
||||
|
|
|
@ -51,20 +51,18 @@ try:
|
|||
import kubernetes.client
|
||||
from kubernetes.client.rest import ApiException
|
||||
from urllib3.exceptions import HTTPError
|
||||
try:
|
||||
# There is an API change in Kubernetes >= 2.0.0.
|
||||
from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment
|
||||
from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec
|
||||
except ImportError:
|
||||
from kubernetes.client import AppsV1beta1Deployment
|
||||
from kubernetes.client import AppsV1beta1DeploymentSpec
|
||||
|
||||
HAS_LIBS = True
|
||||
except ImportError:
|
||||
HAS_LIBS = False
|
||||
|
||||
try:
|
||||
# There is an API change in Kubernetes >= 2.0.0.
|
||||
from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment
|
||||
from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec
|
||||
except ImportError:
|
||||
from kubernetes.client import AppsV1beta1Deployment
|
||||
from kubernetes.client import AppsV1beta1DeploymentSpec
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'kubernetes'
|
||||
|
|
|
@ -430,7 +430,7 @@ def lvcreate(lvname,
|
|||
cmd.extend(extra_arguments)
|
||||
|
||||
if force:
|
||||
cmd.append('-yes')
|
||||
cmd.append('--yes')
|
||||
|
||||
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||
lvdev = '/dev/{0}/{1}'.format(vgname, lvname)
|
||||
|
|
|
@ -25,6 +25,9 @@ Module to provide MS SQL Server compatibility to salt.
|
|||
from __future__ import absolute_import
|
||||
from json import JSONEncoder, loads
|
||||
|
||||
import salt.ext.six as six
|
||||
|
||||
|
||||
try:
|
||||
import pymssql
|
||||
HAS_ALL_IMPORTS = True
|
||||
|
@ -127,6 +130,37 @@ def db_exists(database_name, **kwargs):
|
|||
return len(tsql_query("SELECT database_id FROM sys.databases WHERE NAME='{0}'".format(database_name), **kwargs)) == 1
|
||||
|
||||
|
||||
def db_create(database, containment='NONE', new_database_options=None, **kwargs):
|
||||
'''
|
||||
Creates a new database.
|
||||
Does not update options of existing databases.
|
||||
new_database_options can only be a list of strings
|
||||
|
||||
CLI Example:
|
||||
.. code-block:: bash
|
||||
salt minion mssql.db_create DB_NAME
|
||||
'''
|
||||
if containment not in ['NONE', 'PARTIAL']:
|
||||
return 'CONTAINMENT can be one of NONE and PARTIAL'
|
||||
sql = "CREATE DATABASE [{0}] CONTAINMENT = {1} ".format(database, containment)
|
||||
if new_database_options:
|
||||
sql += ' WITH ' + ', '.join(new_database_options)
|
||||
conn = None
|
||||
try:
|
||||
conn = _get_connection(**kwargs)
|
||||
conn.autocommit(True)
|
||||
# cur = conn.cursor()
|
||||
# cur.execute(sql)
|
||||
conn.cursor().execute(sql)
|
||||
except Exception as e:
|
||||
return 'Could not create the login: {0}'.format(e)
|
||||
finally:
|
||||
if conn:
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
|
||||
|
||||
def db_remove(database_name, **kwargs):
|
||||
'''
|
||||
Drops a specific database from the MS SQL server.
|
||||
|
@ -183,31 +217,41 @@ def role_exists(role, **kwargs):
|
|||
return len(tsql_query(query='sp_helprole "{0}"'.format(role), as_dict=True, **kwargs)) == 1
|
||||
|
||||
|
||||
def role_create(role, owner=None, **kwargs):
|
||||
def role_create(role, owner=None, grants=None, **kwargs):
|
||||
'''
|
||||
Creates a new database role.
|
||||
If no owner is specified, the role will be owned by the user that
|
||||
executes CREATE ROLE, which is the user argument or mssql.user option.
|
||||
grants is list of strings.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt minion mssql.role_create role=product01 owner=sysdba
|
||||
salt minion mssql.role_create role=product01 owner=sysdba grants='["SELECT", "INSERT", "UPDATE", "DELETE", "EXECUTE"]'
|
||||
'''
|
||||
if not grants:
|
||||
grants = []
|
||||
|
||||
sql = 'CREATE ROLE {0}'.format(role)
|
||||
if owner:
|
||||
sql += ' AUTHORIZATION {0}'.format(owner)
|
||||
conn = None
|
||||
try:
|
||||
conn = _get_connection(**kwargs)
|
||||
conn.autocommit(True)
|
||||
cur = conn.cursor()
|
||||
if owner:
|
||||
cur.execute('CREATE ROLE {0} AUTHORIZATION {1}'.format(role, owner))
|
||||
else:
|
||||
cur.execute('CREATE ROLE {0}'.format(role))
|
||||
conn.autocommit(True)
|
||||
conn.close()
|
||||
return True
|
||||
# cur = conn.cursor()
|
||||
# cur.execute(sql)
|
||||
conn.cursor().execute(sql)
|
||||
for grant in grants:
|
||||
conn.cursor().execute('GRANT {0} TO [{1}]'.format(grant, role))
|
||||
except Exception as e:
|
||||
return 'Could not create the role: {0}'.format(e)
|
||||
finally:
|
||||
if conn:
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
|
||||
|
||||
def role_remove(role, **kwargs):
|
||||
|
@ -232,9 +276,10 @@ def role_remove(role, **kwargs):
|
|||
return 'Could not create the role: {0}'.format(e)
|
||||
|
||||
|
||||
def login_exists(login, **kwargs):
|
||||
def login_exists(login, domain='', **kwargs):
|
||||
'''
|
||||
Find if a login exists in the MS SQL server.
|
||||
domain, if provided, will be prepended to login
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@ -242,6 +287,8 @@ def login_exists(login, **kwargs):
|
|||
|
||||
salt minion mssql.login_exists 'LOGIN'
|
||||
'''
|
||||
if domain:
|
||||
login = '{0}\\{1}'.format(domain, login)
|
||||
try:
|
||||
# We should get one, and only one row
|
||||
return len(tsql_query(query="SELECT name FROM sys.syslogins WHERE name='{0}'".format(login), **kwargs)) == 1
|
||||
|
@ -250,12 +297,87 @@ def login_exists(login, **kwargs):
|
|||
return 'Could not find the login: {0}'.format(e)
|
||||
|
||||
|
||||
def user_exists(username, **kwargs):
|
||||
def login_create(login, new_login_password=None, new_login_domain='', new_login_roles=None, new_login_options=None, **kwargs):
|
||||
'''
|
||||
Creates a new login.
|
||||
Does not update password of existing logins.
|
||||
For Windows authentication, provide new_login_domain.
|
||||
For SQL Server authentication, prvide new_login_password.
|
||||
Since hashed passwords are varbinary values, if the
|
||||
new_login_password is 'int / long', it will be considered
|
||||
to be HASHED.
|
||||
new_login_roles can only be a list of SERVER roles
|
||||
new_login_options can only be a list of strings
|
||||
|
||||
CLI Example:
|
||||
.. code-block:: bash
|
||||
salt minion mssql.login_create LOGIN_NAME database=DBNAME [new_login_password=PASSWORD]
|
||||
'''
|
||||
# One and only one of password and domain should be specifies
|
||||
if bool(new_login_password) == bool(new_login_domain):
|
||||
return False
|
||||
if login_exists(login, new_login_domain, **kwargs):
|
||||
return False
|
||||
if new_login_domain:
|
||||
login = '{0}\\{1}'.format(new_login_domain, login)
|
||||
if not new_login_roles:
|
||||
new_login_roles = []
|
||||
if not new_login_options:
|
||||
new_login_options = []
|
||||
|
||||
sql = "CREATE LOGIN [{0}] ".format(login)
|
||||
if new_login_domain:
|
||||
sql += " FROM WINDOWS "
|
||||
elif isinstance(new_login_password, six.integer_types):
|
||||
new_login_options.insert(0, "PASSWORD=0x{0:x} HASHED".format(new_login_password))
|
||||
else: # Plain test password
|
||||
new_login_options.insert(0, "PASSWORD=N'{0}'".format(new_login_password))
|
||||
if new_login_options:
|
||||
sql += ' WITH ' + ', '.join(new_login_options)
|
||||
conn = None
|
||||
try:
|
||||
conn = _get_connection(**kwargs)
|
||||
conn.autocommit(True)
|
||||
# cur = conn.cursor()
|
||||
# cur.execute(sql)
|
||||
conn.cursor().execute(sql)
|
||||
for role in new_login_roles:
|
||||
conn.cursor().execute('ALTER SERVER ROLE [{0}] ADD MEMBER [{1}]'.format(role, login))
|
||||
except Exception as e:
|
||||
return 'Could not create the login: {0}'.format(e)
|
||||
finally:
|
||||
if conn:
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
|
||||
|
||||
def login_remove(login, **kwargs):
|
||||
'''
|
||||
Removes an login.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt minion mssql.login_remove LOGINNAME
|
||||
'''
|
||||
try:
|
||||
conn = _get_connection(**kwargs)
|
||||
conn.autocommit(True)
|
||||
cur = conn.cursor()
|
||||
cur.execute("DROP LOGIN [{0}]".format(login))
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
except Exception as e:
|
||||
return 'Could not remove the login: {0}'.format(e)
|
||||
|
||||
|
||||
def user_exists(username, domain='', database=None, **kwargs):
|
||||
'''
|
||||
Find if an user exists in a specific database on the MS SQL server.
|
||||
|
||||
Note:
|
||||
*database* argument is mandatory
|
||||
domain, if provided, will be prepended to username
|
||||
|
||||
CLI Example:
|
||||
|
||||
|
@ -263,10 +385,10 @@ def user_exists(username, **kwargs):
|
|||
|
||||
salt minion mssql.user_exists 'USERNAME' [database='DBNAME']
|
||||
'''
|
||||
# 'database' argument is mandatory
|
||||
if 'database' not in kwargs:
|
||||
return False
|
||||
|
||||
if domain:
|
||||
username = '{0}\\{1}'.format(domain, username)
|
||||
if database:
|
||||
kwargs['database'] = database
|
||||
# We should get one, and only one row
|
||||
return len(tsql_query(query="SELECT name FROM sysusers WHERE name='{0}'".format(username), **kwargs)) == 1
|
||||
|
||||
|
@ -284,42 +406,57 @@ def user_list(**kwargs):
|
|||
return [row[0] for row in tsql_query("SELECT name FROM sysusers where issqluser=1 or isntuser=1", as_dict=False, **kwargs)]
|
||||
|
||||
|
||||
def user_create(username, new_login_password=None, **kwargs):
|
||||
def user_create(username, login=None, domain='', database=None, roles=None, options=None, **kwargs):
|
||||
'''
|
||||
Creates a new user.
|
||||
If new_login_password is not specified, the user will be created without a login.
|
||||
If login is not specified, the user will be created without a login.
|
||||
domain, if provided, will be prepended to username.
|
||||
options can only be a list of strings
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt minion mssql.user_create USERNAME database=DBNAME [new_login_password=PASSWORD]
|
||||
salt minion mssql.user_create USERNAME database=DBNAME
|
||||
'''
|
||||
# 'database' argument is mandatory
|
||||
if 'database' not in kwargs:
|
||||
return False
|
||||
if user_exists(username, **kwargs):
|
||||
return False
|
||||
if domain and not login:
|
||||
return 'domain cannot be set without login'
|
||||
if user_exists(username, domain, **kwargs):
|
||||
return 'User {0} already exists'.format(username)
|
||||
if domain:
|
||||
username = '{0}\\{1}'.format(domain, username)
|
||||
login = '{0}\\{1}'.format(domain, login) if login else login
|
||||
if database:
|
||||
kwargs['database'] = database
|
||||
if not roles:
|
||||
roles = []
|
||||
if not options:
|
||||
options = []
|
||||
|
||||
sql = "CREATE USER [{0}] ".format(username)
|
||||
if login:
|
||||
# If the login does not exist, user creation will throw
|
||||
# if not login_exists(name, **kwargs):
|
||||
# return False
|
||||
sql += " FOR LOGIN [{0}]".format(login)
|
||||
else: # Plain test password
|
||||
sql += " WITHOUT LOGIN"
|
||||
if options:
|
||||
sql += ' WITH ' + ', '.join(options)
|
||||
conn = None
|
||||
try:
|
||||
conn = _get_connection(**kwargs)
|
||||
conn.autocommit(True)
|
||||
cur = conn.cursor()
|
||||
|
||||
if new_login_password:
|
||||
if login_exists(username, **kwargs):
|
||||
conn.close()
|
||||
return False
|
||||
cur.execute("CREATE LOGIN {0} WITH PASSWORD='{1}',check_policy = off".format(username, new_login_password))
|
||||
cur.execute("CREATE USER {0} FOR LOGIN {1}".format(username, username))
|
||||
else: # new_login_password is not specified
|
||||
cur.execute("CREATE USER {0} WITHOUT LOGIN".format(username))
|
||||
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
# cur = conn.cursor()
|
||||
# cur.execute(sql)
|
||||
conn.cursor().execute(sql)
|
||||
for role in roles:
|
||||
conn.cursor().execute('ALTER ROLE [{0}] ADD MEMBER [{1}]'.format(role, username))
|
||||
except Exception as e:
|
||||
return 'Could not create the user: {0}'.format(e)
|
||||
finally:
|
||||
if conn:
|
||||
conn.autocommit(False)
|
||||
conn.close()
|
||||
return True
|
||||
|
||||
|
||||
def user_remove(username, **kwargs):
|
||||
|
|
|
@ -152,7 +152,8 @@ Optional small program to encrypt data without needing salt modules.
|
|||
from __future__ import absolute_import
|
||||
import base64
|
||||
import os
|
||||
import salt.utils
|
||||
import salt.utils.files
|
||||
import salt.utils.platform
|
||||
import salt.utils.win_functions
|
||||
import salt.utils.win_dacl
|
||||
import salt.syspaths
|
||||
|
@ -203,7 +204,7 @@ def _get_sk(**kwargs):
|
|||
key = config['sk']
|
||||
sk_file = config['sk_file']
|
||||
if not key and sk_file:
|
||||
with salt.utils.fopen(sk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
|
||||
key = str(keyf.read()).rstrip('\n')
|
||||
if key is None:
|
||||
raise Exception('no key or sk_file found')
|
||||
|
@ -218,7 +219,7 @@ def _get_pk(**kwargs):
|
|||
pubkey = config['pk']
|
||||
pk_file = config['pk_file']
|
||||
if not pubkey and pk_file:
|
||||
with salt.utils.fopen(pk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'rb') as keyf:
|
||||
pubkey = str(keyf.read()).rstrip('\n')
|
||||
if pubkey is None:
|
||||
raise Exception('no pubkey or pk_file found')
|
||||
|
@ -256,9 +257,9 @@ def keygen(sk_file=None, pk_file=None):
|
|||
if sk_file and pk_file is None:
|
||||
if not os.path.isfile(sk_file):
|
||||
kp = libnacl.public.SecretKey()
|
||||
with salt.utils.fopen(sk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.sk))
|
||||
if salt.utils.is_windows():
|
||||
if salt.utils.platform.is_windows():
|
||||
cur_user = salt.utils.win_functions.get_current_user()
|
||||
salt.utils.win_dacl.set_owner(sk_file, cur_user)
|
||||
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
|
||||
|
@ -277,25 +278,25 @@ def keygen(sk_file=None, pk_file=None):
|
|||
|
||||
if os.path.isfile(sk_file) and not os.path.isfile(pk_file):
|
||||
# generate pk using the sk
|
||||
with salt.utils.fopen(sk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
|
||||
sk = str(keyf.read()).rstrip('\n')
|
||||
sk = base64.b64decode(sk)
|
||||
kp = libnacl.public.SecretKey(sk)
|
||||
with salt.utils.fopen(pk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.pk))
|
||||
return 'saved pk_file: {0}'.format(pk_file)
|
||||
|
||||
kp = libnacl.public.SecretKey()
|
||||
with salt.utils.fopen(sk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.sk))
|
||||
if salt.utils.is_windows():
|
||||
if salt.utils.platform.is_windows():
|
||||
cur_user = salt.utils.win_functions.get_current_user()
|
||||
salt.utils.win_dacl.set_owner(sk_file, cur_user)
|
||||
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
|
||||
else:
|
||||
# chmod 0600 file
|
||||
os.chmod(sk_file, 1536)
|
||||
with salt.utils.fopen(pk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.pk))
|
||||
return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file)
|
||||
|
||||
|
@ -335,13 +336,13 @@ def enc_file(name, out=None, **kwargs):
|
|||
data = __salt__['cp.get_file_str'](name)
|
||||
except Exception as e:
|
||||
# likly using salt-run so fallback to local filesystem
|
||||
with salt.utils.fopen(name, 'rb') as f:
|
||||
with salt.utils.files.fopen(name, 'rb') as f:
|
||||
data = f.read()
|
||||
d = enc(data, **kwargs)
|
||||
if out:
|
||||
if os.path.isfile(out):
|
||||
raise Exception('file:{0} already exist.'.format(out))
|
||||
with salt.utils.fopen(out, 'wb') as f:
|
||||
with salt.utils.files.fopen(out, 'wb') as f:
|
||||
f.write(d)
|
||||
return 'Wrote: {0}'.format(out)
|
||||
return d
|
||||
|
@ -382,13 +383,13 @@ def dec_file(name, out=None, **kwargs):
|
|||
data = __salt__['cp.get_file_str'](name)
|
||||
except Exception as e:
|
||||
# likly using salt-run so fallback to local filesystem
|
||||
with salt.utils.fopen(name, 'rb') as f:
|
||||
with salt.utils.files.fopen(name, 'rb') as f:
|
||||
data = f.read()
|
||||
d = dec(data, **kwargs)
|
||||
if out:
|
||||
if os.path.isfile(out):
|
||||
raise Exception('file:{0} already exist.'.format(out))
|
||||
with salt.utils.fopen(out, 'wb') as f:
|
||||
with salt.utils.files.fopen(out, 'wb') as f:
|
||||
f.write(d)
|
||||
return 'Wrote: {0}'.format(out)
|
||||
return d
|
||||
|
|
|
@ -41,7 +41,11 @@ def list_exports(exports='/etc/exports'):
|
|||
if line.startswith('#'):
|
||||
continue
|
||||
comps = line.split()
|
||||
ret[comps[0]] = []
|
||||
|
||||
# Handle the case where the same path is given twice
|
||||
if not comps[0] in ret:
|
||||
ret[comps[0]] = []
|
||||
|
||||
newshares = []
|
||||
for perm in comps[1:]:
|
||||
if perm.startswith('/'):
|
||||
|
@ -49,7 +53,10 @@ def list_exports(exports='/etc/exports'):
|
|||
continue
|
||||
permcomps = perm.split('(')
|
||||
permcomps[1] = permcomps[1].replace(')', '')
|
||||
hosts = permcomps[0].split(',')
|
||||
hosts = permcomps[0]
|
||||
if type(hosts) is not str:
|
||||
# Lists, etc would silently mangle /etc/exports
|
||||
raise TypeError('hosts argument must be a string')
|
||||
options = permcomps[1].split(',')
|
||||
ret[comps[0]].append({'hosts': hosts, 'options': options})
|
||||
for share in newshares:
|
||||
|
@ -73,6 +80,31 @@ def del_export(exports='/etc/exports', path=None):
|
|||
return edict
|
||||
|
||||
|
||||
def add_export(exports='/etc/exports', path=None, hosts=None, options=None):
|
||||
'''
|
||||
Add an export
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw']
|
||||
'''
|
||||
if options is None:
|
||||
options = []
|
||||
if type(hosts) is not str:
|
||||
# Lists, etc would silently mangle /etc/exports
|
||||
raise TypeError('hosts argument must be a string')
|
||||
edict = list_exports(exports)
|
||||
if path not in edict:
|
||||
edict[path] = []
|
||||
new = {'hosts': hosts, 'options': options}
|
||||
edict[path].append(new)
|
||||
_write_exports(exports, edict)
|
||||
|
||||
return new
|
||||
|
||||
|
||||
def _write_exports(exports, edict):
|
||||
'''
|
||||
Write an exports file to disk
|
||||
|
@ -90,7 +122,29 @@ def _write_exports(exports, edict):
|
|||
for export in edict:
|
||||
line = export
|
||||
for perms in edict[export]:
|
||||
hosts = ','.join(perms['hosts'])
|
||||
hosts = perms['hosts']
|
||||
options = ','.join(perms['options'])
|
||||
line += ' {0}({1})'.format(hosts, options)
|
||||
efh.write('{0}\n'.format(line))
|
||||
|
||||
|
||||
def reload_exports():
|
||||
'''
|
||||
Trigger a reload of the exports file to apply changes
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' nfs3.reload_exports
|
||||
'''
|
||||
ret = {}
|
||||
|
||||
command = 'exportfs -r'
|
||||
|
||||
output = __salt__['cmd.run_all'](command)
|
||||
ret['stdout'] = output['stdout']
|
||||
ret['stderr'] = output['stderr']
|
||||
ret['result'] = not output['retcode']
|
||||
|
||||
return ret
|
||||
|
|
|
@ -37,7 +37,7 @@ def _table_attrs(table):
|
|||
'''
|
||||
Helper function to find valid table attributes
|
||||
'''
|
||||
cmd = 'osqueryi --json "pragma table_info({0})"'.format(table)
|
||||
cmd = ['osqueryi'] + ['--json'] + ['pragma table_info{0}'.format(table)]
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
if res['retcode'] == 0:
|
||||
attrs = []
|
||||
|
@ -56,7 +56,7 @@ def _osquery(sql, format='json'):
|
|||
'result': True,
|
||||
}
|
||||
|
||||
cmd = 'osqueryi --json "{0}"'.format(sql)
|
||||
cmd = ['osqueryi'] + ['--json'] + [sql]
|
||||
res = __salt__['cmd.run_all'](cmd)
|
||||
if res['stderr']:
|
||||
ret['result'] = False
|
||||
|
|
|
@ -886,8 +886,8 @@ def user_list(user=None, host=None, port=None, maintenance_db=None,
|
|||
for date_key in ('expiry time',):
|
||||
try:
|
||||
retrow[date_key] = datetime.datetime.strptime(
|
||||
row['date_key'], '%Y-%m-%d %H:%M:%S')
|
||||
except (ValueError, KeyError):
|
||||
row[date_key], '%Y-%m-%d %H:%M:%S')
|
||||
except ValueError:
|
||||
retrow[date_key] = None
|
||||
retrow['defaults variables'] = row['defaults variables']
|
||||
if return_password:
|
||||
|
@ -1025,6 +1025,7 @@ def _role_cmd_args(name,
|
|||
groups=None,
|
||||
replication=None,
|
||||
rolepassword=None,
|
||||
valid_until=None,
|
||||
db_role=None):
|
||||
if createuser is not None and superuser is None:
|
||||
superuser = createuser
|
||||
|
@ -1041,6 +1042,7 @@ def _role_cmd_args(name,
|
|||
encrypted = _DEFAULT_PASSWORDS_ENCRYPTION
|
||||
skip_passwd = False
|
||||
escaped_password = ''
|
||||
escaped_valid_until = ''
|
||||
if not (
|
||||
rolepassword is not None
|
||||
# first is passwd set
|
||||
|
@ -1058,6 +1060,10 @@ def _role_cmd_args(name,
|
|||
_maybe_encrypt_password(name,
|
||||
rolepassword.replace('\'', '\'\''),
|
||||
encrypted=encrypted))
|
||||
if isinstance(valid_until, six.string_types) and bool(valid_until):
|
||||
escaped_valid_until = '\'{0}\''.format(
|
||||
valid_until.replace('\'', '\'\''),
|
||||
)
|
||||
skip_superuser = False
|
||||
if bool(db_role) and bool(superuser) == bool(db_role['superuser']):
|
||||
skip_superuser = True
|
||||
|
@ -1081,6 +1087,10 @@ def _role_cmd_args(name,
|
|||
{'flag': 'PASSWORD', 'test': bool(rolepassword),
|
||||
'skip': skip_passwd,
|
||||
'addtxt': escaped_password},
|
||||
{'flag': 'VALID UNTIL',
|
||||
'test': bool(valid_until),
|
||||
'skip': valid_until is None,
|
||||
'addtxt': escaped_valid_until},
|
||||
)
|
||||
for data in flags:
|
||||
sub_cmd = _add_role_flag(sub_cmd, **data)
|
||||
|
@ -1110,6 +1120,7 @@ def _role_create(name,
|
|||
inherit=None,
|
||||
replication=None,
|
||||
rolepassword=None,
|
||||
valid_until=None,
|
||||
typ_='role',
|
||||
groups=None,
|
||||
runas=None):
|
||||
|
@ -1138,7 +1149,8 @@ def _role_create(name,
|
|||
superuser=superuser,
|
||||
groups=groups,
|
||||
replication=replication,
|
||||
rolepassword=rolepassword
|
||||
rolepassword=rolepassword,
|
||||
valid_until=valid_until
|
||||
))
|
||||
ret = _psql_prepare_and_run(['-c', sub_cmd],
|
||||
runas=runas, host=host, user=user, port=port,
|
||||
|
@ -1164,6 +1176,7 @@ def user_create(username,
|
|||
superuser=None,
|
||||
replication=None,
|
||||
rolepassword=None,
|
||||
valid_until=None,
|
||||
groups=None,
|
||||
runas=None):
|
||||
'''
|
||||
|
@ -1175,7 +1188,7 @@ def user_create(username,
|
|||
|
||||
salt '*' postgres.user_create 'username' user='user' \\
|
||||
host='hostname' port='port' password='password' \\
|
||||
rolepassword='rolepassword'
|
||||
rolepassword='rolepassword' valid_until='valid_until'
|
||||
'''
|
||||
return _role_create(username,
|
||||
typ_='user',
|
||||
|
@ -1194,6 +1207,7 @@ def user_create(username,
|
|||
superuser=superuser,
|
||||
replication=replication,
|
||||
rolepassword=rolepassword,
|
||||
valid_until=valid_until,
|
||||
groups=groups,
|
||||
runas=runas)
|
||||
|
||||
|
@ -1215,6 +1229,7 @@ def _role_update(name,
|
|||
superuser=None,
|
||||
replication=None,
|
||||
rolepassword=None,
|
||||
valid_until=None,
|
||||
groups=None,
|
||||
runas=None):
|
||||
'''
|
||||
|
@ -1250,6 +1265,7 @@ def _role_update(name,
|
|||
groups=groups,
|
||||
replication=replication,
|
||||
rolepassword=rolepassword,
|
||||
valid_until=valid_until,
|
||||
db_role=role
|
||||
))
|
||||
ret = _psql_prepare_and_run(['-c', sub_cmd],
|
||||
|
@ -1276,6 +1292,7 @@ def user_update(username,
|
|||
connlimit=None,
|
||||
replication=None,
|
||||
rolepassword=None,
|
||||
valid_until=None,
|
||||
groups=None,
|
||||
runas=None):
|
||||
'''
|
||||
|
@ -1287,7 +1304,7 @@ def user_update(username,
|
|||
|
||||
salt '*' postgres.user_update 'username' user='user' \\
|
||||
host='hostname' port='port' password='password' \\
|
||||
rolepassword='rolepassword'
|
||||
rolepassword='rolepassword' valid_until='valid_until'
|
||||
'''
|
||||
return _role_update(username,
|
||||
user=user,
|
||||
|
@ -1306,6 +1323,7 @@ def user_update(username,
|
|||
superuser=superuser,
|
||||
replication=replication,
|
||||
rolepassword=rolepassword,
|
||||
valid_until=valid_until,
|
||||
groups=groups,
|
||||
runas=runas)
|
||||
|
||||
|
|
|
@ -80,7 +80,8 @@ for service_dir in VALID_SERVICE_DIRS:
|
|||
AVAIL_SVR_DIRS = []
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'service'
|
||||
__virtualname__ = 'runit'
|
||||
__virtual_aliases__ = ('runit',)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
|
@ -91,8 +92,12 @@ def __virtual__():
|
|||
if __grains__.get('init') == 'runit':
|
||||
if __grains__['os'] == 'Void':
|
||||
add_svc_avail_path('/etc/sv')
|
||||
global __virtualname__
|
||||
__virtualname__ = 'service'
|
||||
return __virtualname__
|
||||
return False
|
||||
if salt.utils.which('sv'):
|
||||
return __virtualname__
|
||||
return (False, 'Runit not available. Please install sv')
|
||||
|
||||
|
||||
def _service_path(name):
|
||||
|
|
|
@ -404,7 +404,7 @@ def _context_string_to_dict(context):
|
|||
return ret
|
||||
|
||||
|
||||
def _filetype_id_to_string(filetype='a'):
|
||||
def filetype_id_to_string(filetype='a'):
|
||||
'''
|
||||
Translates SELinux filetype single-letter representation
|
||||
to a more human-readable version (which is also used in `semanage fcontext -l`).
|
||||
|
@ -445,7 +445,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
|
|||
'sel_role': '[^:]+', # se_role for file context is always object_r
|
||||
'sel_type': sel_type or '[^:]+',
|
||||
'sel_level': sel_level or '[^:]+'}
|
||||
cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else _filetype_id_to_string(filetype)
|
||||
cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype)
|
||||
cmd = 'semanage fcontext -l | egrep ' + \
|
||||
"'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs)
|
||||
current_entry_text = __salt__['cmd.shell'](cmd)
|
||||
|
|
|
@ -153,12 +153,19 @@ def _replace_auth_key(
|
|||
# open the file for both reading AND writing
|
||||
with salt.utils.files.fopen(full, 'r') as _fh:
|
||||
for line in _fh:
|
||||
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
|
||||
line = line.strip()
|
||||
if line == '':
|
||||
continue
|
||||
line += '\n'
|
||||
|
||||
if line.startswith('#'):
|
||||
# Commented Line
|
||||
lines.append(line)
|
||||
continue
|
||||
comps = re.findall(r'((.*)\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\s([a-zA-Z0-9+/]+={0,2})(\s(.*))?', line)
|
||||
if len(comps) > 0 and len(comps[0]) > 3 and comps[0][3] == key:
|
||||
# Found our key, replace it
|
||||
lines.append(auth_line)
|
||||
else:
|
||||
lines.append(line)
|
||||
|
@ -183,6 +190,12 @@ def _validate_keys(key_file, fingerprint_hash_type):
|
|||
try:
|
||||
with salt.utils.files.fopen(key_file, 'r') as _fh:
|
||||
for line in _fh:
|
||||
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
|
||||
line = line.strip()
|
||||
if line == '':
|
||||
continue
|
||||
line += '\n'
|
||||
|
||||
if line.startswith('#'):
|
||||
# Commented Line
|
||||
continue
|
||||
|
@ -572,6 +585,12 @@ def rm_auth_key(user,
|
|||
# and then write out the correct one. Open the file once
|
||||
with salt.utils.files.fopen(full, 'r') as _fh:
|
||||
for line in _fh:
|
||||
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
|
||||
line = line.strip()
|
||||
if line == '':
|
||||
continue
|
||||
line += '\n'
|
||||
|
||||
if line.startswith('#'):
|
||||
# Commented Line
|
||||
lines.append(line)
|
||||
|
@ -779,6 +798,12 @@ def _parse_openssh_output(lines, fingerprint_hash_type=None):
|
|||
and yield dict with keys information, one by one.
|
||||
'''
|
||||
for line in lines:
|
||||
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
|
||||
line = line.strip()
|
||||
if line == '':
|
||||
continue
|
||||
line += '\n'
|
||||
|
||||
if line.startswith('#'):
|
||||
continue
|
||||
try:
|
||||
|
|
|
@ -1463,6 +1463,17 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
|
|||
saltenv
|
||||
Specify a salt fileserver environment to be used when applying states
|
||||
|
||||
pillar
|
||||
Custom Pillar values, passed as a dictionary of key-value pairs
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.show_low_sls test pillar='{"foo": "bar"}'
|
||||
|
||||
.. note::
|
||||
Values passed this way will override Pillar values set via
|
||||
``pillar_roots`` or an external Pillar source.
|
||||
|
||||
pillarenv
|
||||
Specify a Pillar environment to be used when applying states. This
|
||||
can also be set in the minion config file using the
|
||||
|
@ -1497,12 +1508,26 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
|
|||
# the 'base' saltenv if none is configured and none was passed.
|
||||
if opts['environment'] is None:
|
||||
opts['environment'] = 'base'
|
||||
|
||||
pillar_override = kwargs.get('pillar')
|
||||
pillar_enc = kwargs.get('pillar_enc')
|
||||
if pillar_enc is None \
|
||||
and pillar_override is not None \
|
||||
and not isinstance(pillar_override, dict):
|
||||
raise SaltInvocationError(
|
||||
'Pillar data must be formatted as a dictionary, unless pillar_enc '
|
||||
'is specified.'
|
||||
)
|
||||
|
||||
try:
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar_override,
|
||||
proxy=__proxy__,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
except NameError:
|
||||
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
|
||||
st_ = salt.state.HighState(opts,
|
||||
pillar_override,
|
||||
initial_pillar=_get_initial_pillar(opts))
|
||||
|
||||
if not _check_pillar(kwargs, st_.opts['pillar']):
|
||||
__context__['retcode'] = 5
|
||||
|
|
|
@ -242,6 +242,8 @@ def _copy_function(module_name, name=None):
|
|||
elif hasattr(mod, '__call__'):
|
||||
mod_sig = inspect.getargspec(mod.__call__)
|
||||
parameters = mod_sig.args
|
||||
log.debug('Parameters accepted by module {0}: {1}'.format(module_name,
|
||||
parameters))
|
||||
additional_args = {}
|
||||
for arg in set(parameters).intersection(set(methods)):
|
||||
additional_args[arg] = methods.pop(arg)
|
||||
|
@ -251,12 +253,15 @@ def _copy_function(module_name, name=None):
|
|||
else:
|
||||
modinstance = mod()
|
||||
except TypeError:
|
||||
modinstance = None
|
||||
methods = {}
|
||||
log.exception('Module failed to instantiate')
|
||||
raise
|
||||
valid_methods = {}
|
||||
log.debug('Called methods are: {0}'.format(methods))
|
||||
for meth_name in methods:
|
||||
if not meth_name.startswith('_'):
|
||||
methods[meth_name] = methods[meth_name]
|
||||
for meth, arg in methods.items():
|
||||
valid_methods[meth_name] = methods[meth_name]
|
||||
log.debug('Valid methods are: {0}'.format(valid_methods))
|
||||
for meth, arg in valid_methods.items():
|
||||
result = _get_method_result(mod, modinstance, meth, arg)
|
||||
assertion_result = _apply_assertion(arg, result)
|
||||
if not assertion_result:
|
||||
|
|
|
@ -31,6 +31,25 @@ Functions to interact with Hashicorp Vault.
|
|||
Currently only token auth is supported. The token must be able to create
|
||||
tokens with the policies that should be assigned to minions. Required.
|
||||
|
||||
You can still use the token via a OS environment variable via this
|
||||
config example:
|
||||
|
||||
.. code-block: yaml
|
||||
|
||||
vault:
|
||||
url: https://vault.service.domain:8200
|
||||
auth:
|
||||
method: token
|
||||
token: sdb://osenv/VAULT_TOKEN
|
||||
osenv:
|
||||
driver: env
|
||||
|
||||
|
||||
And then export the VAULT_TOKEN variable in your OS:
|
||||
|
||||
.. code-block: bash
|
||||
export VAULT_TOKEN=11111111-1111-1111-1111-1111111111111
|
||||
|
||||
policies
|
||||
Policies that are assigned to minions when requesting a token. These can
|
||||
either be static, eg saltstack/minions, or templated, eg
|
||||
|
|
|
@ -12,6 +12,7 @@ from __future__ import absolute_import
|
|||
|
||||
# Import Salt libs
|
||||
import salt.utils.platform
|
||||
import salt.utils.win_functions
|
||||
|
||||
|
||||
try:
|
||||
|
@ -35,10 +36,18 @@ def __virtual__():
|
|||
return (False, "Module win_groupadd: module only works on Windows systems")
|
||||
|
||||
|
||||
def add(name, gid=None, system=False):
|
||||
def add(name, **kwargs):
|
||||
'''
|
||||
Add the specified group
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group to add
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of results
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -57,29 +66,32 @@ def add(name, gid=None, system=False):
|
|||
compObj = nt.GetObject('', 'WinNT://.,computer')
|
||||
newGroup = compObj.Create('group', name)
|
||||
newGroup.SetInfo()
|
||||
ret['changes'].append((
|
||||
'Successfully created group {0}'
|
||||
).format(name))
|
||||
ret['changes'].append('Successfully created group {0}'.format(name))
|
||||
except pywintypes.com_error as com_err:
|
||||
ret['result'] = False
|
||||
if len(com_err.excepinfo) >= 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['comment'] = (
|
||||
'Failed to create group {0}. {1}'
|
||||
).format(name, friendly_error)
|
||||
ret['comment'] = 'Failed to create group {0}. {1}' \
|
||||
''.format(name, friendly_error)
|
||||
else:
|
||||
ret['result'] = None
|
||||
ret['comment'] = (
|
||||
'The group {0} already exists.'
|
||||
).format(name)
|
||||
ret['comment'] = 'The group {0} already exists.'.format(name)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def delete(name):
|
||||
def delete(name, **kwargs):
|
||||
'''
|
||||
Remove the named group
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group to remove
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of results
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -118,6 +130,14 @@ def info(name):
|
|||
'''
|
||||
Return information about a group
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group for which to get information
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of information about the group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -151,6 +171,17 @@ def getent(refresh=False):
|
|||
'''
|
||||
Return info on all groups
|
||||
|
||||
Args:
|
||||
|
||||
refresh (bool):
|
||||
Refresh the info for all groups in ``__context__``. If False only
|
||||
the groups in ``__context__`` wil be returned. If True the
|
||||
``__context__`` will be refreshed with current data and returned.
|
||||
Default is False
|
||||
|
||||
Returns:
|
||||
A list of groups and their information
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
@ -182,16 +213,26 @@ def getent(refresh=False):
|
|||
return ret
|
||||
|
||||
|
||||
def adduser(name, username):
|
||||
def adduser(name, username, **kwargs):
|
||||
'''
|
||||
add a user to a group
|
||||
Add a user to a group
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group to modify
|
||||
|
||||
username (str):
|
||||
The name of the user to add to the group
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of results
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.adduser foo username
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
|
@ -209,7 +250,7 @@ def adduser(name, username):
|
|||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
try:
|
||||
if __fixlocaluser(username.lower()) not in existingMembers:
|
||||
if salt.utils.win_functions.get_sam_name(username) not in existingMembers:
|
||||
if not __opts__['test']:
|
||||
groupObj.Add('WinNT://' + username.replace('\\', '/'))
|
||||
|
||||
|
@ -231,16 +272,26 @@ def adduser(name, username):
|
|||
return ret
|
||||
|
||||
|
||||
def deluser(name, username):
|
||||
def deluser(name, username, **kwargs):
|
||||
'''
|
||||
remove a user from a group
|
||||
Remove a user from a group
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group to modify
|
||||
|
||||
username (str):
|
||||
The name of the user to remove from the group
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of results
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.deluser foo username
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
|
@ -258,7 +309,7 @@ def deluser(name, username):
|
|||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
try:
|
||||
if __fixlocaluser(username.lower()) in existingMembers:
|
||||
if salt.utils.win_functions.get_sam_name(username) in existingMembers:
|
||||
if not __opts__['test']:
|
||||
groupObj.Remove('WinNT://' + username.replace('\\', '/'))
|
||||
|
||||
|
@ -280,16 +331,27 @@ def deluser(name, username):
|
|||
return ret
|
||||
|
||||
|
||||
def members(name, members_list):
|
||||
def members(name, members_list, **kwargs):
|
||||
'''
|
||||
remove a user from a group
|
||||
Ensure a group contains only the members in the list
|
||||
|
||||
Args:
|
||||
|
||||
name (str):
|
||||
The name of the group to modify
|
||||
|
||||
members_list (str):
|
||||
A single user or a comma separated list of users. The group will
|
||||
contain only the users specified in this list.
|
||||
|
||||
Returns:
|
||||
dict: A dictionary of results
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.members foo 'user1,user2,user3'
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
|
@ -297,7 +359,7 @@ def members(name, members_list):
|
|||
'changes': {'Users Added': [], 'Users Removed': []},
|
||||
'comment': []}
|
||||
|
||||
members_list = [__fixlocaluser(thisMember) for thisMember in members_list.lower().split(",")]
|
||||
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
|
||||
if not isinstance(members_list, list):
|
||||
ret['result'] = False
|
||||
ret['comment'].append('Members is not a list object')
|
||||
|
@ -364,27 +426,26 @@ def members(name, members_list):
|
|||
return ret
|
||||
|
||||
|
||||
def __fixlocaluser(username):
|
||||
'''
|
||||
prefixes a username w/o a backslash with the computername
|
||||
|
||||
i.e. __fixlocaluser('Administrator') would return 'computername\administrator'
|
||||
'''
|
||||
if '\\' not in username:
|
||||
username = ('{0}\\{1}').format(__salt__['grains.get']('host'), username)
|
||||
|
||||
return username.lower()
|
||||
|
||||
|
||||
def list_groups(refresh=False):
|
||||
'''
|
||||
Return a list of groups
|
||||
|
||||
Args:
|
||||
|
||||
refresh (bool):
|
||||
Refresh the info for all groups in ``__context__``. If False only
|
||||
the groups in ``__context__`` wil be returned. If True, the
|
||||
``__context__`` will be refreshed with current data and returned.
|
||||
Default is False
|
||||
|
||||
Returns:
|
||||
list: A list of groups on the machine
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.getent
|
||||
salt '*' group.list_groups
|
||||
'''
|
||||
if 'group.list_groups' in __context__ and not refresh:
|
||||
return __context__['group.getent']
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1285,6 +1285,18 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
|||
#Compute msiexec string
|
||||
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))
|
||||
|
||||
# Build cmd and arguments
|
||||
# cmd and arguments must be seperated for use with the task scheduler
|
||||
if use_msiexec:
|
||||
cmd = msiexec
|
||||
arguments = ['/i', cached_pkg]
|
||||
if pkginfo['version_num'].get('allusers', True):
|
||||
arguments.append('ALLUSERS="1"')
|
||||
arguments.extend(salt.utils.shlex_split(install_flags))
|
||||
else:
|
||||
cmd = cached_pkg
|
||||
arguments = salt.utils.shlex_split(install_flags)
|
||||
|
||||
# Install the software
|
||||
# Check Use Scheduler Option
|
||||
if pkginfo[version_num].get('use_scheduler', False):
|
||||
|
@ -1313,21 +1325,43 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
|||
start_time='01:00',
|
||||
ac_only=False,
|
||||
stop_if_on_batteries=False)
|
||||
|
||||
# Run Scheduled Task
|
||||
if not __salt__['task.run_wait'](name='update-salt-software'):
|
||||
log.error('Failed to install {0}'.format(pkg_name))
|
||||
log.error('Scheduled Task failed to run')
|
||||
ret[pkg_name] = {'install status': 'failed'}
|
||||
else:
|
||||
# Build the install command
|
||||
cmd = []
|
||||
if use_msiexec:
|
||||
cmd.extend([msiexec, '/i', cached_pkg])
|
||||
if pkginfo[version_num].get('allusers', True):
|
||||
cmd.append('ALLUSERS="1"')
|
||||
# Special handling for installing salt
|
||||
if pkg_name in ['salt-minion', 'salt-minion-py3']:
|
||||
ret[pkg_name] = {'install status': 'task started'}
|
||||
if not __salt__['task.run'](name='update-salt-software'):
|
||||
log.error('Failed to install {0}'.format(pkg_name))
|
||||
log.error('Scheduled Task failed to run')
|
||||
ret[pkg_name] = {'install status': 'failed'}
|
||||
else:
|
||||
|
||||
# Make sure the task is running, try for 5 secs
|
||||
from time import time
|
||||
t_end = time() + 5
|
||||
while time() < t_end:
|
||||
task_running = __salt__['task.status'](
|
||||
'update-salt-software') == 'Running'
|
||||
if task_running:
|
||||
break
|
||||
|
||||
if not task_running:
|
||||
log.error(
|
||||
'Failed to install {0}'.format(pkg_name))
|
||||
log.error('Scheduled Task failed to run')
|
||||
ret[pkg_name] = {'install status': 'failed'}
|
||||
|
||||
# All other packages run with task scheduler
|
||||
else:
|
||||
cmd.append(cached_pkg)
|
||||
cmd.extend(salt.utils.args.shlex_split(install_flags))
|
||||
if not __salt__['task.run_wait'](name='update-salt-software'):
|
||||
log.error('Failed to install {0}'.format(pkg_name))
|
||||
log.error('Scheduled Task failed to run')
|
||||
ret[pkg_name] = {'install status': 'failed'}
|
||||
else:
|
||||
|
||||
# Combine cmd and arguments
|
||||
cmd = [cmd].extend(arguments)
|
||||
|
||||
# Launch the command
|
||||
result = __salt__['cmd.run_all'](cmd,
|
||||
cache_path,
|
||||
|
|
|
@ -302,6 +302,11 @@ def get_community_names():
|
|||
# Windows SNMP service GUI.
|
||||
if isinstance(current_values, list):
|
||||
for current_value in current_values:
|
||||
|
||||
# Ignore error values
|
||||
if not isinstance(current_value, dict):
|
||||
continue
|
||||
|
||||
permissions = str()
|
||||
for permission_name in _PERMISSION_TYPES:
|
||||
if current_value['vdata'] == _PERMISSION_TYPES[permission_name]:
|
||||
|
|
|
@ -1260,7 +1260,7 @@ def status(name, location='\\'):
|
|||
task_service = win32com.client.Dispatch("Schedule.Service")
|
||||
task_service.Connect()
|
||||
|
||||
# get the folder to delete the folder from
|
||||
# get the folder where the task is defined
|
||||
task_folder = task_service.GetFolder(location)
|
||||
task = task_folder.GetTask(name)
|
||||
|
||||
|
|
|
@ -34,8 +34,10 @@ try:
|
|||
import yum
|
||||
HAS_YUM = True
|
||||
except ImportError:
|
||||
from salt.ext.six.moves import configparser
|
||||
HAS_YUM = False
|
||||
|
||||
from salt.ext.six.moves import configparser
|
||||
|
||||
# pylint: enable=import-error,redefined-builtin
|
||||
|
||||
# Import Salt libs
|
||||
|
@ -2814,41 +2816,32 @@ def _parse_repo_file(filename):
|
|||
'''
|
||||
Turn a single repo file into a dict
|
||||
'''
|
||||
repos = {}
|
||||
header = ''
|
||||
repo = ''
|
||||
with salt.utils.files.fopen(filename, 'r') as rfile:
|
||||
for line in rfile:
|
||||
if line.startswith('['):
|
||||
repo = line.strip().replace('[', '').replace(']', '')
|
||||
repos[repo] = {}
|
||||
parsed = configparser.ConfigParser()
|
||||
config = {}
|
||||
|
||||
# Even though these are essentially uselss, I want to allow the
|
||||
# user to maintain their own comments, etc
|
||||
if not line:
|
||||
if not repo:
|
||||
header += line
|
||||
if line.startswith('#'):
|
||||
if not repo:
|
||||
header += line
|
||||
else:
|
||||
if 'comments' not in repos[repo]:
|
||||
repos[repo]['comments'] = []
|
||||
repos[repo]['comments'].append(line.strip())
|
||||
continue
|
||||
try:
|
||||
parsed.read(filename)
|
||||
except configparser.MissingSectionHeaderError as err:
|
||||
log.error(
|
||||
'Failed to parse file {0}, error: {1}'.format(filename, err.message)
|
||||
)
|
||||
return ('', {})
|
||||
|
||||
# These are the actual configuration lines that matter
|
||||
if '=' in line:
|
||||
try:
|
||||
comps = line.strip().split('=')
|
||||
repos[repo][comps[0].strip()] = '='.join(comps[1:])
|
||||
except KeyError:
|
||||
log.error(
|
||||
'Failed to parse line in %s, offending line was '
|
||||
'\'%s\'', filename, line.rstrip()
|
||||
)
|
||||
for section in parsed._sections:
|
||||
section_dict = dict(parsed._sections[section])
|
||||
section_dict.pop('__name__')
|
||||
config[section] = section_dict
|
||||
|
||||
return (header, repos)
|
||||
# Try to extract leading comments
|
||||
headers = ''
|
||||
with salt.utils.fopen(filename, 'r') as rawfile:
|
||||
for line in rawfile:
|
||||
if line.strip().startswith('#'):
|
||||
headers += '{0}\n'.format(line.strip())
|
||||
else:
|
||||
break
|
||||
|
||||
return (headers, config)
|
||||
|
||||
|
||||
def file_list(*packages):
|
||||
|
|
|
@ -108,6 +108,10 @@ A REST API for Salt
|
|||
Collect and report statistics about the CherryPy server
|
||||
|
||||
Reports are available via the :py:class:`Stats` URL.
|
||||
stats_disable_auth : False
|
||||
Do not require authentication to access the ``/stats`` endpoint.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
static
|
||||
A filesystem path to static HTML/JavaScript/CSS/image assets.
|
||||
static_path : ``/static``
|
||||
|
@ -2708,6 +2712,10 @@ class Stats(object):
|
|||
'tools.salt_auth.on': True,
|
||||
})
|
||||
|
||||
def __init__(self):
|
||||
if cherrypy.config['apiopts'].get('stats_disable_auth'):
|
||||
self._cp_config['tools.salt_auth.on'] = False
|
||||
|
||||
def GET(self):
|
||||
'''
|
||||
Return a dump of statistics collected from the CherryPy server
|
||||
|
|
|
@ -233,7 +233,7 @@ class PillarCache(object):
|
|||
functions=self.functions,
|
||||
pillar_override=self.pillar_override,
|
||||
pillarenv=self.pillarenv)
|
||||
return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here
|
||||
return fresh_pillar.compile_pillar()
|
||||
|
||||
def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
|
||||
log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
|
||||
|
@ -763,7 +763,7 @@ class Pillar(object):
|
|||
|
||||
return pillar, errors
|
||||
|
||||
def _external_pillar_data(self, pillar, val, pillar_dirs, key):
|
||||
def _external_pillar_data(self, pillar, val, key):
|
||||
'''
|
||||
Builds actual pillar data structure and updates the ``pillar`` variable
|
||||
'''
|
||||
|
@ -772,26 +772,16 @@ class Pillar(object):
|
|||
if isinstance(val, dict):
|
||||
ext = self.ext_pillars[key](self.minion_id, pillar, **val)
|
||||
elif isinstance(val, list):
|
||||
if key == 'git':
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
val,
|
||||
pillar_dirs)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
*val)
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
*val)
|
||||
else:
|
||||
if key == 'git':
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
val,
|
||||
pillar_dirs)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
val)
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
val)
|
||||
return ext
|
||||
|
||||
def ext_pillar(self, pillar, pillar_dirs, errors=None):
|
||||
def ext_pillar(self, pillar, errors=None):
|
||||
'''
|
||||
Render the external pillar data
|
||||
'''
|
||||
|
@ -843,9 +833,8 @@ class Pillar(object):
|
|||
continue
|
||||
try:
|
||||
ext = self._external_pillar_data(pillar,
|
||||
val,
|
||||
pillar_dirs,
|
||||
key)
|
||||
val,
|
||||
key)
|
||||
except Exception as exc:
|
||||
errors.append(
|
||||
'Failed to load ext_pillar {0}: {1}'.format(
|
||||
|
@ -867,16 +856,14 @@ class Pillar(object):
|
|||
ext = None
|
||||
return pillar, errors
|
||||
|
||||
def compile_pillar(self, ext=True, pillar_dirs=None):
|
||||
def compile_pillar(self, ext=True):
|
||||
'''
|
||||
Render the pillar data and return
|
||||
'''
|
||||
top, top_errors = self.get_top()
|
||||
if ext:
|
||||
if self.opts.get('ext_pillar_first', False):
|
||||
self.opts['pillar'], errors = self.ext_pillar(
|
||||
self.pillar_override,
|
||||
pillar_dirs)
|
||||
self.opts['pillar'], errors = self.ext_pillar(self.pillar_override)
|
||||
self.rend = salt.loader.render(self.opts, self.functions)
|
||||
matches = self.top_matches(top)
|
||||
pillar, errors = self.render_pillar(matches, errors=errors)
|
||||
|
@ -888,8 +875,7 @@ class Pillar(object):
|
|||
else:
|
||||
matches = self.top_matches(top)
|
||||
pillar, errors = self.render_pillar(matches)
|
||||
pillar, errors = self.ext_pillar(
|
||||
pillar, pillar_dirs, errors=errors)
|
||||
pillar, errors = self.ext_pillar(pillar, errors=errors)
|
||||
else:
|
||||
matches = self.top_matches(top)
|
||||
pillar, errors = self.render_pillar(matches)
|
||||
|
@ -984,6 +970,6 @@ class Pillar(object):
|
|||
# ext_pillar etc.
|
||||
class AsyncPillar(Pillar):
|
||||
@tornado.gen.coroutine
|
||||
def compile_pillar(self, ext=True, pillar_dirs=None):
|
||||
ret = super(AsyncPillar, self).compile_pillar(ext=ext, pillar_dirs=pillar_dirs)
|
||||
def compile_pillar(self, ext=True):
|
||||
ret = super(AsyncPillar, self).compile_pillar(ext=ext)
|
||||
raise tornado.gen.Return(ret)
|
||||
|
|
|
@ -3,12 +3,6 @@
|
|||
Use a git repository as a Pillar source
|
||||
---------------------------------------
|
||||
|
||||
.. note::
|
||||
This external pillar has been rewritten for the :ref:`2015.8.0
|
||||
<release-2015-8-0>` release. The old method of configuring this
|
||||
external pillar will be maintained for a couple releases, allowing time for
|
||||
configurations to be updated to reflect the new usage.
|
||||
|
||||
This external pillar allows for a Pillar top file and Pillar SLS files to be
|
||||
sourced from a git repository.
|
||||
|
||||
|
@ -41,8 +35,7 @@ the repo's URL. Configuration details can be found below.
|
|||
- bar
|
||||
|
||||
Additionally, while git_pillar allows for the branch/tag to be overridden
|
||||
(see :ref:`here <git-pillar-env-remap>`, or :ref:`here
|
||||
<git-pillar-env-remap-legacy>` for Salt releases before 2015.8.0), keep in
|
||||
(see :ref:`here <git-pillar-env-remap>`), keep in
|
||||
mind that the top file must reference the actual environment name. It is
|
||||
common practice to make the environment in a git_pillar top file match the
|
||||
branch/tag name, but when remapping, the environment of course no longer
|
||||
|
@ -51,113 +44,10 @@ the repo's URL. Configuration details can be found below.
|
|||
common misconfiguration that may be to blame, and is a good first step in
|
||||
troubleshooting.
|
||||
|
||||
.. _git-pillar-pre-2015-8-0:
|
||||
.. _git-pillar-configuration:
|
||||
|
||||
Configuring git_pillar for Salt releases before 2015.8.0
|
||||
========================================================
|
||||
|
||||
.. note::
|
||||
This legacy configuration for git_pillar will no longer be supported as of
|
||||
the **Oxygen** release of Salt.
|
||||
|
||||
For Salt releases earlier than :ref:`2015.8.0 <release-2015-8-0>`,
|
||||
GitPython is the only supported provider for git_pillar. Individual
|
||||
repositories can be configured under the :conf_master:`ext_pillar`
|
||||
configuration parameter like so:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: master https://gitserver/git-pillar.git root=subdirectory
|
||||
|
||||
The repository is specified in the format ``<branch> <repo_url>``, with an
|
||||
optional ``root`` parameter (added in the :ref:`2014.7.0
|
||||
<release-2014-7-0>` release) which allows the pillar SLS files to be
|
||||
served up from a subdirectory (similar to :conf_master:`gitfs_root` in gitfs).
|
||||
|
||||
To use more than one branch from the same repo, multiple lines must be
|
||||
specified under :conf_master:`ext_pillar`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: master https://gitserver/git-pillar.git
|
||||
- git: dev https://gitserver/git-pillar.git
|
||||
|
||||
.. _git-pillar-env-remap-legacy:
|
||||
|
||||
To remap a specific branch to a specific Pillar environment, use the format
|
||||
``<branch>:<env>``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: develop:dev https://gitserver/git-pillar.git
|
||||
- git: master:prod https://gitserver/git-pillar.git
|
||||
|
||||
In this case, the ``develop`` branch would need its own ``top.sls`` with a
|
||||
``dev`` section in it, like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
dev:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
The ``master`` branch would need its own ``top.sls`` with a ``prod`` section in
|
||||
it:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
prod:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
If ``__env__`` is specified as the branch name, then git_pillar will first look
|
||||
at the minion's :conf_minion:`environment` option. If unset, it will fall back
|
||||
to using branch specified by the master's :conf_master:`gitfs_base`:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- git: __env__ https://gitserver/git-pillar.git root=pillar
|
||||
|
||||
The corresponding Pillar top file would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
{{saltenv}}:
|
||||
'*':
|
||||
- bar
|
||||
|
||||
.. note::
|
||||
This feature was unintentionally omitted when git_pillar was rewritten for
|
||||
the 2015.8.0 release. It was added again in the 2016.3.4 release, but it
|
||||
has changed slightly in that release. On Salt masters running 2015.8.0
|
||||
through 2016.3.3, this feature can only be accessed using the legacy config
|
||||
described above. For 2016.3.4 and later, refer to explanation of the
|
||||
``__env__`` parameter in the below section.
|
||||
|
||||
Versions 2016.3.0 through 2016.3.4 incorrectly check the *master's*
|
||||
``environment`` config option (instead of the minion's) before falling back
|
||||
to :conf_master:`gitfs_base`. This has been fixed in the 2016.3.5 and
|
||||
2016.11.1 releases (2016.11.0 contains the incorrect behavior).
|
||||
|
||||
Additionally, in releases before 2016.11.0, both ``{{env}}`` and
|
||||
``{{saltenv}}`` could be used as a placeholder for the environment.
|
||||
Starting in 2016.11.0, ``{{env}}`` is no longer supported.
|
||||
|
||||
.. _git-pillar-2015-8-0-and-later:
|
||||
|
||||
Configuring git_pillar for Salt releases 2015.8.0 and later
|
||||
===========================================================
|
||||
|
||||
.. note::
|
||||
In version 2015.8.0, the method of configuring git external pillars has
|
||||
changed, and now more closely resembles that of the :ref:`Git Fileserver
|
||||
Backend <tutorial-gitfs>`. If Salt detects the old configuration schema, it
|
||||
will use the pre-2015.8.0 code to compile the external pillar. A warning
|
||||
will also be logged.
|
||||
Configuring git_pillar for Salt
|
||||
===============================
|
||||
|
||||
Beginning with Salt version 2015.8.0, pygit2_ is now supported in addition to
|
||||
GitPython_. The requirements for GitPython_ and pygit2_ are the same as for
|
||||
|
@ -258,32 +148,6 @@ The corresponding Pillar top file would look like this:
|
|||
'*':
|
||||
- bar
|
||||
|
||||
.. note::
|
||||
This feature was unintentionally omitted when git_pillar was rewritten for
|
||||
the 2015.8.0 release. It was added again in the 2016.3.4 release, but it
|
||||
has changed slightly in that release. The fallback value replaced by
|
||||
``{{env}}`` is :conf_master: is :conf_master:`git_pillar_base`, while the
|
||||
legacy config's version of this feature replaces ``{{env}}`` with
|
||||
:conf_master:`gitfs_base`.
|
||||
|
||||
On Salt masters running 2015.8.0 through 2016.3.3, this feature can only be
|
||||
accessed using the legacy config in the previous section of this page.
|
||||
|
||||
The same issue which affected the behavior of the minion's
|
||||
:conf_minion:`environment` config value using the legacy configuration
|
||||
syntax (see the documentation in the pre-2015.8.0 section above for the
|
||||
legacy support of this feature) also affects the new-style git_pillar
|
||||
syntax in version 2016.3.4. This has been corrected in version 2016.3.5 and
|
||||
2016.11.1 (2016.11.0 contains the incorrect behavior).
|
||||
|
||||
2016.3.4 incorrectly checks the *master's* ``environment`` config option
|
||||
(instead of the minion's) before falling back to the master's
|
||||
:conf_master:`git_pillar_base`.
|
||||
|
||||
Additionally, in releases before 2016.11.0, both ``{{env}}`` and
|
||||
``{{saltenv}}`` could be used as a placeholder for the environment.
|
||||
Starting in 2016.11.0, ``{{env}}`` is no longer supported.
|
||||
|
||||
With the addition of pygit2_ support, git_pillar can now interact with
|
||||
authenticated remotes. Authentication works just like in gitfs (as outlined in
|
||||
the :ref:`Git Fileserver Backend Walkthrough <gitfs-authentication>`), only
|
||||
|
@ -469,8 +333,6 @@ from __future__ import absolute_import
|
|||
# Import python libs
|
||||
import copy
|
||||
import logging
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.gitfs
|
||||
|
@ -482,13 +344,6 @@ from salt.pillar import Pillar
|
|||
|
||||
# Import third party libs
|
||||
from salt.ext import six
|
||||
# pylint: disable=import-error
|
||||
try:
|
||||
import git
|
||||
HAS_GITPYTHON = True
|
||||
except ImportError:
|
||||
HAS_GITPYTHON = False
|
||||
# pylint: enable=import-error
|
||||
|
||||
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
|
||||
PER_REMOTE_ONLY = ('name', 'mountpoint')
|
||||
|
@ -509,339 +364,89 @@ def __virtual__():
|
|||
# No git external pillars were configured
|
||||
return False
|
||||
|
||||
for ext_pillar in git_ext_pillars:
|
||||
if isinstance(ext_pillar['git'], six.string_types):
|
||||
# Verification of legacy git pillar configuration
|
||||
if not HAS_GITPYTHON:
|
||||
log.error(
|
||||
'Git-based ext_pillar is enabled in configuration but '
|
||||
'could not be loaded, is GitPython installed?'
|
||||
)
|
||||
return False
|
||||
if not git.__version__ > '0.3.0':
|
||||
return False
|
||||
return __virtualname__
|
||||
else:
|
||||
# Verification of new git pillar configuration
|
||||
try:
|
||||
salt.utils.gitfs.GitPillar(__opts__)
|
||||
# Initialization of the GitPillar object did not fail, so we
|
||||
# know we have valid configuration syntax and that a valid
|
||||
# provider was detected.
|
||||
return __virtualname__
|
||||
except FileserverConfigError:
|
||||
pass
|
||||
return False
|
||||
try:
|
||||
salt.utils.gitfs.GitPillar(__opts__)
|
||||
# Initialization of the GitPillar object did not fail, so we
|
||||
# know we have valid configuration syntax and that a valid
|
||||
# provider was detected.
|
||||
return __virtualname__
|
||||
except FileserverConfigError:
|
||||
return False
|
||||
|
||||
|
||||
def ext_pillar(minion_id, repo, pillar_dirs):
|
||||
def ext_pillar(minion_id, repo):
|
||||
'''
|
||||
Checkout the ext_pillar sources and compile the resulting pillar SLS
|
||||
'''
|
||||
if isinstance(repo, six.string_types):
|
||||
return _legacy_git_pillar(minion_id, repo, pillar_dirs)
|
||||
else:
|
||||
opts = copy.deepcopy(__opts__)
|
||||
opts['pillar_roots'] = {}
|
||||
opts['__git_pillar'] = True
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
if __opts__.get('__role') == 'minion':
|
||||
# If masterless, fetch the remotes. We'll need to remove this once
|
||||
# we make the minion daemon able to run standalone.
|
||||
pillar.fetch_remotes()
|
||||
pillar.checkout()
|
||||
ret = {}
|
||||
merge_strategy = __opts__.get(
|
||||
'pillar_source_merging_strategy',
|
||||
'smart'
|
||||
)
|
||||
merge_lists = __opts__.get(
|
||||
'pillar_merge_lists',
|
||||
False
|
||||
)
|
||||
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
|
||||
# If pillarenv is set, only grab pillars with that match pillarenv
|
||||
if opts['pillarenv'] and env != opts['pillarenv']:
|
||||
log.debug(
|
||||
'env \'%s\' for pillar dir \'%s\' does not match '
|
||||
'pillarenv \'%s\', skipping',
|
||||
env, pillar_dir, opts['pillarenv']
|
||||
)
|
||||
continue
|
||||
if pillar_dir in pillar.pillar_linked_dirs:
|
||||
log.debug(
|
||||
'git_pillar is skipping processing on %s as it is a '
|
||||
'mounted repo', pillar_dir
|
||||
)
|
||||
continue
|
||||
else:
|
||||
log.debug(
|
||||
'git_pillar is processing pillar SLS from %s for pillar '
|
||||
'env \'%s\'', pillar_dir, env
|
||||
)
|
||||
|
||||
if env == '__env__':
|
||||
env = opts.get('pillarenv') \
|
||||
or opts.get('environment') \
|
||||
or opts.get('git_pillar_base')
|
||||
log.debug('__env__ maps to %s', env)
|
||||
|
||||
pillar_roots = [pillar_dir]
|
||||
|
||||
if __opts__['git_pillar_includes']:
|
||||
# Add the rest of the pillar_dirs in this environment to the
|
||||
# list, excluding the current pillar_dir being processed. This
|
||||
# is because it was already specified above as the first in the
|
||||
# list, so that its top file is sourced from the correct
|
||||
# location and not from another git_pillar remote.
|
||||
pillar_roots.extend(
|
||||
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
|
||||
if env == e and d != pillar_dir]
|
||||
)
|
||||
|
||||
opts['pillar_roots'] = {env: pillar_roots}
|
||||
|
||||
local_pillar = Pillar(opts, __grains__, minion_id, env)
|
||||
ret = salt.utils.dictupdate.merge(
|
||||
ret,
|
||||
local_pillar.compile_pillar(ext=False),
|
||||
strategy=merge_strategy,
|
||||
merge_lists=merge_lists
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
# Legacy git_pillar code
|
||||
class _LegacyGitPillar(object):
|
||||
'''
|
||||
Deal with the remote git repository for Pillar
|
||||
'''
|
||||
|
||||
def __init__(self, branch, repo_location, opts):
|
||||
'''
|
||||
Try to initialize the Git repo object
|
||||
'''
|
||||
self.branch = self.map_branch(branch, opts)
|
||||
self.rp_location = repo_location
|
||||
self.opts = opts
|
||||
self._envs = set()
|
||||
self.working_dir = ''
|
||||
self.repo = None
|
||||
|
||||
hash_type = getattr(hashlib, opts['hash_type'])
|
||||
hash_str = '{0} {1}'.format(self.branch, self.rp_location)
|
||||
repo_hash = hash_type(salt.utils.stringutils.to_bytes(hash_str)).hexdigest()
|
||||
rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash)
|
||||
|
||||
if not os.path.isdir(rp_):
|
||||
os.makedirs(rp_)
|
||||
try:
|
||||
self.repo = git.Repo.init(rp_)
|
||||
except (git.exc.NoSuchPathError,
|
||||
git.exc.InvalidGitRepositoryError) as exc:
|
||||
log.error(
|
||||
'GitPython exception caught while initializing the repo: %s. '
|
||||
'Maybe the git CLI program is not available.', exc
|
||||
)
|
||||
except Exception as exc:
|
||||
log.exception('Undefined exception in git pillar. '
|
||||
'This may be a bug should be reported to the '
|
||||
'SaltStack developers.')
|
||||
|
||||
# Git directory we are working on
|
||||
# Should be the same as self.repo.working_dir
|
||||
self.working_dir = rp_
|
||||
|
||||
if isinstance(self.repo, git.Repo):
|
||||
if not self.repo.remotes:
|
||||
try:
|
||||
self.repo.create_remote('origin', self.rp_location)
|
||||
# ignore git ssl verification if requested
|
||||
if self.opts.get('pillar_gitfs_ssl_verify', True):
|
||||
self.repo.git.config('http.sslVerify', 'true')
|
||||
else:
|
||||
self.repo.git.config('http.sslVerify', 'false')
|
||||
except os.error:
|
||||
# This exception occurs when two processes are
|
||||
# trying to write to the git config at once, go
|
||||
# ahead and pass over it since this is the only
|
||||
# write.
|
||||
# This should place a lock down.
|
||||
pass
|
||||
else:
|
||||
if self.repo.remotes.origin.url != self.rp_location:
|
||||
self.repo.remotes.origin.config_writer.set(
|
||||
'url', self.rp_location)
|
||||
|
||||
def map_branch(self, branch, opts=None):
|
||||
opts = __opts__ if opts is None else opts
|
||||
if branch == '__env__':
|
||||
branch = opts.get('environment') or 'base'
|
||||
if branch == 'base':
|
||||
branch = opts.get('gitfs_base') or 'master'
|
||||
elif ':' in branch:
|
||||
branch = branch.split(':', 1)[0]
|
||||
return branch
|
||||
|
||||
def update(self):
|
||||
'''
|
||||
Ensure you are following the latest changes on the remote
|
||||
|
||||
Return boolean whether it worked
|
||||
'''
|
||||
try:
|
||||
log.debug('Legacy git_pillar: Updating \'%s\'', self.rp_location)
|
||||
self.repo.git.fetch()
|
||||
except git.exc.GitCommandError as exc:
|
||||
log.error(
|
||||
'Unable to fetch the latest changes from remote %s: %s',
|
||||
self.rp_location, exc
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
checkout_ref = 'origin/{0}'.format(self.branch)
|
||||
log.debug('Legacy git_pillar: Checking out %s for \'%s\'',
|
||||
checkout_ref, self.rp_location)
|
||||
self.repo.git.checkout(checkout_ref)
|
||||
except git.exc.GitCommandError as exc:
|
||||
log.error(
|
||||
'Legacy git_pillar: Failed to checkout %s for \'%s\': %s',
|
||||
checkout_ref, self.rp_location, exc
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def envs(self):
|
||||
'''
|
||||
Return a list of refs that can be used as environments
|
||||
'''
|
||||
if isinstance(self.repo, git.Repo):
|
||||
remote = self.repo.remote()
|
||||
for ref in self.repo.refs:
|
||||
parted = ref.name.partition('/')
|
||||
short = parted[2] if parted[2] else parted[0]
|
||||
if isinstance(ref, git.Head):
|
||||
if short == 'master':
|
||||
short = 'base'
|
||||
if ref not in remote.stale_refs:
|
||||
self._envs.add(short)
|
||||
elif isinstance(ref, git.Tag):
|
||||
self._envs.add(short)
|
||||
|
||||
return list(self._envs)
|
||||
|
||||
|
||||
def _legacy_git_pillar(minion_id, repo_string, pillar_dirs):
|
||||
'''
|
||||
Support pre-Beryllium config schema
|
||||
'''
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'The git ext_pillar configuration is deprecated. Please refer to the '
|
||||
'documentation at '
|
||||
'https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.git_pillar.html '
|
||||
'for more information. This configuration will no longer be supported '
|
||||
'as of the Oxygen release of Salt.'
|
||||
)
|
||||
if pillar_dirs is None:
|
||||
return
|
||||
# split the branch, repo name and optional extra (key=val) parameters.
|
||||
options = repo_string.strip().split()
|
||||
branch_env = options[0]
|
||||
repo_location = options[1]
|
||||
root = ''
|
||||
|
||||
for extraopt in options[2:]:
|
||||
# Support multiple key=val attributes as custom parameters.
|
||||
DELIM = '='
|
||||
if DELIM not in extraopt:
|
||||
log.error(
|
||||
'Legacy git_pillar: Incorrectly formatted extra parameter '
|
||||
'\'%s\' within \'%s\' missing \'%s\')',
|
||||
extraopt, repo_string, DELIM
|
||||
)
|
||||
key, val = _extract_key_val(extraopt, DELIM)
|
||||
if key == 'root':
|
||||
root = val
|
||||
else:
|
||||
log.error(
|
||||
'Legacy git_pillar: Unrecognized extra parameter \'%s\' '
|
||||
'in \'%s\'',
|
||||
key, repo_string
|
||||
)
|
||||
|
||||
# environment is "different" from the branch
|
||||
cfg_branch, _, environment = branch_env.partition(':')
|
||||
|
||||
gitpil = _LegacyGitPillar(cfg_branch, repo_location, __opts__)
|
||||
branch = gitpil.branch
|
||||
|
||||
if environment == '':
|
||||
if branch == 'master':
|
||||
environment = 'base'
|
||||
else:
|
||||
environment = branch
|
||||
|
||||
# normpath is needed to remove appended '/' if root is empty string.
|
||||
pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root))
|
||||
log.debug(
|
||||
'Legacy git_pillar: pillar_dir for \'%s\' is \'%s\'',
|
||||
repo_string, pillar_dir
|
||||
)
|
||||
log.debug(
|
||||
'Legacy git_pillar: branch for \'%s\' is \'%s\'',
|
||||
repo_string, branch
|
||||
)
|
||||
|
||||
pillar_dirs.setdefault(pillar_dir, {})
|
||||
|
||||
if cfg_branch == '__env__' and branch not in ['master', 'base']:
|
||||
gitpil.update()
|
||||
elif pillar_dirs[pillar_dir].get(branch, False):
|
||||
log.debug(
|
||||
'Already processed pillar_dir \'%s\' for \'%s\'',
|
||||
pillar_dir, repo_string
|
||||
)
|
||||
return {} # we've already seen this combo
|
||||
|
||||
pillar_dirs[pillar_dir].setdefault(branch, True)
|
||||
|
||||
# Don't recurse forever-- the Pillar object will re-call the ext_pillar
|
||||
# function
|
||||
if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]:
|
||||
return {}
|
||||
|
||||
opts = copy.deepcopy(__opts__)
|
||||
|
||||
opts['pillar_roots'][environment] = [pillar_dir]
|
||||
opts['pillar_roots'] = {}
|
||||
opts['__git_pillar'] = True
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
if __opts__.get('__role') == 'minion':
|
||||
# If masterless, fetch the remotes. We'll need to remove this once
|
||||
# we make the minion daemon able to run standalone.
|
||||
pillar.fetch_remotes()
|
||||
pillar.checkout()
|
||||
ret = {}
|
||||
merge_strategy = __opts__.get(
|
||||
'pillar_source_merging_strategy',
|
||||
'smart'
|
||||
)
|
||||
merge_lists = __opts__.get(
|
||||
'pillar_merge_lists',
|
||||
False
|
||||
)
|
||||
for pillar_dir, env in six.iteritems(pillar.pillar_dirs):
|
||||
# If pillarenv is set, only grab pillars with that match pillarenv
|
||||
if opts['pillarenv'] and env != opts['pillarenv']:
|
||||
log.debug(
|
||||
'env \'%s\' for pillar dir \'%s\' does not match '
|
||||
'pillarenv \'%s\', skipping',
|
||||
env, pillar_dir, opts['pillarenv']
|
||||
)
|
||||
continue
|
||||
if pillar_dir in pillar.pillar_linked_dirs:
|
||||
log.debug(
|
||||
'git_pillar is skipping processing on %s as it is a '
|
||||
'mounted repo', pillar_dir
|
||||
)
|
||||
continue
|
||||
else:
|
||||
log.debug(
|
||||
'git_pillar is processing pillar SLS from %s for pillar '
|
||||
'env \'%s\'', pillar_dir, env
|
||||
)
|
||||
|
||||
pil = Pillar(opts, __grains__, minion_id, branch)
|
||||
if env == '__env__':
|
||||
env = opts.get('pillarenv') \
|
||||
or opts.get('environment') \
|
||||
or opts.get('git_pillar_base')
|
||||
log.debug('__env__ maps to %s', env)
|
||||
|
||||
return pil.compile_pillar(ext=False)
|
||||
pillar_roots = [pillar_dir]
|
||||
|
||||
if __opts__['git_pillar_includes']:
|
||||
# Add the rest of the pillar_dirs in this environment to the
|
||||
# list, excluding the current pillar_dir being processed. This
|
||||
# is because it was already specified above as the first in the
|
||||
# list, so that its top file is sourced from the correct
|
||||
# location and not from another git_pillar remote.
|
||||
pillar_roots.extend(
|
||||
[d for (d, e) in six.iteritems(pillar.pillar_dirs)
|
||||
if env == e and d != pillar_dir]
|
||||
)
|
||||
|
||||
def _update(branch, repo_location):
|
||||
'''
|
||||
Ensure you are following the latest changes on the remote
|
||||
opts['pillar_roots'] = {env: pillar_roots}
|
||||
|
||||
return boolean whether it worked
|
||||
'''
|
||||
gitpil = _LegacyGitPillar(branch, repo_location, __opts__)
|
||||
|
||||
return gitpil.update()
|
||||
|
||||
|
||||
def _envs(branch, repo_location):
|
||||
'''
|
||||
Return a list of refs that can be used as environments
|
||||
'''
|
||||
gitpil = _LegacyGitPillar(branch, repo_location, __opts__)
|
||||
|
||||
return gitpil.envs()
|
||||
local_pillar = Pillar(opts, __grains__, minion_id, env)
|
||||
ret = salt.utils.dictupdate.merge(
|
||||
ret,
|
||||
local_pillar.compile_pillar(ext=False),
|
||||
strategy=merge_strategy,
|
||||
merge_lists=merge_lists
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
def _extract_key_val(kv, delimiter='='):
|
||||
|
|
|
@ -27,6 +27,16 @@ in <> brackets) in the url in order to populate pillar data based on the grain v
|
|||
url: http://example.com/api/<nodename>
|
||||
with_grains: True
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
|
||||
If %s is present in the url, it will be automaticaly replaced by the minion_id:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- http_json:
|
||||
url: http://example.com/api/%s
|
||||
|
||||
Module Documentation
|
||||
====================
|
||||
'''
|
||||
|
@ -64,6 +74,9 @@ def ext_pillar(minion_id,
|
|||
:return: A dictionary of the pillar data to add.
|
||||
:rtype: dict
|
||||
'''
|
||||
|
||||
url = url.replace('%s', _quote(minion_id))
|
||||
|
||||
grain_pattern = r'<(?P<grain_name>.*?)>'
|
||||
|
||||
if with_grains:
|
||||
|
|
|
@ -27,6 +27,16 @@ in <> brackets) in the url in order to populate pillar data based on the grain v
|
|||
url: http://example.com/api/<nodename>
|
||||
with_grains: True
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
|
||||
If %s is present in the url, it will be automaticaly replaced by the minion_id:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
ext_pillar:
|
||||
- http_json:
|
||||
url: http://example.com/api/%s
|
||||
|
||||
Module Documentation
|
||||
====================
|
||||
'''
|
||||
|
@ -65,6 +75,9 @@ def ext_pillar(minion_id,
|
|||
:return: A dictionary of the pillar data to add.
|
||||
:rtype: dict
|
||||
'''
|
||||
|
||||
url = url.replace('%s', _quote(minion_id))
|
||||
|
||||
grain_pattern = r'<(?P<grain_name>.*?)>'
|
||||
|
||||
if with_grains:
|
||||
|
|
|
@ -67,6 +67,17 @@ provider: ``napalm_base``
|
|||
|
||||
.. versionadded:: 2017.7.1
|
||||
|
||||
multiprocessing: ``False``
|
||||
Overrides the :conf_minion:`multiprocessing` option, per proxy minion.
|
||||
The ``multiprocessing`` option must be turned off for SSH-based proxies.
|
||||
However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based.
|
||||
As multiple proxy minions may share the same configuration file,
|
||||
this option permits the configuration of the ``multiprocessing`` option
|
||||
more specifically, for some proxy minions.
|
||||
|
||||
.. versionadded:: 2017.7.2
|
||||
|
||||
|
||||
.. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems
|
||||
.. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments
|
||||
|
||||
|
|
103
salt/renderers/nacl.py
Normal file
103
salt/renderers/nacl.py
Normal file
|
@ -0,0 +1,103 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
r'''
|
||||
Renderer that will decrypt NACL ciphers
|
||||
|
||||
Any key in the SLS file can be an NACL cipher, and this renderer will decrypt it
|
||||
before passing it off to Salt. This allows you to safely store secrets in
|
||||
source control, in such a way that only your Salt master can decrypt them and
|
||||
distribute them only to the minions that need them.
|
||||
|
||||
The typical use-case would be to use ciphers in your pillar data, and keep a
|
||||
secret key on your master. You can put the public key in source control so that
|
||||
developers can add new secrets quickly and easily.
|
||||
|
||||
This renderer requires the libsodium library binary and libnacl >= 1.5.1
|
||||
python package (support for sealed boxes came in 1.5.1 version).
|
||||
|
||||
|
||||
Setup
|
||||
-----
|
||||
|
||||
To set things up, first generate a keypair. On the master, run the following:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# salt-call --local nacl.keygen keyfile=/root/.nacl
|
||||
# salt-call --local nacl.keygen_pub keyfile_pub=/root/.nacl.pub
|
||||
|
||||
|
||||
Using encrypted pillar
|
||||
---------------------
|
||||
|
||||
To encrypt secrets, copy the public key to your local machine and run:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ salt-call --local nacl.enc_pub datatoenc keyfile_pub=/root/.nacl.pub
|
||||
|
||||
|
||||
To apply the renderer on a file-by-file basis add the following line to the
|
||||
top of any pillar with nacl encrypted data in it:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
#!yaml|nacl
|
||||
|
||||
Now with your renderer configured, you can include your ciphers in your pillar
|
||||
data like so:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
#!yaml|nacl
|
||||
|
||||
a-secret: "NACL[MRN3cc+fmdxyQbz6WMF+jq1hKdU5X5BBI7OjK+atvHo1ll+w1gZ7XyWtZVfq9gK9rQaMfkDxmidJKwE0Mw==]"
|
||||
'''
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
import re
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.stringio
|
||||
import salt.syspaths
|
||||
|
||||
# Import 3rd-party libs
|
||||
import salt.ext.six as six
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
NACL_REGEX = r'^NACL\[(.*)\]$'
|
||||
|
||||
|
||||
def _decrypt_object(obj, **kwargs):
|
||||
'''
|
||||
Recursively try to decrypt any object. If the object is a six.string_types
|
||||
(string or unicode), and it contains a valid NACLENC pretext, decrypt it,
|
||||
otherwise keep going until a string is found.
|
||||
'''
|
||||
if salt.utils.stringio.is_readable(obj):
|
||||
return _decrypt_object(obj.getvalue(), **kwargs)
|
||||
if isinstance(obj, six.string_types):
|
||||
if re.search(NACL_REGEX, obj) is not None:
|
||||
return __salt__['nacl.dec_pub'](re.search(NACL_REGEX, obj).group(1), **kwargs)
|
||||
else:
|
||||
return obj
|
||||
elif isinstance(obj, dict):
|
||||
for key, value in six.iteritems(obj):
|
||||
obj[key] = _decrypt_object(value, **kwargs)
|
||||
return obj
|
||||
elif isinstance(obj, list):
|
||||
for key, value in enumerate(obj):
|
||||
obj[key] = _decrypt_object(value, **kwargs)
|
||||
return obj
|
||||
else:
|
||||
return obj
|
||||
|
||||
|
||||
def render(nacl_data, saltenv='base', sls='', argline='', **kwargs):
|
||||
'''
|
||||
Decrypt the data to be rendered using the given nacl key or the one given
|
||||
in config
|
||||
'''
|
||||
return _decrypt_object(nacl_data, **kwargs)
|
|
@ -52,7 +52,6 @@ from salt.ext import six
|
|||
|
||||
try:
|
||||
from raven import Client
|
||||
from raven.transport.http import HTTPTransport
|
||||
|
||||
has_raven = True
|
||||
except ImportError:
|
||||
|
@ -130,7 +129,7 @@ def returner(ret):
|
|||
return
|
||||
|
||||
if raven_config.get('dsn'):
|
||||
client = Client(raven_config.get('dsn'), transport=HTTPTransport)
|
||||
client = Client(raven_config.get('dsn'))
|
||||
else:
|
||||
try:
|
||||
servers = []
|
||||
|
@ -140,8 +139,7 @@ def returner(ret):
|
|||
servers=servers,
|
||||
public_key=raven_config['public_key'],
|
||||
secret_key=raven_config['secret_key'],
|
||||
project=raven_config['project'],
|
||||
transport=HTTPTransport
|
||||
project=raven_config['project']
|
||||
)
|
||||
except KeyError as missing_key:
|
||||
logger.error(
|
||||
|
|
|
@ -17,16 +17,28 @@ import salt.netapi
|
|||
|
||||
|
||||
def mk_token(**load):
|
||||
'''
|
||||
r'''
|
||||
Create an eauth token using provided credentials
|
||||
|
||||
Non-root users may specify an expiration date -- if allowed via the
|
||||
:conf_master:`token_expire_user_override` setting -- by passing an
|
||||
additional ``token_expire`` param. This overrides the
|
||||
:conf_master:`token_expire` setting of the same name in the Master config
|
||||
and is how long a token should live in seconds.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
salt-run auth.mk_token username=saltdev password=saltdev eauth=auto
|
||||
salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \\
|
||||
|
||||
# Create a token valid for three years.
|
||||
salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \
|
||||
token_expire=94670856
|
||||
|
||||
# Calculate the number of seconds using expr.
|
||||
salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \
|
||||
token_expire=$(expr \( 365 \* 24 \* 60 \* 60 \) \* 3)
|
||||
'''
|
||||
# This will hang if the master daemon is not running.
|
||||
netapi = salt.netapi.NetapiClient(__opts__)
|
||||
|
|
|
@ -11,7 +11,6 @@ import logging
|
|||
import salt.pillar.git_pillar
|
||||
import salt.utils.gitfs
|
||||
from salt.exceptions import SaltRunnerError
|
||||
from salt.ext import six
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -21,18 +20,13 @@ def update(branch=None, repo=None):
|
|||
.. versionadded:: 2014.1.0
|
||||
|
||||
.. versionchanged:: 2015.8.4
|
||||
This runner function now supports the :ref:`new git_pillar
|
||||
configuration schema <git-pillar-2015-8-0-and-later>` introduced in
|
||||
This runner function now supports the :ref:`git_pillar
|
||||
configuration schema <git-pillar-configuration>` introduced in
|
||||
2015.8.0. Additionally, the branch and repo can now be omitted to
|
||||
update all git_pillar remotes. The return data has also changed. For
|
||||
releases 2015.8.3 and earlier, there is no value returned. Starting
|
||||
with 2015.8.4, the return data is a dictionary. If using the :ref:`old
|
||||
git_pillar configuration schema <git-pillar-pre-2015-8-0>`, then the
|
||||
dictionary values will be ``True`` if the update completed without
|
||||
error, and ``False`` if an error occurred. If using the :ref:`new
|
||||
git_pillar configuration schema <git-pillar-2015-8-0-and-later>`, the
|
||||
values will be ``True`` only if new commits were fetched, and ``False``
|
||||
if there were errors or no new commits were fetched.
|
||||
update all git_pillar remotes. The return data has also changed to
|
||||
a dictionary. The values will be ``True`` only if new commits were
|
||||
fetched, and ``False`` if there were errors or no new commits were
|
||||
fetched.
|
||||
|
||||
Fetch one or all configured git_pillar remotes.
|
||||
|
||||
|
@ -56,7 +50,7 @@ def update(branch=None, repo=None):
|
|||
|
||||
# Update specific branch and repo
|
||||
salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git'
|
||||
# Update all repos (2015.8.4 and later)
|
||||
# Update all repos
|
||||
salt-run git_pillar.update
|
||||
# Run with debug logging
|
||||
salt-run git_pillar.update -l debug
|
||||
|
@ -67,47 +61,30 @@ def update(branch=None, repo=None):
|
|||
if pillar_type != 'git':
|
||||
continue
|
||||
pillar_conf = ext_pillar[pillar_type]
|
||||
if isinstance(pillar_conf, six.string_types):
|
||||
parts = pillar_conf.split()
|
||||
if len(parts) >= 2:
|
||||
desired_branch, desired_repo = parts[:2]
|
||||
# Skip this remote if it doesn't match the search criteria
|
||||
if branch is not None:
|
||||
if branch != desired_branch:
|
||||
continue
|
||||
if repo is not None:
|
||||
if repo != desired_repo:
|
||||
continue
|
||||
ret[pillar_conf] = salt.pillar.git_pillar._LegacyGitPillar(
|
||||
parts[0],
|
||||
parts[1],
|
||||
__opts__).update()
|
||||
|
||||
else:
|
||||
pillar = salt.utils.gitfs.GitPillar(__opts__)
|
||||
pillar.init_remotes(pillar_conf,
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
for remote in pillar.remotes:
|
||||
# Skip this remote if it doesn't match the search criteria
|
||||
if branch is not None:
|
||||
if branch != remote.branch:
|
||||
continue
|
||||
if repo is not None:
|
||||
if repo != remote.url:
|
||||
continue
|
||||
try:
|
||||
result = remote.fetch()
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception \'{0}\' caught while fetching git_pillar '
|
||||
'remote \'{1}\''.format(exc, remote.id),
|
||||
exc_info_on_loglevel=logging.DEBUG
|
||||
)
|
||||
result = False
|
||||
finally:
|
||||
remote.clear_lock()
|
||||
ret[remote.id] = result
|
||||
pillar = salt.utils.gitfs.GitPillar(__opts__)
|
||||
pillar.init_remotes(pillar_conf,
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
for remote in pillar.remotes:
|
||||
# Skip this remote if it doesn't match the search criteria
|
||||
if branch is not None:
|
||||
if branch != remote.branch:
|
||||
continue
|
||||
if repo is not None:
|
||||
if repo != remote.url:
|
||||
continue
|
||||
try:
|
||||
result = remote.fetch()
|
||||
except Exception as exc:
|
||||
log.error(
|
||||
'Exception \'{0}\' caught while fetching git_pillar '
|
||||
'remote \'{1}\''.format(exc, remote.id),
|
||||
exc_info_on_loglevel=logging.DEBUG
|
||||
)
|
||||
result = False
|
||||
finally:
|
||||
remote.clear_lock()
|
||||
ret[remote.id] = result
|
||||
|
||||
if not ret:
|
||||
if branch is not None or repo is not None:
|
||||
|
|
|
@ -149,10 +149,14 @@ Optional small program to encrypt data without needing salt modules.
|
|||
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import base64
|
||||
import os
|
||||
import salt.utils
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils.files
|
||||
import salt.utils.platform
|
||||
import salt.utils.win_functions
|
||||
import salt.utils.win_dacl
|
||||
import salt.syspaths
|
||||
|
@ -203,7 +207,7 @@ def _get_sk(**kwargs):
|
|||
key = config['sk']
|
||||
sk_file = config['sk_file']
|
||||
if not key and sk_file:
|
||||
with salt.utils.fopen(sk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
|
||||
key = str(keyf.read()).rstrip('\n')
|
||||
if key is None:
|
||||
raise Exception('no key or sk_file found')
|
||||
|
@ -218,7 +222,7 @@ def _get_pk(**kwargs):
|
|||
pubkey = config['pk']
|
||||
pk_file = config['pk_file']
|
||||
if not pubkey and pk_file:
|
||||
with salt.utils.fopen(pk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'rb') as keyf:
|
||||
pubkey = str(keyf.read()).rstrip('\n')
|
||||
if pubkey is None:
|
||||
raise Exception('no pubkey or pk_file found')
|
||||
|
@ -256,9 +260,9 @@ def keygen(sk_file=None, pk_file=None):
|
|||
if sk_file and pk_file is None:
|
||||
if not os.path.isfile(sk_file):
|
||||
kp = libnacl.public.SecretKey()
|
||||
with salt.utils.fopen(sk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.sk))
|
||||
if salt.utils.is_windows():
|
||||
if salt.utils.platform.is_windows():
|
||||
cur_user = salt.utils.win_functions.get_current_user()
|
||||
salt.utils.win_dacl.set_owner(sk_file, cur_user)
|
||||
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
|
||||
|
@ -277,25 +281,25 @@ def keygen(sk_file=None, pk_file=None):
|
|||
|
||||
if os.path.isfile(sk_file) and not os.path.isfile(pk_file):
|
||||
# generate pk using the sk
|
||||
with salt.utils.fopen(sk_file, 'rb') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'rb') as keyf:
|
||||
sk = str(keyf.read()).rstrip('\n')
|
||||
sk = base64.b64decode(sk)
|
||||
kp = libnacl.public.SecretKey(sk)
|
||||
with salt.utils.fopen(pk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.pk))
|
||||
return 'saved pk_file: {0}'.format(pk_file)
|
||||
|
||||
kp = libnacl.public.SecretKey()
|
||||
with salt.utils.fopen(sk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(sk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.sk))
|
||||
if salt.utils.is_windows():
|
||||
if salt.utils.platform.is_windows():
|
||||
cur_user = salt.utils.win_functions.get_current_user()
|
||||
salt.utils.win_dacl.set_owner(sk_file, cur_user)
|
||||
salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True)
|
||||
else:
|
||||
# chmod 0600 file
|
||||
os.chmod(sk_file, 1536)
|
||||
with salt.utils.fopen(pk_file, 'w') as keyf:
|
||||
with salt.utils.files.fopen(pk_file, 'w') as keyf:
|
||||
keyf.write(base64.b64encode(kp.pk))
|
||||
return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file)
|
||||
|
||||
|
@ -335,13 +339,13 @@ def enc_file(name, out=None, **kwargs):
|
|||
data = __salt__['cp.get_file_str'](name)
|
||||
except Exception as e:
|
||||
# likly using salt-run so fallback to local filesystem
|
||||
with salt.utils.fopen(name, 'rb') as f:
|
||||
with salt.utils.files.fopen(name, 'rb') as f:
|
||||
data = f.read()
|
||||
d = enc(data, **kwargs)
|
||||
if out:
|
||||
if os.path.isfile(out):
|
||||
raise Exception('file:{0} already exist.'.format(out))
|
||||
with salt.utils.fopen(out, 'wb') as f:
|
||||
with salt.utils.files.fopen(out, 'wb') as f:
|
||||
f.write(d)
|
||||
return 'Wrote: {0}'.format(out)
|
||||
return d
|
||||
|
@ -382,13 +386,13 @@ def dec_file(name, out=None, **kwargs):
|
|||
data = __salt__['cp.get_file_str'](name)
|
||||
except Exception as e:
|
||||
# likly using salt-run so fallback to local filesystem
|
||||
with salt.utils.fopen(name, 'rb') as f:
|
||||
with salt.utils.files.fopen(name, 'rb') as f:
|
||||
data = f.read()
|
||||
d = dec(data, **kwargs)
|
||||
if out:
|
||||
if os.path.isfile(out):
|
||||
raise Exception('file:{0} already exist.'.format(out))
|
||||
with salt.utils.fopen(out, 'wb') as f:
|
||||
with salt.utils.files.fopen(out, 'wb') as f:
|
||||
f.write(d)
|
||||
return 'Wrote: {0}'.format(out)
|
||||
return d
|
||||
|
|
|
@ -59,6 +59,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
|
|||
ret['cache'] = sync_cache(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
|
||||
ret['fileserver'] = sync_fileserver(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
|
||||
ret['tops'] = sync_tops(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
|
||||
ret['tokens'] = sync_eauth_tokens(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -524,3 +525,29 @@ def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
|
|||
'''
|
||||
return salt.utils.extmods.sync(__opts__, 'roster', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
|
||||
extmod_blacklist=extmod_blacklist)[0]
|
||||
|
||||
|
||||
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
|
||||
'''
|
||||
.. versionadded:: 2017.7.2
|
||||
|
||||
Sync eauth token modules from ``salt://_tokens`` to the master
|
||||
|
||||
saltenv : base
|
||||
The fileserver environment from which to sync. To sync from more than
|
||||
one environment, pass a comma-separated list.
|
||||
|
||||
extmod_whitelist : None
|
||||
comma-seperated list of modules to sync
|
||||
|
||||
extmod_blacklist : None
|
||||
comma-seperated list of modules to blacklist based on type
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-run saltutil.sync_eauth_tokens
|
||||
'''
|
||||
return salt.utils.extmods.sync(__opts__, 'tokens', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
|
||||
extmod_blacklist=extmod_blacklist)[0]
|
||||
|
|
|
@ -2127,11 +2127,14 @@ class State(object):
|
|||
reqs[r_state].append(chunk)
|
||||
continue
|
||||
try:
|
||||
if (fnmatch.fnmatch(chunk[u'name'], req_val) or
|
||||
fnmatch.fnmatch(chunk[u'__id__'], req_val)):
|
||||
if req_key == u'id' or chunk[u'state'] == req_key:
|
||||
found = True
|
||||
reqs[r_state].append(chunk)
|
||||
if isinstance(req_val, six.string_types):
|
||||
if (fnmatch.fnmatch(chunk[u'name'], req_val) or
|
||||
fnmatch.fnmatch(chunk[u'__id__'], req_val)):
|
||||
if req_key == u'id' or chunk[u'state'] == req_key:
|
||||
found = True
|
||||
reqs[r_state].append(chunk)
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError as exc:
|
||||
raise SaltRenderError(
|
||||
u'Could not locate requisite of [{0}] present in state with name [{1}]'.format(
|
||||
|
@ -2309,13 +2312,17 @@ class State(object):
|
|||
req_val = lreq[req_key]
|
||||
comment += \
|
||||
u'{0}{1}: {2}\n'.format(u' ' * 23, req_key, req_val)
|
||||
running[tag] = {u'changes': {},
|
||||
u'result': False,
|
||||
u'comment': comment,
|
||||
u'__run_num__': self.__run_num,
|
||||
u'__sls__': low[u'__sls__']}
|
||||
if low.get('__prereq__'):
|
||||
run_dict = self.pre
|
||||
else:
|
||||
run_dict = running
|
||||
run_dict[tag] = {u'changes': {},
|
||||
u'result': False,
|
||||
u'comment': comment,
|
||||
u'__run_num__': self.__run_num,
|
||||
u'__sls__': low[u'__sls__']}
|
||||
self.__run_num += 1
|
||||
self.event(running[tag], len(chunks), fire_event=low.get(u'fire_event'))
|
||||
self.event(run_dict[tag], len(chunks), fire_event=low.get(u'fire_event'))
|
||||
return running
|
||||
for chunk in reqs:
|
||||
# Check to see if the chunk has been run, only run it if
|
||||
|
|
|
@ -910,7 +910,7 @@ def group_present(name, policies=None, policies_from_pillars=None, managed_polic
|
|||
in the policies argument will override the keys defined in
|
||||
policies_from_pillars.
|
||||
|
||||
manaaged_policies (list)
|
||||
managed_policies (list)
|
||||
A list of policy names or ARNs that should be attached to this group.
|
||||
|
||||
users (list)
|
||||
|
|
|
@ -144,8 +144,7 @@ def present(name,
|
|||
ret['changes']['created'] = __salt__['docker.create_network'](
|
||||
name,
|
||||
driver=driver,
|
||||
driver_opts=driver_opts,
|
||||
check_duplicate=True)
|
||||
driver_opts=driver_opts)
|
||||
|
||||
except Exception as exc:
|
||||
ret['comment'] = ('Failed to create network \'{0}\': {1}'
|
||||
|
|
|
@ -41,6 +41,19 @@ or clusters are available.
|
|||
as this makes all master configuration settings available in all minion's
|
||||
pillars.
|
||||
|
||||
Etcd profile configuration can be overriden using following arguments: ``host``,
|
||||
``port``, ``username``, ``password``, ``ca``, ``client_key`` and ``client_cert``.
|
||||
|
||||
.. code-block:: yaml
|
||||
my-value:
|
||||
etcd.set:
|
||||
- name: /path/to/key
|
||||
- value: value
|
||||
- host: 127.0.0.1
|
||||
- port: 2379
|
||||
- username: user
|
||||
- password: pass
|
||||
|
||||
Available Functions
|
||||
-------------------
|
||||
|
||||
|
@ -132,7 +145,7 @@ def __virtual__():
|
|||
return __virtualname__ if HAS_ETCD else False
|
||||
|
||||
|
||||
def set_(name, value, profile=None):
|
||||
def set_(name, value, profile=None, **kwargs):
|
||||
'''
|
||||
Set a key in etcd and can be called as ``set``.
|
||||
|
||||
|
@ -161,11 +174,11 @@ def set_(name, value, profile=None):
|
|||
'changes': {}
|
||||
}
|
||||
|
||||
current = __salt__['etcd.get'](name, profile=profile)
|
||||
current = __salt__['etcd.get'](name, profile=profile, **kwargs)
|
||||
if not current:
|
||||
created = True
|
||||
|
||||
result = __salt__['etcd.set'](name, value, profile=profile)
|
||||
result = __salt__['etcd.set'](name, value, profile=profile, **kwargs)
|
||||
|
||||
if result and result != current:
|
||||
if created:
|
||||
|
@ -179,7 +192,7 @@ def set_(name, value, profile=None):
|
|||
return rtn
|
||||
|
||||
|
||||
def wait_set(name, value, profile=None):
|
||||
def wait_set(name, value, profile=None, **kwargs):
|
||||
'''
|
||||
Set a key in etcd only if the watch statement calls it. This function is
|
||||
also aliased as ``wait_set``.
|
||||
|
@ -208,7 +221,7 @@ def wait_set(name, value, profile=None):
|
|||
}
|
||||
|
||||
|
||||
def directory(name, profile=None):
|
||||
def directory(name, profile=None, **kwargs):
|
||||
'''
|
||||
Create a directory in etcd.
|
||||
|
||||
|
@ -234,11 +247,11 @@ def directory(name, profile=None):
|
|||
'changes': {}
|
||||
}
|
||||
|
||||
current = __salt__['etcd.get'](name, profile=profile, recurse=True)
|
||||
current = __salt__['etcd.get'](name, profile=profile, recurse=True, **kwargs)
|
||||
if not current:
|
||||
created = True
|
||||
|
||||
result = __salt__['etcd.set'](name, None, directory=True, profile=profile)
|
||||
result = __salt__['etcd.set'](name, None, directory=True, profile=profile, **kwargs)
|
||||
|
||||
if result and result != current:
|
||||
if created:
|
||||
|
@ -250,7 +263,7 @@ def directory(name, profile=None):
|
|||
return rtn
|
||||
|
||||
|
||||
def rm_(name, recurse=False, profile=None):
|
||||
def rm_(name, recurse=False, profile=None, **kwargs):
|
||||
'''
|
||||
Deletes a key from etcd. This function is also aliased as ``rm``.
|
||||
|
||||
|
@ -275,11 +288,11 @@ def rm_(name, recurse=False, profile=None):
|
|||
'changes': {}
|
||||
}
|
||||
|
||||
if not __salt__['etcd.get'](name, profile=profile):
|
||||
if not __salt__['etcd.get'](name, profile=profile, **kwargs):
|
||||
rtn['comment'] = 'Key does not exist'
|
||||
return rtn
|
||||
|
||||
if __salt__['etcd.rm'](name, recurse=recurse, profile=profile):
|
||||
if __salt__['etcd.rm'](name, recurse=recurse, profile=profile, **kwargs):
|
||||
rtn['comment'] = 'Key removed'
|
||||
rtn['changes'] = {
|
||||
name: 'Deleted'
|
||||
|
@ -290,7 +303,7 @@ def rm_(name, recurse=False, profile=None):
|
|||
return rtn
|
||||
|
||||
|
||||
def wait_rm(name, recurse=False, profile=None):
|
||||
def wait_rm(name, recurse=False, profile=None, **kwargs):
|
||||
'''
|
||||
Deletes a key from etcd only if the watch statement calls it.
|
||||
This function is also aliased as ``wait_rm``.
|
||||
|
|
|
@ -1458,8 +1458,6 @@ def latest(name,
|
|||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True):
|
||||
merge_rev = remote_rev if rev == 'HEAD' \
|
||||
else desired_upstream
|
||||
|
||||
if git_ver >= _LooseVersion('1.8.1.6'):
|
||||
# --ff-only added in version 1.8.1.6. It's not
|
||||
|
@ -1476,7 +1474,7 @@ def latest(name,
|
|||
|
||||
__salt__['git.merge'](
|
||||
target,
|
||||
rev=merge_rev,
|
||||
rev=remote_rev,
|
||||
opts=merge_opts,
|
||||
user=user,
|
||||
password=password)
|
||||
|
|
|
@ -1,10 +1,15 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
r'''
|
||||
Management of user groups
|
||||
=========================
|
||||
|
||||
The group module is used to create and manage unix group settings, groups
|
||||
can be either present or absent:
|
||||
The group module is used to create and manage group settings, groups can be
|
||||
either present or absent. User/Group names can be passed to the ``adduser``,
|
||||
``deluser``, and ``members`` parameters. ``adduser`` and ``deluser`` can be used
|
||||
together but not with ``members``.
|
||||
|
||||
In Windows, if no domain is specified in the user or group name (ie:
|
||||
`DOMAIN\username``) the module will assume a local user or group.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -36,6 +41,10 @@ import sys
|
|||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils
|
||||
import salt.utils.win_functions
|
||||
|
||||
|
||||
def _changes(name,
|
||||
gid=None,
|
||||
|
@ -50,6 +59,18 @@ def _changes(name,
|
|||
if not lgrp:
|
||||
return False
|
||||
|
||||
# User and Domain names are not case sensitive in Windows. Let's make them
|
||||
# all lower case so we can compare properly
|
||||
if salt.utils.is_windows():
|
||||
if lgrp['members']:
|
||||
lgrp['members'] = [user.lower() for user in lgrp['members']]
|
||||
if members:
|
||||
members = [salt.utils.win_functions.get_sam_name(user) for user in members]
|
||||
if addusers:
|
||||
addusers = [salt.utils.win_functions.get_sam_name(user) for user in addusers]
|
||||
if delusers:
|
||||
delusers = [salt.utils.win_functions.get_sam_name(user) for user in delusers]
|
||||
|
||||
change = {}
|
||||
if gid:
|
||||
if lgrp['gid'] != gid:
|
||||
|
@ -57,7 +78,7 @@ def _changes(name,
|
|||
|
||||
if members:
|
||||
# -- if new member list if different than the current
|
||||
if set(lgrp['members']) ^ set(members):
|
||||
if set(lgrp['members']).symmetric_difference(members):
|
||||
change['members'] = members
|
||||
|
||||
if addusers:
|
||||
|
@ -79,31 +100,58 @@ def present(name,
|
|||
addusers=None,
|
||||
delusers=None,
|
||||
members=None):
|
||||
'''
|
||||
r'''
|
||||
Ensure that a group is present
|
||||
|
||||
name
|
||||
The name of the group to manage
|
||||
Args:
|
||||
|
||||
gid
|
||||
The group id to assign to the named group; if left empty, then the next
|
||||
available group id will be assigned
|
||||
name (str):
|
||||
The name of the group to manage
|
||||
|
||||
system
|
||||
Whether or not the named group is a system group. This is essentially
|
||||
the '-r' option of 'groupadd'.
|
||||
gid (str):
|
||||
The group id to assign to the named group; if left empty, then the
|
||||
next available group id will be assigned. Ignored on Windows
|
||||
|
||||
addusers
|
||||
List of additional users to be added as a group members.
|
||||
system (bool):
|
||||
Whether or not the named group is a system group. This is essentially
|
||||
the '-r' option of 'groupadd'. Ignored on Windows
|
||||
|
||||
delusers
|
||||
Ensure these user are removed from the group membership.
|
||||
addusers (list):
|
||||
List of additional users to be added as a group members. Cannot
|
||||
conflict with names in delusers. Cannot be used in conjunction with
|
||||
members.
|
||||
|
||||
members
|
||||
Replace existing group members with a list of new members.
|
||||
delusers (list):
|
||||
Ensure these user are removed from the group membership. Cannot
|
||||
conflict with names in addusers. Cannot be used in conjunction with
|
||||
members.
|
||||
|
||||
Note: Options 'members' and 'addusers/delusers' are mutually exclusive and
|
||||
can not be used together.
|
||||
members (list):
|
||||
Replace existing group members with a list of new members. Cannot be
|
||||
used in conjunction with addusers or delusers.
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# Adds DOMAIN\db_admins and Administrators to the local db_admin group
|
||||
# Removes Users
|
||||
db_admin:
|
||||
group.present:
|
||||
- addusers:
|
||||
- DOMAIN\db_admins
|
||||
- Administrators
|
||||
- delusers:
|
||||
- Users
|
||||
|
||||
# Ensures only DOMAIN\domain_admins and the local Administrator are
|
||||
# members of the local Administrators group. All other users are
|
||||
# removed
|
||||
Administrators:
|
||||
group.present:
|
||||
- members:
|
||||
- DOMAIN\domain_admins
|
||||
- Administrator
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
|
@ -233,8 +281,17 @@ def absent(name):
|
|||
'''
|
||||
Ensure that the named group is absent
|
||||
|
||||
name
|
||||
The name of the group to remove
|
||||
Args:
|
||||
name (str):
|
||||
The name of the group to remove
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
# Removes the local group `db_admin`
|
||||
db_admin:
|
||||
group.absent
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
|
|
|
@ -15,6 +15,7 @@ import logging
|
|||
# Import Salt libs
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves import zip
|
||||
from salt.exceptions import CommandExecutionError
|
||||
import salt.utils.files
|
||||
|
||||
# Import XML parser
|
||||
|
@ -36,17 +37,23 @@ def _elements_equal(e1, e2):
|
|||
return all(_elements_equal(c1, c2) for c1, c2 in zip(e1, e2))
|
||||
|
||||
|
||||
def _fail(ret, msg):
|
||||
ret['comment'] = msg
|
||||
ret['result'] = False
|
||||
return ret
|
||||
|
||||
|
||||
def present(name,
|
||||
config=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Ensure the job is present in the Jenkins
|
||||
configured jobs
|
||||
Ensure the job is present in the Jenkins configured jobs
|
||||
|
||||
name
|
||||
The unique name for the Jenkins job
|
||||
|
||||
config
|
||||
The Salt URL for the file to use for
|
||||
configuring the job.
|
||||
The Salt URL for the file to use for configuring the job
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
|
@ -54,9 +61,7 @@ def present(name,
|
|||
'changes': {},
|
||||
'comment': ['Job {0} is up to date.'.format(name)]}
|
||||
|
||||
_job_exists = __salt__['jenkins.job_exists'](name)
|
||||
|
||||
if _job_exists:
|
||||
if __salt__['jenkins.job_exists'](name):
|
||||
_current_job_config = __salt__['jenkins.get_job_config'](name)
|
||||
buf = six.moves.StringIO(_current_job_config)
|
||||
oldXML = ET.fromstring(buf.read())
|
||||
|
@ -68,21 +73,28 @@ def present(name,
|
|||
diff = difflib.unified_diff(
|
||||
ET.tostringlist(oldXML, encoding='utf8', method='xml'),
|
||||
ET.tostringlist(newXML, encoding='utf8', method='xml'), lineterm='')
|
||||
__salt__['jenkins.update_job'](name, config, __env__)
|
||||
ret['changes'][name] = ''.join(diff)
|
||||
ret['comment'].append('Job {0} updated.'.format(name))
|
||||
try:
|
||||
__salt__['jenkins.update_job'](name, config, __env__)
|
||||
except CommandExecutionError as exc:
|
||||
return _fail(ret, exc.strerror)
|
||||
else:
|
||||
ret['changes'] = ''.join(diff)
|
||||
ret['comment'].append('Job \'{0}\' updated.'.format(name))
|
||||
|
||||
else:
|
||||
cached_source_path = __salt__['cp.cache_file'](config, __env__)
|
||||
with salt.utils.files.fopen(cached_source_path) as _fp:
|
||||
new_config_xml = _fp.read()
|
||||
|
||||
__salt__['jenkins.create_job'](name, config, __env__)
|
||||
try:
|
||||
__salt__['jenkins.create_job'](name, config, __env__)
|
||||
except CommandExecutionError as exc:
|
||||
return _fail(ret, exc.strerror)
|
||||
|
||||
buf = six.moves.StringIO(new_config_xml)
|
||||
diff = difflib.unified_diff('', buf.readlines(), lineterm='')
|
||||
ret['changes'][name] = ''.join(diff)
|
||||
ret['comment'].append('Job {0} added.'.format(name))
|
||||
ret['comment'].append('Job \'{0}\' added.'.format(name))
|
||||
|
||||
ret['comment'] = '\n'.join(ret['comment'])
|
||||
return ret
|
||||
|
@ -91,24 +103,23 @@ def present(name,
|
|||
def absent(name,
|
||||
**kwargs):
|
||||
'''
|
||||
Ensure the job is present in the Jenkins
|
||||
configured jobs
|
||||
Ensure the job is absent from the Jenkins configured jobs
|
||||
|
||||
name
|
||||
The name of the Jenkins job to remove.
|
||||
|
||||
The name of the Jenkins job to remove
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {},
|
||||
'comment': []}
|
||||
|
||||
_job_exists = __salt__['jenkins.job_exists'](name)
|
||||
|
||||
if _job_exists:
|
||||
__salt__['jenkins.delete_job'](name)
|
||||
ret['comment'] = 'Job {0} deleted.'.format(name)
|
||||
if __salt__['jenkins.job_exists'](name):
|
||||
try:
|
||||
__salt__['jenkins.delete_job'](name)
|
||||
except CommandExecutionError as exc:
|
||||
return _fail(ret, exc.strerror)
|
||||
else:
|
||||
ret['comment'] = 'Job \'{0}\' deleted.'.format(name)
|
||||
else:
|
||||
ret['comment'] = 'Job {0} already absent.'.format(name)
|
||||
ret['comment'] = 'Job \'{0}\' already absent.'.format(name)
|
||||
return ret
|
||||
|
|
|
@ -52,7 +52,7 @@ log = logging.getLogger(__name__)
|
|||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only make these states available if a pkg provider has been detected or
|
||||
Only make these states available if a kernelpkg provider has been detected or
|
||||
assigned for this minion
|
||||
'''
|
||||
return 'kernelpkg.upgrade' in __salt__
|
||||
|
@ -67,8 +67,8 @@ def latest_installed(name, **kwargs): # pylint: disable=unused-argument
|
|||
|
||||
This state only installs the kernel, but does not activate it.
|
||||
The new kernel should become active at the next reboot.
|
||||
See :mod:`kernelpkg.needs_reboot <salt.modules.kernelpkg.needs_reboot>` for details on
|
||||
how to detect this condition, :mod:`kernelpkg.latest_active <salt.states.kernelpkg.latest_active>`
|
||||
See :py:func:`kernelpkg.needs_reboot <salt.modules.kernelpkg_linux_yum.needs_reboot>` for details on
|
||||
how to detect this condition, and :py:func:`~salt.states.kernelpkg.latest_active`
|
||||
to initiale a reboot when needed.
|
||||
|
||||
name
|
||||
|
@ -113,8 +113,9 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum
|
|||
system. If the running version is not the latest one installed, this
|
||||
state will reboot the system.
|
||||
|
||||
See :mod:`kernelpkg.upgrade <salt.modules.kernelpkg.upgrade>` and
|
||||
:mod:`kernelpkg.latest_installed <salt.states.kernelpkg.latest_installed>` for ways to install new kernel packages.
|
||||
See :py:func:`kernelpkg.upgrade <salt.modules.kernelpkg_linux_yum.upgrade>` and
|
||||
:py:func:`~salt.states.kernelpkg.latest_installed`
|
||||
for ways to install new kernel packages.
|
||||
|
||||
This module does not attempt to understand or manage boot loader configurations
|
||||
it is possible to have a new kernel installed, but a boot loader configuration
|
||||
|
@ -122,7 +123,8 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum
|
|||
schedule this state to run automatically.
|
||||
|
||||
Because this state function may cause the system to reboot, it may be preferable
|
||||
to move it to the very end of the state run. See :mod:`kernelpkg.latest_wait <salt.states.kernelpkg.latest_wait>`
|
||||
to move it to the very end of the state run.
|
||||
See :py:func:`~salt.states.kernelpkg.latest_wait`
|
||||
for a waitable state that can be called with the `listen` requesite.
|
||||
|
||||
name
|
||||
|
@ -168,7 +170,7 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum
|
|||
def latest_wait(name, at_time=None, **kwargs): # pylint: disable=unused-argument
|
||||
'''
|
||||
Initiate a reboot if the running kernel is not the latest one installed. This is the
|
||||
waitable version of :mod:`kernelpkg.latest_active <salt.states.kernelpkg.latest_active>` and
|
||||
waitable version of :py:func:`~salt.states.kernelpkg.latest_active` and
|
||||
will not take any action unless triggered by a watch or listen requesite.
|
||||
|
||||
.. note::
|
||||
|
|
96
salt/states/mssql_database.py
Normal file
96
salt/states/mssql_database.py
Normal file
|
@ -0,0 +1,96 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management of Microsoft SQLServer Databases
|
||||
===========================================
|
||||
|
||||
The mssql_database module is used to create
|
||||
and manage SQL Server Databases
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
yolo:
|
||||
mssql_database.present
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import collections
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if the mssql module is present
|
||||
'''
|
||||
return 'mssql.version' in __salt__
|
||||
|
||||
|
||||
def _normalize_options(options):
|
||||
if type(options) in [dict, collections.OrderedDict]:
|
||||
return ['{0}={1}'.format(k, v) for k, v in options.items()]
|
||||
if type(options) is list and (not len(options) or type(options[0]) is str):
|
||||
return options
|
||||
# Invalid options
|
||||
if type(options) is not list or type(options[0]) not in [dict, collections.OrderedDict]:
|
||||
return []
|
||||
return [o for d in options for o in _normalize_options(d)]
|
||||
|
||||
|
||||
def present(name, containment='NONE', options=None, **kwargs):
|
||||
'''
|
||||
Ensure that the named database is present with the specified options
|
||||
|
||||
name
|
||||
The name of the database to manage
|
||||
containment
|
||||
Defaults to NONE
|
||||
options
|
||||
Can be a list of strings, a dictionary, or a list of dictionaries
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if __salt__['mssql.db_exists'](name, **kwargs):
|
||||
ret['comment'] = 'Database {0} is already present (Not going to try to set its options)'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Database {0} is set to be added'.format(name)
|
||||
return ret
|
||||
|
||||
db_created = __salt__['mssql.db_create'](name, containment=containment, new_database_options=_normalize_options(options), **kwargs)
|
||||
if db_created is not True: # Non-empty strings are also evaluated to True, so we cannot use if not db_created:
|
||||
ret['result'] = False
|
||||
ret['comment'] += 'Database {0} failed to be created: {1}'.format(name, db_created)
|
||||
return ret
|
||||
ret['comment'] += 'Database {0} has been added'.format(name)
|
||||
ret['changes'][name] = 'Present'
|
||||
return ret
|
||||
|
||||
|
||||
def absent(name, **kwargs):
|
||||
'''
|
||||
Ensure that the named database is absent
|
||||
|
||||
name
|
||||
The name of the database to remove
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if not __salt__['mssql.db_exists'](name):
|
||||
ret['comment'] = 'Database {0} is not present'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Database {0} is set to be removed'.format(name)
|
||||
return ret
|
||||
if __salt__['mssql.db_remove'](name, **kwargs):
|
||||
ret['comment'] = 'Database {0} has been removed'.format(name)
|
||||
ret['changes'][name] = 'Absent'
|
||||
return ret
|
||||
# else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Database {0} failed to be removed'.format(name)
|
||||
return ret
|
116
salt/states/mssql_login.py
Normal file
116
salt/states/mssql_login.py
Normal file
|
@ -0,0 +1,116 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management of Microsoft SQLServer Logins
|
||||
========================================
|
||||
|
||||
The mssql_login module is used to create
|
||||
and manage SQL Server Logins
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
frank:
|
||||
mssql_login.present
|
||||
- domain: mydomain
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import collections
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if the mssql module is present
|
||||
'''
|
||||
return 'mssql.version' in __salt__
|
||||
|
||||
|
||||
def _normalize_options(options):
|
||||
if type(options) in [dict, collections.OrderedDict]:
|
||||
return ['{0}={1}'.format(k, v) for k, v in options.items()]
|
||||
if type(options) is list and (not len(options) or type(options[0]) is str):
|
||||
return options
|
||||
# Invalid options
|
||||
if type(options) is not list or type(options[0]) not in [dict, collections.OrderedDict]:
|
||||
return []
|
||||
return [o for d in options for o in _normalize_options(d)]
|
||||
|
||||
|
||||
def present(name, password=None, domain=None, server_roles=None, options=None, **kwargs):
|
||||
'''
|
||||
Checks existance of the named login.
|
||||
If not present, creates the login with the specified roles and options.
|
||||
|
||||
name
|
||||
The name of the login to manage
|
||||
password
|
||||
Creates a SQL Server authentication login
|
||||
Since hashed passwords are varbinary values, if the
|
||||
new_login_password is 'long', it will be considered
|
||||
to be HASHED.
|
||||
domain
|
||||
Creates a Windows authentication login.
|
||||
Needs to be NetBIOS domain or hostname
|
||||
server_roles
|
||||
Add this login to all the server roles in the list
|
||||
options
|
||||
Can be a list of strings, a dictionary, or a list of dictionaries
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if bool(password) == bool(domain):
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'One and only one of password and domain should be specifies'
|
||||
return ret
|
||||
if __salt__['mssql.login_exists'](name, domain=domain, **kwargs):
|
||||
ret['comment'] = 'Login {0} is already present (Not going to try to set its password)'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Login {0} is set to be added'.format(name)
|
||||
return ret
|
||||
|
||||
login_created = __salt__['mssql.login_create'](name,
|
||||
new_login_password=password,
|
||||
new_login_domain=domain,
|
||||
new_login_roles=server_roles,
|
||||
new_login_options=_normalize_options(options),
|
||||
**kwargs)
|
||||
# Non-empty strings are also evaluated to True, so we cannot use if not login_created:
|
||||
if login_created is not True:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Login {0} failed to be added: {1}'.format(name, login_created)
|
||||
return ret
|
||||
ret['comment'] = 'Login {0} has been added. '.format(name)
|
||||
ret['changes'][name] = 'Present'
|
||||
return ret
|
||||
|
||||
|
||||
def absent(name, **kwargs):
|
||||
'''
|
||||
Ensure that the named login is absent
|
||||
|
||||
name
|
||||
The name of the login to remove
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if not __salt__['mssql.login_exists'](name):
|
||||
ret['comment'] = 'Login {0} is not present'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Login {0} is set to be removed'.format(name)
|
||||
return ret
|
||||
if __salt__['mssql.login_remove'](name, **kwargs):
|
||||
ret['comment'] = 'Login {0} has been removed'.format(name)
|
||||
ret['changes'][name] = 'Absent'
|
||||
return ret
|
||||
# else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Login {0} failed to be removed'.format(name)
|
||||
return ret
|
84
salt/states/mssql_role.py
Normal file
84
salt/states/mssql_role.py
Normal file
|
@ -0,0 +1,84 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management of Microsoft SQLServer Databases
|
||||
===========================================
|
||||
|
||||
The mssql_role module is used to create
|
||||
and manage SQL Server Roles
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
yolo:
|
||||
mssql_role.present
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if the mssql module is present
|
||||
'''
|
||||
return 'mssql.version' in __salt__
|
||||
|
||||
|
||||
def present(name, owner=None, grants=None, **kwargs):
|
||||
'''
|
||||
Ensure that the named database is present with the specified options
|
||||
|
||||
name
|
||||
The name of the database to manage
|
||||
owner
|
||||
Adds owner using AUTHORIZATION option
|
||||
Grants
|
||||
Can only be a list of strings
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if __salt__['mssql.role_exists'](name, **kwargs):
|
||||
ret['comment'] = 'Role {0} is already present (Not going to try to set its grants)'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Role {0} is set to be added'.format(name)
|
||||
return ret
|
||||
|
||||
role_created = __salt__['mssql.role_create'](name, owner=owner, grants=grants, **kwargs)
|
||||
if role_created is not True: # Non-empty strings are also evaluated to True, so we cannot use if not role_created:
|
||||
ret['result'] = False
|
||||
ret['comment'] += 'Role {0} failed to be created: {1}'.format(name, role_created)
|
||||
return ret
|
||||
ret['comment'] += 'Role {0} has been added'.format(name)
|
||||
ret['changes'][name] = 'Present'
|
||||
return ret
|
||||
|
||||
|
||||
def absent(name, **kwargs):
|
||||
'''
|
||||
Ensure that the named database is absent
|
||||
|
||||
name
|
||||
The name of the database to remove
|
||||
'''
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
|
||||
if not __salt__['mssql.role_exists'](name):
|
||||
ret['comment'] = 'Role {0} is not present'.format(name)
|
||||
return ret
|
||||
if __opts__['test']:
|
||||
ret['result'] = None
|
||||
ret['comment'] = 'Role {0} is set to be removed'.format(name)
|
||||
return ret
|
||||
if __salt__['mssql.role_remove'](name, **kwargs):
|
||||
ret['comment'] = 'Role {0} has been removed'.format(name)
|
||||
ret['changes'][name] = 'Absent'
|
||||
return ret
|
||||
# else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Role {0} failed to be removed'.format(name)
|
||||
return ret
|
114
salt/states/mssql_user.py
Normal file
114
salt/states/mssql_user.py
Normal file
|
@ -0,0 +1,114 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management of Microsoft SQLServer Users
|
||||
=======================================
|
||||
|
||||
The mssql_user module is used to create
|
||||
and manage SQL Server Users
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
frank:
|
||||
mssql_user.present:
|
||||
- database: yolo
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
import collections
|
||||
|
||||
|
||||
def __virtual__():
    '''
    Only load if the mssql module is present
    '''
    mssql_available = 'mssql.version' in __salt__
    return mssql_available
|
||||
|
||||
|
||||
def _normalize_options(options):
    '''
    Normalize ``options`` into a flat list of ``KEY=VALUE`` strings.

    Accepted shapes:

    * a dict (including ``collections.OrderedDict``) -- one ``key=value``
      entry per item, in iteration order
    * a list of strings -- returned unchanged (an empty list is also valid)
    * a list of dicts -- each dict is flattened recursively and the
      results are concatenated

    Anything else (including ``None``) yields an empty list.
    '''
    # isinstance covers dict subclasses such as collections.OrderedDict,
    # which the old identity-based type checks had to special-case.
    if isinstance(options, dict):
        return ['{0}={1}'.format(key, value) for key, value in options.items()]
    if isinstance(options, list) and (not options or isinstance(options[0], str)):
        # Already a (possibly empty) list of option strings.
        return options
    # Invalid options: not a list of dicts (and not any accepted shape above).
    if not isinstance(options, list) or not isinstance(options[0], dict):
        return []
    return [opt for entry in options for opt in _normalize_options(entry)]
|
||||
|
||||
|
||||
def present(name, login=None, domain=None, database=None, roles=None, options=None, **kwargs):
    '''
    Checks existence of the named user.
    If not present, creates the user with the specified roles and options.

    name
        The name of the user to manage
    login
        If not specified, will be created WITHOUT LOGIN
    domain
        Creates a Windows authentication user.
        Needs to be NetBIOS domain or hostname
    database
        The database of the user (not the login)
    roles
        Add this user to all the roles in the list
    options
        Can be a list of strings, a dictionary, or a list of dictionaries
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # A Windows-authentication (domain) user requires an existing login.
    if domain and not login:
        ret['result'] = False
        ret['comment'] = 'domain cannot be set without login'
        return ret
    if __salt__['mssql.user_exists'](name, domain=domain, database=database, **kwargs):
        ret['comment'] = 'User {0} is already present (Not going to try to set its roles or options)'.format(name)
        return ret
    if __opts__['test']:
        # Dry run: report the pending creation without touching the server.
        ret['result'] = None
        ret['comment'] = 'User {0} is set to be added'.format(name)
        return ret

    user_created = __salt__['mssql.user_create'](name, login=login,
                                                 domain=domain,
                                                 database=database,
                                                 roles=roles,
                                                 options=_normalize_options(options),
                                                 **kwargs)
    if user_created is not True:  # Non-empty strings are also evaluated to True, so we cannot use if not user_created:
        ret['result'] = False
        ret['comment'] += 'User {0} failed to be added: {1}'.format(name, user_created)
        return ret
    ret['comment'] += 'User {0} has been added'.format(name)
    ret['changes'][name] = 'Present'
    return ret
|
||||
|
||||
|
||||
def absent(name, **kwargs):
    '''
    Ensure that the named user is absent

    name
        The username of the user to remove
    '''
    ret = dict(name=name, changes={}, result=True, comment='')

    if not __salt__['mssql.user_exists'](name):
        # Nothing to do: the user is already gone.
        ret['comment'] = 'User {0} is not present'.format(name)
    elif __opts__['test']:
        # Dry run: report the pending removal without touching the server.
        ret['result'] = None
        ret['comment'] = 'User {0} is set to be removed'.format(name)
    elif __salt__['mssql.user_remove'](name, **kwargs):
        ret['comment'] = 'User {0} has been removed'.format(name)
        ret['changes'][name] = 'Absent'
    else:
        ret['result'] = False
        ret['comment'] = 'User {0} failed to be removed'.format(name)
    return ret
|
|
@ -68,7 +68,7 @@ def present(dbname, name,
|
|||
'db_password': db_password,
|
||||
'db_host': db_host,
|
||||
'db_port': db_port,
|
||||
'runas': user
|
||||
'user': user
|
||||
}
|
||||
|
||||
# check if schema exists
|
||||
|
@ -144,7 +144,7 @@ def absent(dbname, name, user=None,
|
|||
'db_password': db_password,
|
||||
'db_host': db_host,
|
||||
'db_port': db_port,
|
||||
'runas': user
|
||||
'user': user
|
||||
}
|
||||
|
||||
# check if schema exists and remove it
|
||||
|
|
|
@ -13,9 +13,10 @@ The postgres_users module is used to create and manage Postgres users.
|
|||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import logging
|
||||
|
||||
# Salt imports
|
||||
from salt.modules import postgres
|
||||
|
@ -45,6 +46,7 @@ def present(name,
|
|||
password=None,
|
||||
default_password=None,
|
||||
refresh_password=None,
|
||||
valid_until=None,
|
||||
groups=None,
|
||||
user=None,
|
||||
maintenance_db=None,
|
||||
|
@ -112,6 +114,9 @@ def present(name,
|
|||
This behaviour makes it possible to execute in environments without
|
||||
superuser access available, e.g. Amazon RDS for PostgreSQL
|
||||
|
||||
valid_until
|
||||
A date and time after which the role's password is no longer valid.
|
||||
|
||||
groups
|
||||
A string of comma separated groups the user should be in
|
||||
|
||||
|
@ -168,7 +173,6 @@ def present(name,
|
|||
if user_attr is not None:
|
||||
mode = 'update'
|
||||
|
||||
# The user is not present, make it!
|
||||
cret = None
|
||||
update = {}
|
||||
if mode == 'update':
|
||||
|
@ -199,6 +203,18 @@ def present(name,
|
|||
update['superuser'] = superuser
|
||||
if password is not None and (refresh_password or user_attr['password'] != password):
|
||||
update['password'] = True
|
||||
if valid_until is not None:
|
||||
valid_until_dt = __salt__['postgres.psql_query'](
|
||||
'SELECT \'{0}\'::timestamp(0) as dt;'.format(
|
||||
valid_until.replace('\'', '\'\'')),
|
||||
**db_args)[0]['dt']
|
||||
try:
|
||||
valid_until_dt = datetime.datetime.strptime(
|
||||
valid_until_dt, '%Y-%m-%d %H:%M:%S')
|
||||
except ValueError:
|
||||
valid_until_dt = None
|
||||
if valid_until_dt != user_attr['expiry time']:
|
||||
update['valid_until'] = valid_until
|
||||
if groups is not None:
|
||||
lgroups = groups
|
||||
if isinstance(groups, (six.string_types, six.text_type)):
|
||||
|
@ -228,6 +244,7 @@ def present(name,
|
|||
inherit=inherit,
|
||||
replication=replication,
|
||||
rolepassword=password,
|
||||
valid_until=valid_until,
|
||||
groups=groups,
|
||||
**db_args)
|
||||
else:
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue