Merge branch 'master' into requirements_mock

commit 4422bd8ae1
Daniel Wozniak, 2020-01-11 11:27:42 -07:00 (committed by GitHub)
115 changed files with 7108 additions and 1929 deletions


@@ -10,7 +10,7 @@ runTestSuite(
nox_env_name: 'runtests-zeromq',
nox_passthrough_opts: '--ssh-tests',
python_version: 'py2',
testrun_timeout: 6,
testrun_timeout: 7,
use_spot_instances: true)
// vim: ft=groovy


@@ -10,7 +10,7 @@ runTestSuite(
nox_env_name: 'runtests-zeromq',
nox_passthrough_opts: '--unit',
python_version: 'py3',
testrun_timeout: 8,
testrun_timeout: 9,
use_spot_instances: false)
// vim: ft=groovy


@@ -10,7 +10,7 @@ runTestSuite(
nox_env_name: 'runtests-zeromq',
nox_passthrough_opts: '--unit',
python_version: 'py3',
testrun_timeout: 8,
testrun_timeout: 9,
use_spot_instances: false)
// vim: ft=groovy

.git-blame-ignore-revs (new file)

.github/stale.yml (11 changed lines)

@@ -2,15 +2,18 @@
# Number of days of inactivity before an issue becomes stale
# 600 is approximately 1 year and 8 months
daysUntilStale: 600
daysUntilStale: 30
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
#exemptLabels:
# - pinned
# - security
exemptLabels:
- Confirmed
- Blocker
- Critical
- P1
- P2
# Label to use when marking an issue as stale
staleLabel: stale

.gitignore (4 changed lines)

@@ -14,6 +14,7 @@ MANIFEST
*.DS_Store
.pytest_cache
Pipfile.lock
.mypy_cache/*
# virtualenv
# - ignores directories of a virtualenv when you create it right on
@@ -103,6 +104,9 @@ tests/integration/cloud/providers/pki/minions
# Ignore nox virtualenvs
/.nox/
# Ignore pyenv files
.python-version
# Kitchen tests files
.kitchen.local.yml
kitchen.local.yml


@@ -25,3 +25,14 @@
# script: Fedora
# minion:
# cheese: edam
#tencentcloud-guangzhou-s1sm1:
# provider: my-tencentcloud-config
# availability_zone: ap-guangzhou-3
# image: img-31tjrtph
# size: S1.SMALL1
# allocate_public_ip: True
# internet_max_bandwidth_out: 1
# password: '153e41ec96140152'
# securitygroups:
# - sg-5e90804b


@@ -0,0 +1,10 @@
#tencentcloud-guangzhou-s1sm1:
# provider: my-tencentcloud-config
# availability_zone: ap-guangzhou-3
# image: img-31tjrtph
# size: S1.SMALL1
# allocate_public_ip: True
# internet_max_bandwidth_out: 1
# password: '153e41ec96140152'
# securitygroups:
# - sg-5e90804b


@@ -87,3 +87,12 @@
#my-scaleway-config:
# driver: scaleway
#my-tencentcloud-config:
# driver: tencentcloud
# Tencent Cloud Secret Id
# id: AKIDA64pOio9BMemkApzevX0HS169S4b750A
# Tencent Cloud Secret Key
# key: 8r2xmPn0C5FDvRAlmcJimiTZKVRsk260
# Tencent Cloud Region
# location: ap-guangzhou


@@ -0,0 +1,8 @@
#my-tencentcloud-config:
# driver: tencentcloud
# Tencent Cloud Secret Id
# id: AKIDA64pOio9BMemkApzevX0HS169S4b750A
# Tencent Cloud Secret Key
# key: 8r2xmPn0C5FDvRAlmcJimiTZKVRsk260
# Tencent Cloud Region
# location: ap-guangzhou


@@ -350,7 +350,7 @@
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minons with grain
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains


@@ -302,7 +302,7 @@ syndic_user: salt
# the autosign_file and the auto_accept setting.
#autoreject_file: /etc/salt/autoreject.conf
# If the autosign_grains_dir is specified, incoming keys from minons with grain
# If the autosign_grains_dir is specified, incoming keys from minions with grain
# values matching those defined in files in this directory will be accepted
# automatically. This is insecure. Minions need to be configured to send the grains.
#autosign_grains_dir: /etc/salt/autosign_grains


@@ -34,6 +34,7 @@ cloud modules
scaleway
softlayer
softlayer_hw
tencentcloud
vagrant
virtualbox
vmware


@@ -0,0 +1,6 @@
==============================
salt.cloud.clouds.tencentcloud
==============================
.. automodule:: salt.cloud.clouds.tencentcloud
:members:


@@ -512,6 +512,7 @@ execution modules
xapi_virt
xbpspkg
xfs
xml
xmpp
yumpkg
zabbix


@@ -4,3 +4,4 @@ salt.modules.saltcheck
.. automodule:: salt.modules.saltcheck
:members:
:exclude-members: SaltCheck, StateTestLoader


@@ -0,0 +1,6 @@
================
salt.modules.xml
================
.. automodule:: salt.modules.xml
:members:


@@ -317,6 +317,7 @@ state modules
win_wusa
winrepo
x509
xml
xmpp
zabbix_action
zabbix_host


@@ -0,0 +1,6 @@
===============
salt.states.xml
===============
.. automodule:: salt.states.xml
:members:


@@ -38,26 +38,26 @@ These are features that are available for almost every cloud host.
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+
|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+=======+
|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+-------+
[1] Yes, if salt-api is enabled.
@@ -74,46 +74,46 @@ instance name to be passed in. For example:
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
|attach_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|del_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|delvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|detach_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_keypairs | | |Yes | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|rename |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|set_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_term_protect | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|take_action | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+
|attach_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|del_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|detach_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|disable_term_protect |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|enable_term_protect |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|keepvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_keypairs | | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|rename |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|set_tags |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_term_protect | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|take_action | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
Functions
=========
@@ -126,83 +126,83 @@ require the name of the provider to be passed in. For example:
.. container:: scrollable
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
|block_device_mappings |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|create_keypair | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|create_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|delete_key | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|delete_keypair | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|delete_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_ip | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_key | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_keyid | | |Yes | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_keypair | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_networkid | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_node | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_password | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_size | | |Yes | | |Yes | | | | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_spot_config | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|get_subnetid | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|import_key | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|key_list | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|keyname |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_availability_zones| | | |Yes| | | | | | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_custom_images | | | | | | | | | | | |Yes | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_keys | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|list_vlans | | | | | | | | | | | |Yes |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|rackconnect | | | | | | | |Yes | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|reformat_node | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|securitygroup |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|securitygroupid | | | |Yes| | | | | | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_image | | | |Yes| | | | |Yes | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_key | | | | | |Yes | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_keypair | | |Yes |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
|show_volume | | | |Yes| | | | | | | | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|Tencent|
| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |Cloud |
+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+=======+
|block_device_mappings |Yes | | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_keypair | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|create_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_keypair | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|delete_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_ip | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_key | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_keyid | | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_keypair | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_networkid | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_node | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_password | |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_size | | |Yes | | |Yes | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_spot_config | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|get_subnetid | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|iam_profile |Yes | | |Yes| | | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|import_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|key_list | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|keyname |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_availability_zones| | | |Yes| | | | | | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_custom_images | | | | | | | | | | | |Yes | | |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_keys | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|list_vlans | | | | | | | | | | | |Yes |Yes | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|rackconnect | | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|reformat_node | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|securitygroup |Yes | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|securitygroupid | | | |Yes| | | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_image | | | |Yes| | | | |Yes | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_key | | | | | |Yes | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_keypair | | |Yes |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
|show_volume | | | |Yes| | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+-------+
[1] Yes, if salt-api is enabled.


@@ -128,6 +128,7 @@ Cloud Provider Specifics
Getting Started With Scaleway <scaleway>
Getting Started With Saltify <saltify>
Getting Started With SoftLayer <softlayer>
Getting Started With Tencent Cloud <tencentcloud>
Getting Started With Vagrant <vagrant>
Getting Started With Vexxhost <vexxhost>
Getting Started With Virtualbox <virtualbox>


@@ -0,0 +1,309 @@
==================================
Getting Started With Tencent Cloud
==================================
Tencent Cloud is a secure, reliable and high-performance cloud compute service
provided by Tencent. It is the 2nd largest Cloud Provider in China.
Dependencies
============
The Tencent Cloud driver for Salt Cloud requires the ``tencentcloud-sdk-python`` package,
which is available at PyPI:
https://pypi.org/project/tencentcloud-sdk-python/
This package can be installed using ``pip`` or ``easy_install``:
.. code-block:: bash
# pip install tencentcloud-sdk-python
# easy_install tencentcloud-sdk-python
Provider Configuration
======================
To use this module, set up the cloud configuration at
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/*.conf``:
.. code-block:: yaml
my-tencentcloud-config:
driver: tencentcloud
# Tencent Cloud Secret Id
id: AKIDA64pOio9BMemkApzevX0HS169S4b750A
# Tencent Cloud Secret Key
key: 8r2xmPn0C5FDvRAlmcJimiTZKVRsk260
# Tencent Cloud Region
location: ap-guangzhou
Configuration Parameters
~~~~~~~~~~~~~~~~~~~~~~~~
driver
------
**Required**. Must be set to ``tencentcloud`` to use this module.
id
--
**Required**. Your Tencent Cloud secret id.
key
---
**Required**. Your Tencent Cloud secret key.
location
--------
**Optional**. If this value is not specified, the default is ``ap-guangzhou``.
Available locations can be found using the ``--list-locations`` option:
.. code-block:: bash
# salt-cloud --list-locations my-tencentcloud-config
Profile Configuration
=====================
Tencent Cloud profiles require a ``provider``, ``availability_zone``, ``image`` and ``size``.
Set up an initial profile at ``/etc/salt/cloud.profiles`` or ``/etc/salt/cloud.profiles.d/*.conf``:
.. code-block:: yaml
tencentcloud-guangzhou-s1sm1:
provider: my-tencentcloud-config
availability_zone: ap-guangzhou-3
image: img-31tjrtph
size: S1.SMALL1
allocate_public_ip: True
internet_max_bandwidth_out: 1
password: '153e41ec96140152'
securitygroups:
- sg-5e90804b
Configuration Parameters
~~~~~~~~~~~~~~~~~~~~~~~~
provider
--------
**Required**. Name of entry in ``salt/cloud.providers.d/???`` file.
availability_zone
-----------------
**Required**. The availability zone that the instance is located in.
Available zones can be found using the ``list_availability_zones`` function:
.. code-block:: bash
# salt-cloud -f list_availability_zones my-tencentcloud-config
image
-----
**Required**. The image id to use for the instance.
Available images can be found using the ``--list-images`` option:
.. code-block:: bash
# salt-cloud --list-images my-tencentcloud-config
size
----
**Required**. The instance type to use. Available instance types can be found using the ``--list-sizes`` option:
.. code-block:: bash
# salt-cloud --list-sizes my-tencentcloud-config
securitygroups
--------------
**Optional**. A list of security group ids to associate with.
Available security group ids can be found using the ``list_securitygroups`` function:
.. code-block:: bash
# salt-cloud -f list_securitygroups my-tencentcloud-config
Multiple security groups are supported:
.. code-block:: yaml
tencentcloud-guangzhou-s1sm1:
securitygroups:
- sg-5e90804b
- sg-8kpynf2t
hostname
--------
**Optional**. The hostname of the instance.
instance_charge_type
--------------------
**Optional**. The charge type of the instance. Valid values are ``PREPAID``,
``POSTPAID_BY_HOUR`` and ``SPOTPAID``. The default is ``POSTPAID_BY_HOUR``.
instance_charge_type_prepaid_renew_flag
---------------------------------------
**Optional**. When enabled, the instance will be renewed automatically
when it reaches the end of the prepaid tenancy.
Valid values are ``NOTIFY_AND_AUTO_RENEW``, ``NOTIFY_AND_MANUAL_RENEW`` and ``DISABLE_NOTIFY_AND_MANUAL_RENEW``.
.. note::
This value is only used when ``instance_charge_type`` is set to ``PREPAID``.
instance_charge_type_prepaid_period
-----------------------------------
**Optional**. The tenancy time, in months, of the prepaid instance.
Valid values are ``1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36``.
.. note::
This value is only used when ``instance_charge_type`` is set to ``PREPAID``.
allocate_public_ip
------------------
**Optional**. Associate a public ip address with an instance
in a VPC or Classic network. Boolean value; the default is ``false``.
internet_max_bandwidth_out
--------------------------
**Optional**. Maximum outgoing bandwidth to the public network, measured in Mbps (megabits per second).
Value range: ``[0, 100]``. If this value is not specified, the default is ``0`` Mbps.
internet_charge_type
--------------------
**Optional**. Internet charge type of the instance. Valid values are ``BANDWIDTH_PREPAID``,
``TRAFFIC_POSTPAID_BY_HOUR``, ``BANDWIDTH_POSTPAID_BY_HOUR`` and ``BANDWIDTH_PACKAGE``.
The default is ``TRAFFIC_POSTPAID_BY_HOUR``.
key_name
--------
**Optional**. The key pair to use for the instance, for example ``skey-16jig7tx``.
password
--------
**Optional**. Login password for the instance.
private_ip
----------
**Optional**. The private ip to assign to this instance; it must be
available in the provided subnet.
project_id
----------
**Optional**. The project this instance belongs to, defaults to ``0``.
vpc_id
------
**Optional**. The id of a VPC network.
If you want to create instances in a VPC network, this parameter must be set.
subnet_id
---------
**Optional**. The id of a VPC subnet.
If you want to create instances in a VPC network, this parameter must be set.
system_disk_size
----------------
**Optional**. Size of the system disk.
Value range: ``[50, 1000]``, and unit is ``GB``. Default is ``50`` GB.
system_disk_type
----------------
**Optional**. Type of the system disk.
Valid values are ``CLOUD_BASIC``, ``CLOUD_SSD`` and ``CLOUD_PREMIUM``, default value is ``CLOUD_BASIC``.
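Once a provider and a profile like the one above are in place, an instance can be created from the profile with ``salt-cloud -p``. This is only a sketch; ``myinstance`` is a placeholder name and the profile name matches the earlier example:
.. code-block:: bash
$ salt-cloud -p tencentcloud-guangzhou-s1sm1 myinstance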
Actions
=======
The following actions are supported by the Tencent Cloud Salt Cloud driver.
show_instance
~~~~~~~~~~~~~
This action is a thin wrapper around ``--full-query``, which displays details on a
single instance only. In an environment with several machines, this will save a
user from having to sort through all instance data, just to examine a single
instance.
.. code-block:: bash
$ salt-cloud -a show_instance myinstance
show_disk
~~~~~~~~~
Return disk details about a specific instance.
.. code-block:: bash
$ salt-cloud -a show_disk myinstance
destroy
~~~~~~~
Destroy a Tencent Cloud instance.
.. code-block:: bash
$ salt-cloud -a destroy myinstance
start
~~~~~
Start a Tencent Cloud instance.
.. code-block:: bash
$ salt-cloud -a start myinstance
stop
~~~~
Stop a Tencent Cloud instance.
.. code-block:: bash
$ salt-cloud -a stop myinstance
reboot
~~~~~~
Reboot a Tencent Cloud instance.
.. code-block:: bash
$ salt-cloud -a reboot myinstance
Functions
=========
The following functions are currently supported by the Tencent Cloud Salt Cloud driver.
list_securitygroups
~~~~~~~~~~~~~~~~~~~
Lists all Tencent Cloud security groups in the current region.
.. code-block:: bash
$ salt-cloud -f list_securitygroups my-tencentcloud-config
list_availability_zones
~~~~~~~~~~~~~~~~~~~~~~~
Lists all Tencent Cloud availability zones in the current region.
.. code-block:: bash
$ salt-cloud -f list_availability_zones my-tencentcloud-config
list_custom_images
~~~~~~~~~~~~~~~~~~
Lists any custom images associated with the account. These images can
be used to create a new instance.
.. code-block:: bash
$ salt-cloud -f list_custom_images my-tencentcloud-config
show_image
~~~~~~~~~~
Return details about a specific image. This image can be used
to create a new instance.
.. code-block:: bash
$ salt-cloud -f show_image tencentcloud image=img-31tjrtph


@@ -4,39 +4,82 @@
Contributing
============
There is a great need for contributions to Salt and patches are welcome! The goal
here is to make contributions clear, make sure there is a trail for where the code
has come from, and most importantly, to give credit where credit is due!
There is a great need for contributions to Salt and patches are welcome! The
goal here is to make contributions clear, make sure there is a trail for where
the code has come from, and most importantly, to give credit where credit is
due!
There are a number of ways to contribute to Salt development.
There are a number of ways to contribute to Salt development, including (but
not limited to):
For details on how to contribute documentation improvements please review
:ref:`Writing Salt Documentation <salt-docs>`.
* filing well-written bug reports
* enhancing the documentation
* providing workarounds, patches, and other code without tests
* engaging in constructive discussion
* helping out in `#salt on Freenode <#salt on freenode_>`_,
the `Community Slack <SaltStack Community Slack_>`_,
the `salt-users <salt-users_>`_ mailing list,
a `SaltStack meetup <saltstack meetup_>`_,
or `Server Fault <saltstack on serverfault_>`_.
* telling others about problems you solved with Salt
If this or other Salt documentation is unclear, please review :ref:`Writing
Salt Documentation <salt-docs>`. PRs are welcome!
Quickstart
----------
If you just want to get started before reading the rest of this guide, you can
get the process started by running the following:
.. code-block:: bash
python3 -m pip install --user pre-commit
git clone --origin upstream https://github.com/saltstack/salt.git
cd salt
pre-commit install
While those commands are running, finish reading the rest of this guide.
Pre-commit
----------
To reduce friction during the development process, SaltStack uses `pre-commit
<pre-commit_>`_. This tool adds pre-commit hooks to git to automate several
processes that used to be manual. Rather than having to remember to run several
different tools before you commit, you only have to run ``git commit``, and you
will be notified about style and lint issues before you ever open a PR.
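The hooks run automatically on each ``git commit``, but they can also be run by hand. As a quick check, the following (assuming ``pre-commit`` was installed as in the Quickstart) runs every hook against all files in the repository rather than just the staged changes:
.. code-block:: bash
pre-commit run --all-files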
Salt Coding Style
-----------------
SaltStack has its own coding style guide that informs contributors on various coding
approaches. Please review the :ref:`Salt Coding Style <coding-style>` documentation
for information about Salt's particular coding patterns.
After the Neon release, SaltStack is `joining the ranks <SEP 15_>`_ of projects
in adopting the `Black code formatter <Black_>`_ in order to ease the adoption
of a unified code formatting style.
Where Black is silent, SaltStack has its own coding style guide that informs
contributors on various style points. Please review the :ref:`Salt Coding Style
<coding-style>` documentation for information about Salt's particular coding
patterns.
Within the :ref:`Salt Coding Style <coding-style>` documentation, there is a
section about running Salt's ``.testing.pylintrc`` file. SaltStack recommends
running the ``.testing.pylintrc`` file on any files you are changing with your
code contribution before submitting a pull request to Salt's repository. Please
see the :ref:`Linting<pylint-instructions>` documentation for more information.
code contribution before submitting a pull request to Salt's repository.
.. note::
If you've installed ``pre-commit``, this will automatically happen before each
commit. Otherwise, see the :ref:`Linting<pylint-instructions>` documentation
for more information.
There are two pylint files in the ``salt`` directory. One is the
``.pylintrc`` file and the other is the ``.testing.pylintrc`` file. The
tests that run in Jenkins against GitHub Pull Requests use
``.testing.pylintrc``. The ``.testing.pylintrc`` file is a little less
strict than the ``.pylintrc`` and is used to make it easier for contributors
to submit changes. The ``.pylintrc`` file can be used for linting, but the
``.testing.pylintrc`` is the source of truth when submitting pull requests.
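As a rough sketch, linting a changed file against the testing profile looks like the following; the module path is only a placeholder, and the salt-pylint plugins referenced by that rcfile may also need to be installed:
.. code-block:: bash
pylint --rcfile=.testing.pylintrc salt/modules/example_module.py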
Copyright Headers
-----------------
Copyright headers are not needed for files in the Salt project. Files that have
existing copyright headers should be considered legacy and not an example to
follow.
.. _github-pull-request:
@@ -48,7 +91,8 @@ contributions. The workflow advice below mirrors `GitHub's own guide <GitHub
Fork a Repo Guide_>`_ and is well worth reading.
#. `Fork saltstack/salt`_ on GitHub.
#. Make a local clone of your fork.
#. Make a local clone of your fork. (Skip this step if you followed
the Quickstart)
.. code-block:: bash
@@ -61,6 +105,12 @@ Fork a Repo Guide_>`_ and is well worth reading.
git remote add upstream https://github.com/saltstack/salt.git
If you followed the Quickstart, you'll add your own remote instead:
.. code-block:: bash
git remote add my-account git@github.com:my-account/salt.git
#. Create a new branch in your clone.
.. note::
@@ -69,47 +119,34 @@ Fork a Repo Guide_>`_ and is well worth reading.
feature Y". Multiple unrelated fixes and/or features should be
isolated into separate branches.
If you're working on a bug or documentation fix, create your branch from
the oldest **supported** main release branch that contains the bug or requires the documentation
update. See :ref:`Which Salt Branch? <which-salt-branch>`.
.. code-block:: bash
git fetch upstream
git checkout -b fix-broken-thing upstream/2016.11
If you're working on a feature, create your branch from the |repo_primary_branch| branch.
.. code-block:: bash
git fetch upstream
git checkout -b add-cool-feature upstream/|repo_primary_branch|
git checkout -b fix-broken-thing upstream/master
#. Edit and commit changes to your branch.
.. code-block:: bash
vim path/to/file1 path/to/file2
vim path/to/file1 path/to/file2 tests/test_file1.py tests/test_file2.py
git diff
git add path/to/file1 path/to/file2
git commit
Write a short, descriptive commit title and a longer commit message if
necessary.
necessary. Use an imperative style for the title.
.. note::
GOOD
If your change fixes a bug or implements a feature already filed in the
`issue tracker`_, be sure to
`reference the issue <https://help.github.com/en/articles/closing-issues-using-keywords>`_
number in the commit message body.
.. code-block:: bash
.. code-block::
Fix broken things in file1 and file2
Fixes #31337
We needed to make this change because the underlying dependency
changed. Now this uses the up-to-date API.
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
# On branch fix-broken-thing
@@ -117,6 +154,30 @@ Fork a Repo Guide_>`_ and is well worth reading.
# modified: path/to/file1
# modified: path/to/file2
BAD
.. code-block::
Fixes broken things
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
# On branch fix-broken-thing
# Changes to be committed:
# modified: path/to/file1
# modified: path/to/file2
Taking a few moments to explain *why* you made a change will save time
and effort in the future when others come to investigate a change. A
clear explanation of why something changed can help future developers
avoid introducing bugs, or breaking an edge case.
.. note::
If your change fixes a bug or implements a feature already filed in the
`issue tracker`_, be sure to
`reference the issue <https://help.github.com/en/articles/closing-issues-using-keywords>`_
number in the commit message body.
If you get stuck, there are many introductory Git resources on
http://help.github.com.
@@ -141,17 +202,9 @@ Fork a Repo Guide_>`_ and is well worth reading.
.. code-block:: bash
git fetch upstream
git rebase upstream/2016.11 fix-broken-thing
git rebase upstream/master fix-broken-thing
git push -u origin fix-broken-thing
or
.. code-block:: bash
git fetch upstream
git rebase upstream/|repo_primary_branch| add-cool-feature
git push -u origin add-cool-feature
If you do rebase, and the push is rejected with a
``(non-fast-forward)`` comment, then run ``git status``. You will
likely see a message about the branches diverging:
@@ -180,18 +233,11 @@ Fork a Repo Guide_>`_ and is well worth reading.
https://github.com/my-account/salt/pull/new/fix-broken-thing
#. If your branch is a fix for a release branch, choose that as the base
branch (e.g. ``2016.11``),
https://github.com/my-account/salt/compare/saltstack:2016.11...fix-broken-thing
If your branch is a feature, choose ``|repo_primary_branch|`` as the base branch,
https://github.com/my-account/salt/compare/saltstack:|repo_primary_branch|...add-cool-feature
#. Choose ``master`` as the base Salt branch.
#. Review that the proposed changes are what you expect.
#. Write a descriptive comment. Include links to related issues (e.g.
'Fixes #31337.') in the comment field.
#. Write a descriptive comment. If you added good information to your git
commit message, they will already be present here. Include links to
related issues (e.g. 'Fixes #31337.') in the comment field.
#. Click ``Create pull request``.
#. Salt project members will review your pull request and automated tests will
@@ -209,8 +255,8 @@ Fork a Repo Guide_>`_ and is well worth reading.
Pull request against `saltstack/salt`_ are automatically tested on a
variety of operating systems and configurations. On average these tests
take 30 minutes. Depending on your GitHub notification settings you may
also receive an email message about the test results.
take a couple of hours. Depending on your GitHub notification settings
you may also receive an email message about the test results.
Test progress and results can be found at http://jenkins.saltstack.com/.
@@ -219,209 +265,34 @@ Fork a Repo Guide_>`_ and is well worth reading.
Salt's Branch Topology
----------------------
There are three different kinds of branches in use: |repo_primary_branch|, main release
branches, and dot release branches.
- All feature work should go into the ``|repo_primary_branch|`` branch.
- Bug fixes and documentation changes should go into the oldest **supported
main** release branch affected by the the bug or documentation change (you
can use the blame button in github to figure out when the bug was introduced).
Supported releases are the last 2 releases. For example, if the latest release
is 2018.3, the last two release are 2018.3 and 2017.7.
Main release branches are named after a year and month, such as
``2016.11`` and ``2017.7``.
- Hot fixes, as determined by SaltStack's release team, should be submitted
against **dot** release branches. Dot release branches are named after a
year, month, and version. Examples include ``2016.11.8`` and ``2017.7.2``.
.. note::
GitHub will open pull requests against Salt's main branch, ``|repo_primary_branch|``,
by default. Be sure to check which branch is selected when creating the
pull request.
The |repo_primary_branch| Branch
================================
The ``|repo_primary_branch|`` branch is unstable and bleeding-edge. Pull requests containing
feature additions or non-bug-fix changes should be made against the ``|repo_primary_branch|``
branch.
.. note::
If you have a bug fix or documentation change and have already forked your
working branch from ``|repo_primary_branch|`` and do not know how to rebase your commits
against another branch, then submit it to ``|repo_primary_branch|`` anyway. SaltStack's
development team will be happy to back-port it to the correct branch.
**Please make sure you let the maintainers know that the pull request needs
to be back-ported.**
Main Release Branches
=====================
The current release branch is the most recent stable release. Pull requests
containing bug fixes or documentation changes should be made against the oldest supported main
release branch that is affected.
The branch name will be a date-based name such as ``2016.11``.
Bug fixes are made on this branch so that dot release branches can be cut from
the main release branch without introducing surprises and new features. This
approach maximizes stability.
Dot Release Branches
====================
Prior to tagging an official release, a branch will be created when the SaltStack
release team is ready to tag. The dot release branch is created from a main release
branch. The dot release branch will be the same name as the tag minus the ``v``.
For example, the ``2017.7.1`` dot release branch was created from the ``2017.7``
main release branch. The ``v2017.7.1`` release was tagged at the ``HEAD`` of the
``2017.7.1`` branch.
This branching strategy will allow for more stability when there is a need for
a re-tag during the testing phase of the release process and further increases
stability.
Once the dot release branch is created, the fixes required for a given release,
as determined by the SaltStack release team, will be added to this branch. All
commits in this branch will be merged forward into the main release branch as
well.
Merge Forward Process
=====================
The Salt repository follows a "Merge Forward" policy. The merge-forward
behavior means that changes submitted to older main release branches will
automatically be "merged-forward" into the newer branches.
For example, a pull request is merged into ``2017.7``. Then, the entire
``2017.7`` branch is merged-forward into the ``2018.3`` branch, and the
``2018.3`` branch is merged-forward into the ``|repo_primary_branch|`` branch.
This process makes is easy for contributors to make only one pull-request
against an older branch, but allows the change to propagate to all **main**
release branches.
The merge-forward work-flow applies to all main release branches and the
operation runs continuously.
Merge-Forwards for Dot Release Branches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The merge-forward policy applies to dot release branches as well, but has a
slightly different behavior. If a change is submitted to a **dot** release
branch, the dot release branch will be merged into its parent **main**
release branch.
For example, a pull request is merged into the ``2017.7.2`` release branch.
Then, the entire ``2017.7.2`` branch is merged-forward into the ``2017.7``
branch. From there, the merge forward process continues as normal.
The only way in which dot release branches differ from main release branches
in regard to merge-forwards, is that once a dot release branch is created
from the main release branch, the dot release branch does not receive merge
forwards.
.. note::
The merge forward process for dot release branches is one-way:
dot release branch --> main release branch.
Salt uses a typical branch strategy - ``master`` is the next expected release.
Code should only make it to ``master`` once it's production ready. This means
that typical changes (fixes, features) should have accompanying tests.
Closing GitHub issues from commits
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This "merge-forward" strategy requires that `the magic keywords to close a
GitHub issue <Closing issues via commit message_>`_ appear in the commit
message text directly. Only including the text in a pull request will not
close the issue.
GitHub will close the referenced issue once the *commit* containing the
magic text is merged into the default branch (``|repo_primary_branch|``). Any magic text
input only into the pull request description will not be seen at the
Git-level when those commits are merged-forward. In other words, only the
commits are merged-forward and not the pull request text.
SaltStack encourages using `the magic keywords to close a GitHub issue <Closing
issues via commit message_>`_. These should appear in the commit message text
directly.
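A hypothetical commit message using one of the closing keywords could look like
the following (the subject line and issue number are placeholders):

.. code-block:: text

    Fix the regression described in the referenced issue

    Fixes #<issue-number>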
.. _backporting-pull-requests:
Backporting Pull Requests
=========================
-------------------------
If a bug is fixed on ``|repo_primary_branch|`` and the bug is also present on a
currently-supported release branch, it will need to be back-ported to an
applicable branch.
.. note:: Most Salt contributors can skip these instructions
These instructions do not need to be read in order to contribute to the
Salt project! The SaltStack team will back-port fixes on behalf of
contributors in order to keep the contribution process easy.
These instructions are intended for frequent Salt contributors, advanced
Git users, SaltStack employees, or independent souls who wish to back-port
changes themselves.
It is often easiest to fix a bug on the oldest supported release branch and
then merge that branch forward into ``|repo_primary_branch|`` (as described earlier in this
document). When that is not possible the fix must be back-ported, or copied,
into any other affected branches.
These steps assume a pull request ``#1234`` has been merged into
``|repo_primary_branch|``, and that ``upstream`` is the name of the remote
pointing to the main Salt repo.
#. Identify the oldest supported release branch that is affected by the bug.
#. Create a new branch for the back-port by reusing the same branch from the
original pull request.
Name the branch ``bp-<NNNN>`` and use the number of the original pull
request.
.. code-block:: bash
git fetch upstream refs/pull/1234/head:bp-1234
git checkout bp-1234
#. Find the parent commit of the original pull request.
The parent commit of the original pull request must be known in order to
rebase onto a release branch. The easiest way to find this is on GitHub.
Open the original pull request on GitHub and find the first commit in the
list of commits. Select and copy the SHA for that commit. The parent of
that commit can be specified by appending ``~1`` to the end.
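For example, assuming the first commit of the original pull request has the
placeholder SHA ``abc1234``, its parent can be resolved like this:

.. code-block:: bash

    # abc1234 is the SHA copied from the pull request's first commit
    git rev-parse abc1234~1   # prints the SHA to use as <orig-base>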
#. Rebase the new branch on top of the release branch.
* ``<release-branch>`` is the branch identified in step #1.
* ``<orig-base>`` is the SHA identified in step #3 -- don't forget to add
``~1`` to the end!
.. code-block:: bash
git rebase --onto <release-branch> <orig-base> bp-1234
Note that release branches prior to ``2016.11`` cannot make use of rebase and
must use cherry-picking instead.
#. Push the back-port branch to GitHub and open a new pull request.
Opening a pull request for the back-port allows for the test suite and
normal code-review process.
.. code-block:: bash
git push -u origin bp-1234
On rare occasions, a serious bug will be found in the middle of a release
cycle. These bugs will require a point release. Contributors should still
submit fixes directly to ``master``, but they should also call attention to the
fact that it addresses a critical issue and will need to be back-ported.
Keeping Salt Forks in Sync
--------------------------
Salt advances quickly. It is therefore critical to pull upstream changes
from upstream into your fork on a regular basis. Nothing is worse than putting
hard work into a pull request only to see bunches of merge conflicts because it
has diverged too far from upstream.
Salt advances quickly. It is therefore critical to pull upstream changes from
upstream into your fork on a regular basis. Nothing is worse than putting hard
work into a pull request only to see bunches of merge conflicts because it has
diverged too far from upstream.
.. seealso:: `GitHub Fork a Repo Guide`_
@ -450,20 +321,20 @@ the name of the main `saltstack/salt`_ repository.
git fetch upstream
#. Update your copy of the ``|repo_primary_branch|`` branch.
#. Update your copy of the ``master`` branch.
.. code-block:: bash
git checkout |repo_primary_branch|
git merge --ff-only upstream/|repo_primary_branch|
git checkout master
git merge --ff-only upstream/master
If Git complains that a fast-forward merge is not possible, you have local
commits.
* Run ``git pull --rebase origin |repo_primary_branch|`` to rebase your changes on top of
* Run ``git pull --rebase origin master`` to rebase your changes on top of
the upstream changes.
* Or, run ``git branch <branch-name>`` to create a new branch with your
commits. You will then need to reset your ``|repo_primary_branch|`` branch before
commits. You will then need to reset your ``master`` branch before
updating it with the changes from upstream.
If Git complains that local files will be overwritten, you have changes to
@ -474,7 +345,7 @@ the name of the main `saltstack/salt`_ repository.
.. code-block:: bash
git push origin |repo_primary_branch|
git push origin master
#. Repeat the previous two steps for any other branches you work with, such as
the current release branch.
@ -505,28 +376,6 @@ If you do not wish to receive these notifications, please add your GitHub
handle to the blacklist line in the ``.mention-bot`` file located in the
root of the Salt repository.
.. _probot-gpg-verification:
GPG Verification
----------------
SaltStack has enabled `GPG Probot`_ to enforce GPG signatures for all
commits included in a Pull Request.
In order for the GPG verification status check to pass, *every* contributor in
the pull request must:
- Set up a GPG key on their local machine
- Sign all commits in the pull request with that key
- Link the GPG key with their GitHub account
This applies to all commits in the pull request.
GitHub hosts a number of `help articles`_ for creating a GPG key, using the
GPG key with ``git`` locally, and linking the GPG key to your GitHub account.
Once these steps are completed, the commit signing verification will look like
the example in GitHub's `GPG Signature Verification feature announcement`_.
Bootstrap Script Changes
------------------------
@ -551,6 +400,13 @@ Script, see the Bootstrap Script's `Contributing Guidelines`_.
.. _GPG Probot: https://probot.github.io/apps/gpg/
.. _help articles: https://help.github.com/articles/signing-commits-with-gpg/
.. _GPG Signature Verification feature announcement: https://github.com/blog/2144-gpg-signature-verification
.. _bootstrap-salt.sh: https://github.com/saltstack/salt/blob/|repo_primary_branch|/salt/cloud/deploy/bootstrap-salt.sh
.. _bootstrap-salt.sh: https://github.com/saltstack/salt/blob/master/salt/cloud/deploy/bootstrap-salt.sh
.. _salt-bootstrap repo: https://github.com/saltstack/salt-bootstrap
.. _Contributing Guidelines: https://github.com/saltstack/salt-bootstrap/blob/develop/CONTRIBUTING.md
.. _`Black`: https://pypi.org/project/black/
.. _`SEP 15`: https://github.com/saltstack/salt-enhancement-proposals/pull/21
.. _`pre-commit`: https://pre-commit.com/
.. _`SaltStack Community Slack`: https://saltstackcommunity.herokuapp.com/
.. _`#salt on freenode`: http://webchat.freenode.net/?channels=salt&uio=Mj10cnVlJjk9dHJ1ZSYxMD10cnVl83
.. _`saltstack meetup`: https://www.meetup.com/pro/saltstack/
.. _`saltstack on serverfault`: https://serverfault.com/questions/tagged/saltstack

View file

@ -4,17 +4,13 @@
Salt Coding Style
=================
Salt is developed with a certain coding style, while the style is dominantly
PEP 8 it is not completely PEP 8. It is also noteworthy that a few
development techniques are also employed which should be adhered to. In the
end, the code is made to be "Salty".
To make it easier to contribute and read Salt code, SaltStack has `adopted
Black <SEP 15_>`_ as its code formatter. There are a few places where Black is
silent, and this guide should be used in those cases.
Most importantly though, we will accept code that violates the coding style and
KINDLY ask the contributor to fix it, or go ahead and fix the code on behalf of
the contributor. Coding style is NEVER grounds to reject code contributions,
and is never grounds to talk down to another member of the community (There are
no grounds to treat others without respect, especially people working to
improve Salt)!!
Coding style is NEVER grounds to reject code contributions, and is never
grounds to talk down to another member of the community (There are no grounds
to treat others without respect, especially people working to improve Salt)!
.. _pylint-instructions:
@ -65,27 +61,6 @@ Multi-word variables should be separated by an underscore.
Variables which are two-letter words should have an underscore appended
to them to pad them to three characters.
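A minimal sketch of the padding rule (the values are illustrative):

.. code-block:: python

    os_ = 'Fedora'    # "os" padded so it does not shadow the os module
    id_ = 'minion01'  # "id" padded so it does not shadow the id() builtin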
Strings
=======
Salt follows a few rules when formatting strings:
Single Quotes
-------------
In Salt, all strings use single quotes unless there is a good reason not to.
This means that docstrings use single quotes, standard strings use single
quotes etc.:
.. code-block:: python
def foo():
    '''
    A function that does things
    '''
    name = 'A name'
    return name
Formatting Strings
------------------
@ -104,31 +79,8 @@ Please do NOT use printf formatting.
Docstring Conventions
---------------------
Docstrings should always add a newline; docutils takes care of the new line
and it makes the code cleaner and more vertical:
`GOOD`:
.. code-block:: python
def bar():
    '''
    Here lies a docstring with a newline after the quotes and is the salty
    way to handle it! Vertical code is the way to go!
    '''
    return

`BAD`:

.. code-block:: python

def baz():
    '''This is not ok!'''
    return
When adding a new function or state, where possible try to use a
``versionadded`` directive to denote when the function or state was added.
``versionadded`` directive to denote when the function, state, or parameter was added.
.. code-block:: python
@ -141,16 +93,13 @@ When adding a new function or state, where possible try to use a
msg : None
The string to be printed.
'''
print msg
print(msg)
If you are uncertain what version should be used, either consult a core
developer in IRC or bring this up when opening your
:ref:`pull request <installing-for-development>` and a core developer will add the proper
version once your pull request has been merged. Bugfixes will be available in a
bugfix release (i.e. 0.17.1, the first bugfix release for 0.17.0), while new
features are held for feature releases, and this will affect what version
number should be used in the ``versionadded`` directive.
developer in IRC or bring this up when opening your :ref:`pull request
<installing-for-development>` and a core developer will let you know what
version to add. Typically this will be the next element in the `periodic table
<https://en.wikipedia.org/wiki/List_of_chemical_elements>`_.
Similar to the above, when an existing function or state is modified (for
example, when an argument is added), then under the explanation of that new
@ -176,7 +125,7 @@ significantly, the ``versionchanged`` directive can be used to clarify this:
.. versionadded 0.17.0
'''
print 'Greetings! {0}\n\n{1}'.format(msg, signature)
print('Greetings! {0}\n\n{1}'.format(msg, signature))
Dictionaries
@ -257,130 +206,16 @@ avoided.
.. _`absolute imports`: http://legacy.python.org/dev/peps/pep-0328/#rationale-for-absolute-imports
Vertical is Better
==================
When writing Salt code, vertical code is generally preferred. This is not a hard
rule but more of a guideline. As PEP 8 specifies, Salt code should not exceed 79
characters on a line, but it is preferred to separate code out into more
newlines in some cases for better readability:
.. code-block:: python
import os
os.chmod(
        os.path.join(self.opts['sock_dir'],
                     'minion_event_pub.ipc'),
        448
)
Where there are more line breaks, this is also apparent when constructing a
function with many arguments, something very common in state functions for
instance:
.. code-block:: python
def managed(name,
        source=None,
        source_hash='',
        user=None,
        group=None,
        mode=None,
        template=None,
        makedirs=False,
        context=None,
        replace=True,
        defaults=None,
        saltenv=None,
        backup='',
        **kwargs):
.. note::
Making function and class definitions vertical is only required if the
arguments are longer than 80 characters. Otherwise, the formatting is
optional and both are acceptable.
Line Length
-----------
For function definitions and function calls, Salt adheres to the PEP-8
specification of at most 80 characters per line.
Non function definitions or function calls, please adopt a soft limit of 120
characters per line. If breaking the line reduces the code readability, don't
break it. Still, try to avoid passing that 120 characters limit and remember,
**vertical is better... unless it isn't**
Indenting
=========
Some confusion exists in the Python world about indenting things like function
calls; the above examples use 8 spaces when indenting comma-delimited
constructs.

The confusion arises because the pep8 program INCORRECTLY flags this as wrong.
PEP 8, the document, only cites using 4 spaces here as wrong, because 4 spaces
cannot be differentiated from a new indent level.
Right:
.. code-block:: python
def managed(name,
        source=None,
        source_hash='',
        user=None)

WRONG:

.. code-block:: python

def managed(name,
    source=None,
    source_hash='',
    user=None)

Lining up the indent is also correct:

.. code-block:: python

def managed(name,
            source=None,
            source_hash='',
            user=None)
This also applies to function calls and other hanging indents.
pep8 and Flake8 (and, by extension, the vim plugin Syntastic) will complain
about the double indent for hanging indents. This is a `known conflict
<https://github.com/jcrocholl/pep8/issues/167#issuecomment-15936564>`_ between
pep8 (the script) and the actual PEP 8 standard. It is recommended that this
particular warning be ignored with the following lines in
``~/.config/flake8``:
.. code-block:: ini
[flake8]
ignore = E226,E241,E242,E126
Make sure your Flake8/pep8 are up to date. The first three errors are ignored
by default and are present here to keep the behavior the same. This will also
work for pep8 without the Flake8 wrapper -- just replace all instances of
'flake8' with 'pep8', including the filename.
Code Churn
==========
Many pull requests have been submitted that only churn code in the name of
PEP 8. Code churn is a leading source of bugs and is strongly discouraged.
PEP 8. Code churn is a leading source of bugs and is **strongly discouraged**.
While style fixes are encouraged, they should be isolated to a single file per
commit, and the changes should be legitimate. If there are any questions about
whether a style change is legitimate, please reference this document and the
official PEP 8 (http://legacy.python.org/dev/peps/pep-0008/) document before
changing code. Many claims that a change is PEP 8 have been invalid; please
double check before committing fixes.
.. _`SEP 15`: https://github.com/saltstack/salt-enhancement-proposals/pull/21

View file

@ -42,13 +42,56 @@ the example below:
test.ping: []
network.ip_addrs:
interface: eth0
cidr: '10.0.0.0/8'
cidr: 10.0.0.0/8
In the example above :py:mod:`salt.modules.network.ip_addrs` has additional
filters to help narrow down the results. In the above example IP addresses
are only returned if they are on a eth0 interface and in the 10.0.0.0/8 IP
range.
.. versionchanged:: Sodium
The format for defining mine_functions has been changed to allow the same
format as used for module.run. The old format (above) is still supported.
.. code-block:: yaml
mine_functions:
test.ping: []
network.ip_addrs:
- interface: eth0
- cidr: 10.0.0.0/8
test.arg:
- isn't
- this
- fun
- this: that
- salt: stack
.. _mine_minion-side-acl:
Minion-side Access Control
--------------------------
.. versionadded:: Sodium
Mine functions can be targeted to only be available to specific minions. This
uses the same targeting parameters as :ref:`targeting`, but with the keywords
``allow_tgt`` and ``allow_tgt_type``. When a minion requests a function from the
salt mine that it is not allowed to request (i.e. the requesting minion is not in
the list produced by the combination of ``allow_tgt`` and ``allow_tgt_type``),
it will get no data, just as if the requested function were not present in the
salt mine.
.. code-block:: yaml
mine_functions:
network.ip_addrs:
- interface: eth0
- cidr: 10.0.0.0/8
- allow_tgt: 'G@role:master'
- allow_tgt_type: 'compound'
Mine Functions Aliases
----------------------
@ -71,6 +114,25 @@ positional and key-value arguments is not supported.
- mine_function: grains.get
- ip_interfaces
.. versionchanged:: Sodium
With the addition of the module.run-like format for defining mine_functions, the
method of adding aliases remains similar. Just add a ``mine_function`` kwarg with
the name of the real function to call, making the key below ``mine_functions``
the alias:
.. code-block:: yaml
mine_functions:
alias_name:
- mine_function: network.ip_addrs
- eth0
internal_ip_addrs:
- mine_function: network.ip_addrs
- cidr: 192.168.0.0/16
ip_list:
- mine_function: grains.get
- ip_interfaces
.. _mine_interval:
@ -123,6 +185,7 @@ stored in a different location. Here is an example of a flat roster containing
of the Minion in question. This results in a non-trivial delay in
retrieving the requested data.
Minions Targeting with Mine
===========================

View file

@ -4,6 +4,171 @@
Salt Release Notes - Codename Neon
==================================
Saltcheck Updates
=================
Available since 2018.3, the :py:func:`saltcheck module <salt.modules.saltcheck>`
has been enhanced to:
* Support saltenv environments
* Associate tests with states by naming convention
* Add a report_highstate_tests function
* Add empty and notempty assertions
* Add a skip keyword
* Add a print_result keyword
* Add an assertion_section keyword
* Use saltcheck.state_apply to run state.apply for test setup or teardown
* Display the time taken per test in the output
* Work with salt-ssh
Saltcheck provides unittest-like functionality requiring only the knowledge of
salt module execution and YAML. Saltcheck uses salt modules to return data, then
runs an assertion against that return. This allows for testing with all the
features included in salt modules.
In order to run state and highstate saltcheck tests, a sub-folder in the state directory
must be created and named ``saltcheck-tests``. Tests for a state should be created in files
ending in ``*.tst`` and placed in the ``saltcheck-tests`` folder. ``tst`` files are run
through the salt rendering system, enabling tests to be written in yaml (or renderer of choice),
and include jinja, as well as the usual grain and pillar information. Like states, multiple tests can
be specified in a ``tst`` file. Multiple ``tst`` files can be created in the ``saltcheck-tests``
folder, and should be named the same as the associated state. The ``id`` of a test works in the
same manner as in salt state files and should be unique and descriptive.
Usage
-----
Example file system layout:
.. code-block:: text
/srv/salt/apache/
init.sls
config.sls
saltcheck-tests/
init.tst
config.tst
deployment_validation.tst
Tests can be run for each state by name, for all ``apache/saltcheck-tests/*.tst`` files,
or for all states assigned to the minion in top.sls. Tests may also be created
with no associated state. These tests will be run through the use of
``saltcheck.run_state_tests``, but will not be automatically run by
``saltcheck.run_highstate_tests``.
.. code-block:: bash
salt '*' saltcheck.run_state_tests apache,apache.config
salt '*' saltcheck.run_state_tests apache check_all=True
salt '*' saltcheck.run_highstate_tests
salt '*' saltcheck.run_state_tests apache.deployment_validation
Example Tests
-------------
.. code-block:: jinja
{# will run the common salt state before further testing #}
setup_test_environment:
module_and_function: saltcheck.state_apply
args:
- common
pillar-data:
data: value
{% for package in ["apache2", "openssh"] %}
{# or another example #}
{# for package in salt['pillar.get']("packages") #}
jinja_test_{{ package }}_latest:
module_and_function: pkg.upgrade_available
args:
- {{ package }}
assertion: assertFalse
{% endfor %}
validate_user_present_and_shell:
module_and_function: user.info
args:
- root
assertion: assertEqual
expected-return: /bin/bash
assertion_section: shell
print_result: False
skip_test:
module_and_function: pkg.upgrade_available
args:
- apache2
assertion: assertFalse
skip: True
Output Format Changes
---------------------
Saltcheck output has been enhanced to display the time taken per test. This results
in a change to the output format.
Previous Output:
.. code-block:: text
local:
|_
----------
ntp:
----------
ntp-client-installed:
Pass
ntp-service-status:
Pass
|_
----------
TEST RESULTS:
----------
Failed:
0
Missing Tests:
0
Passed:
2
New output:
.. code-block:: text
local:
|_
----------
ntp:
----------
ntp-client-installed:
----------
duration:
1.0408
status:
Pass
ntp-service-status:
----------
duration:
1.464
status:
Pass
|_
----------
TEST RESULTS:
----------
Execution Time:
2.5048
Failed:
0
Missing Tests:
0
Passed:
2
Skipped:
0
Unless and onlyif Enhancements
==============================
@ -81,6 +246,41 @@ as well as managing keystore files.
Hn+GmxZA
-----END CERTIFICATE-----
XML Module
==========
A new state and execution module for editing XML files is now included. Currently it allows for
editing values from an XPath query, or editing XML IDs.
.. code-block:: bash
# salt-call xml.set_attribute /tmp/test.xml ".//actor[@id='3']" editedby "Jane Doe"
local:
True
# salt-call xml.get_attribute /tmp/test.xml ".//actor[@id='3']"
local:
----------
editedby:
Jane Doe
id:
3
# salt-call xml.get_value /tmp/test.xml ".//actor[@id='2']"
local:
Liam Neeson
# salt-call xml.set_value /tmp/test.xml ".//actor[@id='2']" "Patrick Stewart"
local:
True
# salt-call xml.get_value /tmp/test.xml ".//actor[@id='2']"
local:
Patrick Stewart
.. code-block:: yaml
ensure_value_true:
xml.value_present:
- name: /tmp/test.xml
- xpath: .//actor[@id='1']
- value: William Shatner
Jinja enhancements
==================
@ -111,7 +311,6 @@ The module can be also used to test ``json`` and ``yaml`` maps:
salt myminion jinja.import_json myformula/defaults.json
json_query filter
-----------------
@ -170,12 +369,102 @@ Also, slot parsing is now supported inside of nested state data structures (dict
- "DO NOT OVERRIDE"
ignore_if_missing: True
- The :py:func:`file.symlink <salt.states.file.symlink>` state was
fixed to remove existing file system entries other than files,
directories and symbolic links properly.
- The ``onchanges`` and ``prereq`` :ref:`requisites <requisites>` now behave
properly in test mode.
State Changes
=============
- Added new :py:func:`ssh_auth.manage <salt.states.ssh_auth.manage>` state to
ensure only the specified ssh keys are present for the specified user.
- Added new :py:func:`saltutil <salt.states.saltutil>` state to use instead of
``module.run`` to more easily handle change.
- Added new ``onfail_all`` requisite form to allow for AND logic when adding
onfail states (see the example below).
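A minimal sketch of the new requisite form (the state IDs are placeholders):

.. code-block:: yaml

    report_failure:
      cmd.run:
        - name: echo "both deploy steps failed"
        - onfail_all:
          - cmd: deploy_step_one
          - cmd: deploy_step_two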
Module Changes
==============
- The :py:func:`debian_ip <salt.modules.debian_ip>` module used by the
:py:func:`network.managed <salt.states.network.managed>` state has been
heavily refactored. The order that options appear in inet/inet6 blocks may
produce cosmetic changes. Many options without an 'ipvX' prefix will now be
shared between inet and inet6 blocks. The options ``enable_ipv4`` and
``enable_ipv6`` will now fully remove relevant inet/inet6 blocks. Overriding
options by prefixing them with 'ipvX' will now work with most options (i.e.
``dns`` can be overridden by ``ipv4dns`` or ``ipv6dns``). The ``proto`` option
is now required.
- Added new :py:func:`boto_ssm <salt.modules.boto_ssm>` module to set and query
secrets in AWS SSM parameters.
- Added new :py:func:`flatpak <salt.modules.flatpak>` module to work with flatpak packages.
- The :py:func:`file.set_selinux_context <salt.modules.file.set_selinux_context>`
module now supports persistent changes with ``persist=True`` by calling the
:py:func:`selinux.fcontext_add_policy <salt.modules.selinux.fcontext_add_policy>` module
(see the example at the end of this section).
- The :py:func:`file.remove <salt.modules.file.remove>` module was
fixed to remove file system entries other than files, directories
and symbolic links properly.
- The :py:func:`yumpkg <salt.modules.yumpkg>` module has been updated to support
VMWare's Photon OS, which uses tdnf (a C implementation of dnf).
- The :py:func:`chocolatey.bootstrap <salt.modules.chocolatey.bootstrap>` function
has been updated to support offline installation.
- The :py:func:`chocolatey.unbootstrap <salt.modules.chocolatey.unbootstrap>` function
has been added to uninstall Chocolatey.
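As a sketch of the new SELinux persistence behavior mentioned above (the path
and context type are illustrative, not taken from this release):

.. code-block:: bash

    salt '*' file.set_selinux_context /srv/www/index.html type=httpd_sys_content_t persist=True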
Runner Changes
==============
- The :py:func:`saltutil.sync_auth <salt.runners.saltutil.sync_auth>` function
has been added to sync loadable auth modules. :py:func:`saltutil.sync_all <salt.runners.saltutil.sync_all>`
will also include these modules.
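For example, the new runner can be invoked from the master (a minimal sketch):

.. code-block:: bash

    salt-run saltutil.sync_auth
    # or sync everything, which now also includes auth modules
    salt-run saltutil.sync_all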
Util Changes
============
- The :py:func:`win_dotnet <salt.utils.win_dotnet>` Salt util has been added to
make it easier to detect the versions of .NET installed on the system. It includes
the following functions:
- :py:func:`versions <salt.utils.win_dotnet.versions>`
- :py:func:`versions_list <salt.utils.win_dotnet.versions_list>`
- :py:func:`versions_details <salt.utils.win_dotnet.versions_details>`
- :py:func:`version_at_least <salt.utils.win_dotnet.version_at_least>`
Serializer Changes
==================
- The configparser serializer and deserializer functions can now be made to preserve
case of item names by passing 'preserve_case=True' in the options parameter of the function.
.. note::
This is a parameter consumed only by the salt.serializer.configparser serialize and
deserialize functions and not the low-level configparser python object.
For example, in a file.serialize state:
.. code-block:: yaml
some.ini:
- file.serialize:
- formatter: configparser
- merge_if_exists: True
- deserializer_opts:
- preserve_case: True
- serializer_opts:
- preserve_case: True
Enhancements to Engines
=======================
@ -209,6 +498,11 @@ Module Changes
Deprecations
============
Raet Deprecated
---------------
- The Raet transport has been deprecated. Please use the supported
transport protocols tcp or zeromq.
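For example, a master or minion can be pointed at a supported transport in its
configuration file (a minimal sketch):

.. code-block:: yaml

    # /etc/salt/master or /etc/salt/minion
    transport: zeromq    # or: tcp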
Module Deprecations
-------------------

View file

@ -0,0 +1,29 @@
:orphan:
====================================
Salt Release Notes - Codename Sodium
====================================
Salt mine updates
=================
Syntax update
-------------
The syntax for defining salt mine functions in config or pillar files has
changed to also support the syntax used in
:py:mod:`module.run <salt.states.module.run>`. The old syntax for the
mine_function (as a dict, or as a list with dicts that contain more than
exactly one key) is still supported but discouraged in favor of the more
uniform syntax of module.run.
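A minimal sketch of the new, module.run-like syntax, mirroring the example in
the mine documentation above:

.. code-block:: yaml

    mine_functions:
      network.ip_addrs:
        - interface: eth0
        - cidr: 10.0.0.0/8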
Minion-side ACL
---------------
Salt has had master-side ACL for the salt mine for some time, where the master
configuration contained ``mine_get``, which specified which minions could request
which functions. However, now you can specify which minions can access a function
in the salt mine function definition itself (or when calling :py:func:`mine.send <salt.modules.mine.send>`).
This targeting works the same as the generic minion targeting as specified
:ref:`here <targeting>`. The parameters used are ``allow_tgt`` and ``allow_tgt_type``.
See also :ref:`the documentation of the Salt Mine <mine_minion-side-acl>`.
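For example, as shown in the mine documentation above (the target expression is
illustrative):

.. code-block:: yaml

    mine_functions:
      network.ip_addrs:
        - interface: eth0
        - allow_tgt: 'G@role:master'
        - allow_tgt_type: 'compound'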

View file

@ -27,7 +27,7 @@ High level differences to yum and apt are:
- The repository metadata (SLS files) is hosted through either salt or
git.
- Packages can be downloaded from within the salt repository, a git
repository or from http(s) or ftp urls.
repository or from HTTP(S) or FTP URLs.
- No dependencies are managed. Dependencies between packages need to
be managed manually.
@ -264,7 +264,7 @@ in the master config file:
Creating a Package Definition SLS File
======================================
The package definition file is a yaml file that contains all the information
The package definition file is a YAML file that contains all the information
needed to install a piece of software using salt. It defines information about
the package, including the version, full name, flags required for the installer and
uninstaller, and whether or not to use the Windows task scheduler to install the
@ -315,17 +315,17 @@ The version line is the version for the package to be installed. It is used when
you need to install a specific version of a piece of software.
.. warning::
The version must be enclosed in quotes, otherwise the yaml parser will
The version must be enclosed in quotes, otherwise the YAML parser will
remove trailing zeros.
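A minimal sketch of why the quoting matters (the package name and layout are
illustrative):

.. code-block:: yaml

    mypackage:
      '1.20':          # quoted: preserved as the string 1.20
        full_name: My Package
    # without quotes, the YAML parser would load 1.20 as the float 1.2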
.. note::
There are unique situations where previous versions are unavailable. Take
Google Chrome for example. There is only one url provided for a standalone
Google Chrome for example. There is only one URL provided for a standalone
installation of Google Chrome.
(https://dl.google.com/edgedl/chrome/install/GoogleChromeStandaloneEnterprise.msi)
When a new version is released, the url just points to the new version. To
When a new version is released, the URL just points to the new version. To
handle situations such as these, set the version to `latest`. Salt will
install the version of Chrome at the URL and report that version. Here's an
example:
@ -579,7 +579,7 @@ Available parameters are as follows:
:param bool reboot: Not implemented
:param str local: Not implemented
:param str locale: Not implemented
Examples can be found at https://github.com/saltstack/salt-winrepo-ng

View file

@ -17,6 +17,7 @@ jxmlease
kazoo
keyring==5.7.1
kubernetes<4.0
libnacl==1.6.0
mock>=3.0.5; python_version < '3.6'
more-itertools==5.0.0
moto

View file

@ -60,6 +60,7 @@ jxmlease==1.0.1
kazoo==2.6.1
keyring==5.7.1
kubernetes==3.0.0
libnacl==1.6.0
lxml==4.3.3 # via junos-eznc, ncclient
mako==1.1.0
markupsafe==1.1.1

View file

@ -52,6 +52,7 @@ jxmlease==1.0.1
kazoo==2.6.1
keyring==5.7.1
kubernetes==3.0.0
libnacl==1.6.0
lxml==4.3.3 # via junos-eznc, ncclient
mako==1.1.0
markupsafe==1.1.1

View file

@ -52,6 +52,7 @@ jxmlease==1.0.1
kazoo==2.6.1
keyring==5.7.1
kubernetes==3.0.0
libnacl==1.6.0
lxml==4.3.3 # via junos-eznc, ncclient
mako==1.1.0
markupsafe==1.1.1

View file

@ -52,6 +52,7 @@ jxmlease==1.0.1
kazoo==2.6.1
keyring==5.7.1
kubernetes==3.0.0
libnacl==1.6.0
lxml==4.3.3 # via junos-eznc, ncclient
mako==1.1.0
markupsafe==1.1.1

View file

@ -52,6 +52,7 @@ jxmlease==1.0.1
kazoo==2.6.1
keyring==5.7.1
kubernetes==3.0.0
libnacl==1.6.0
lxml==4.3.3 # via junos-eznc, ncclient
mako==1.1.0
markupsafe==1.1.1

View file

@ -11,6 +11,7 @@ jmespath
jsonschema
keyring==5.7.1
kubernetes<4.0
libnacl
mock>=3.0.5; python_version < '3.6'
more-itertools==5.0.0
moto<=1.3.7

163
salt/beacons/cert_info.py Normal file
View file

@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
'''
Beacon to monitor certificate expiration dates from files on the filesystem.
.. versionadded:: Sodium
:maintainer: <devops@eitr.tech>
:maturity: new
:depends: OpenSSL
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
from datetime import datetime
import logging
# Import salt libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin,3rd-party-module-not-gated
from salt.ext.six.moves import map as _map
from salt.ext.six.moves import range as _range
# pylint: enable=import-error,no-name-in-module,redefined-builtin,3rd-party-module-not-gated
import salt.utils.files
# Import Third Party Libs
try:
from OpenSSL import crypto
HAS_OPENSSL = True
except ImportError:
HAS_OPENSSL = False
log = logging.getLogger(__name__)
DEFAULT_NOTIFY_DAYS = 45
__virtualname__ = 'cert_info'
def __virtual__():
if HAS_OPENSSL is False:
return False
return __virtualname__
def validate(config):
'''
Validate the beacon configuration
'''
_config = {}
list(_map(_config.update, config))
# Configuration for cert_info beacon should be a list of dicts
if not isinstance(config, list):
return False, ('Configuration for cert_info beacon must be a list.')
if 'files' not in _config:
return False, ('Configuration for cert_info beacon '
'must contain files option.')
return True, 'Valid beacon configuration'
def beacon(config):
'''
Monitor the certificate files on the minion.
Specify a notification threshold in days and only emit a beacon if any certificates are
expiring within that timeframe or if `notify_days` equals `-1` (always report information).
The default notification threshold is 45 days and can be overridden at the beacon level and
at an individual certificate level.
.. code-block:: yaml
beacons:
cert_info:
- files:
- /etc/pki/tls/certs/mycert.pem
- /etc/pki/tls/certs/yourcert.pem:
notify_days: 15
- /etc/pki/tls/certs/ourcert.pem
- notify_days: 45
- interval: 86400
'''
ret = []
certificates = []
CryptoError = crypto.Error # pylint: disable=invalid-name
_config = {}
list(_map(_config.update, config))
global_notify_days = _config.get('notify_days', DEFAULT_NOTIFY_DAYS)
for cert_path in _config.get('files', []):
notify_days = global_notify_days
if isinstance(cert_path, dict):
try:
cert_file = list(cert_path)[0]  # dict.keys() cannot be indexed on Python 3
notify_days = cert_path[cert_file].get('notify_days', global_notify_days)
cert_path = cert_file
except IndexError as exc:
log.error('Unable to load certificate %s (%s)', cert_path, exc)
continue
try:
with salt.utils.files.fopen(cert_path) as fp_:
cert = crypto.load_certificate(crypto.FILETYPE_PEM, fp_.read())
except (IOError, CryptoError) as exc:
log.error('Unable to load certificate %s (%s)', cert_path, exc)
continue
cert_date = datetime.strptime(cert.get_notAfter().decode(encoding='UTF-8'), "%Y%m%d%H%M%SZ")
date_diff = (cert_date - datetime.today()).days
log.debug('Certificate %s expires in %s days.', cert_path, date_diff)
if notify_days < 0 or date_diff <= notify_days:
log.debug('Certificate %s triggered beacon due to %s day notification threshold.', cert_path, notify_days)
extensions = []
for ext in _range(0, cert.get_extension_count()):
extensions.append(
{
'ext_name': cert.get_extension(ext).get_short_name().decode(encoding='UTF-8'),
'ext_data': str(cert.get_extension(ext))
}
)
certificates.append(
{
'cert_path': cert_path,
'issuer': ','.join(
['{0}="{1}"'.format(
t[0].decode(encoding='UTF-8'),
t[1].decode(encoding='UTF-8')
) for t in cert.get_issuer().get_components()]),
'issuer_dict': {
k.decode('UTF-8'): v.decode('UTF-8') for k, v in cert.get_issuer().get_components()
},
'notAfter_raw': cert.get_notAfter().decode(encoding='UTF-8'),
'notAfter': cert_date.strftime("%Y-%m-%d %H:%M:%SZ"),
'notBefore_raw': cert.get_notBefore().decode(encoding='UTF-8'),
'notBefore': datetime.strptime(
cert.get_notBefore().decode(encoding='UTF-8'), "%Y%m%d%H%M%SZ"
).strftime("%Y-%m-%d %H:%M:%SZ"),
'serial_number': cert.get_serial_number(),
'signature_algorithm': cert.get_signature_algorithm().decode(encoding='UTF-8'),
'subject': ','.join(
['{0}="{1}"'.format(
t[0].decode(encoding='UTF-8'),
t[1].decode(encoding='UTF-8')
) for t in cert.get_subject().get_components()]),
'subject_dict': {
k.decode('UTF-8'): v.decode('UTF-8') for k, v in cert.get_subject().get_components()
},
'version': cert.get_version(),
'extensions': extensions,
'has_expired': cert.has_expired()
}
)
if certificates:
ret.append({'certificates': certificates})
return ret

View file

@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
'''
Wrap the saltcheck module to copy files to ssh minion before running tests
'''
# Import Python libs
from __future__ import absolute_import, print_function
import logging
import tempfile
import os
import shutil
import tarfile
from contextlib import closing
# Import salt libs
import salt.client.ssh
import salt.fileclient
import salt.utils.args
import salt.utils.files
import salt.utils.json
import salt.utils.path
import salt.utils.url
log = logging.getLogger(__name__)
def update_master_cache(states, saltenv='base'):
'''
Replace the standard saltcheck version with similar logic, but instead of using
cp.cache_dir: generate the files, tar them up, copy the tarball to the remote
host, extract the tar into the state cache directory, and clean up the files
'''
cache = __opts__['cachedir']
state_cache = os.path.join(cache, 'files', saltenv)
# Setup for copying states to gendir
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__:
__context__['cp.fileclient_{0}'.format(id(__opts__))] = \
salt.fileclient.get_file_client(__opts__)
# generate cp.list_states output and save to gendir
cp_output = salt.utils.json.dumps(__salt__['cp.list_states']())
cp_output_file = os.path.join(gendir, 'cp_output.txt')
with salt.utils.files.fopen(cp_output_file, 'w') as fp:
fp.write(cp_output)
# cp state directories to gendir
already_processed = []
sls_list = salt.utils.args.split_input(states)
for state_name in sls_list:
# generate low data for each state and save to gendir
state_low_file = os.path.join(gendir, state_name + '.low')
state_low_output = salt.utils.json.dumps(__salt__['state.show_low_sls'](state_name))
with salt.utils.files.fopen(state_low_file, 'w') as fp:
fp.write(state_low_output)
state_name = state_name.replace(".", os.sep)
if state_name in already_processed:
log.debug("Already cached state for %s", state_name)
else:
file_copy_file = os.path.join(gendir, state_name + '.copy')
log.debug('copying %s to %s', state_name, gendir)
qualified_name = salt.utils.url.create(state_name, saltenv)
# Duplicate cp.get_dir to gendir
copy_result = __context__['cp.fileclient_{0}'.format(id(__opts__))].get_dir(
qualified_name, gendir, saltenv)
if copy_result:
copy_result = [dir.replace(gendir, state_cache) for dir in copy_result]
copy_result_output = salt.utils.json.dumps(copy_result)
with salt.utils.files.fopen(file_copy_file, 'w') as fp:
fp.write(copy_result_output)
already_processed.append(state_name)
else:
# If files were not copied, assume state.file.sls was given and just copy state
state_name = os.path.dirname(state_name)
file_copy_file = os.path.join(gendir, state_name + '.copy')
if state_name in already_processed:
log.debug('Already cached state for %s', state_name)
else:
qualified_name = salt.utils.url.create(state_name, saltenv)
copy_result = __context__['cp.fileclient_{0}'.format(id(__opts__))].get_dir(
qualified_name, gendir, saltenv)
if copy_result:
copy_result = [dir.replace(gendir, state_cache) for dir in copy_result]
copy_result_output = salt.utils.json.dumps(copy_result)
with salt.utils.files.fopen(file_copy_file, 'w') as fp:
fp.write(copy_result_output)
already_processed.append(state_name)
# turn gendir into tarball and remove gendir
try:
# cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in salt.utils.path.os_walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
# Copy tarfile to ssh host
single = salt.client.ssh.Single(
__opts__,
'',
**__salt__.kwargs)
thin_dir = __opts__['thin_dir']
ret = single.shell.send(trans_tar, thin_dir)
# Clean up local tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
tar_path = os.path.join(thin_dir, os.path.basename(trans_tar))
# Extract remote tarball to cache directory and remove tar file
# TODO this could be better handled by a single state/connection due to ssh overhead
ret = __salt__['file.mkdir'](state_cache)
ret = __salt__['archive.tar']('xf', tar_path, dest=state_cache)
ret = __salt__['file.remove'](tar_path)
return ret
def run_state_tests(states, saltenv='base', check_all=False):
'''
Define common functions to activate this wrapping module and tar copy.
After file copies are finished, run the usual local saltcheck function
'''
ret = update_master_cache(states, saltenv)
ret = __salt__['saltcheck.run_state_tests_ssh'](states, saltenv=saltenv, check_all=check_all)
return ret
def run_highstate_tests(saltenv='base'):
'''
Lookup top files for minion, pass results to wrapped run_state_tests for copy and run
'''
top_states = __salt__['state.show_top']().get(saltenv)
state_string = ','.join(top_states)
ret = run_state_tests(state_string, saltenv)
return ret

View file

@ -492,6 +492,71 @@ def list_nodes_select(call=None):
)
def _stringlist_to_dictionary(input_string):
'''
Convert a stringlist (comma separated settings) to a dictionary
The result of the string setting1=value1,setting2=value2 will be a python dictionary:
{'setting1':'value1','setting2':'value2'}
'''
return dict(item.strip().split("=") for item in input_string.split(",") if item)
def _dictionary_to_stringlist(input_dict):
'''
Convert a dictionary to a stringlist (comma separated settings)
The result of the dictionary {'setting1':'value1','setting2':'value2'} will be:
setting1=value1,setting2=value2
'''
return ','.join('{}={}'.format(k, input_dict[k]) for k in sorted(input_dict.keys()))
def _reconfigure_clone(vm_, vmid):
'''
If we cloned a machine, see if we need to reconfigure any of the options such as net0,
ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's brought up
:param vm_:
:return:
'''
if not vm_.get('technology') == 'qemu':
log.warning('Reconfiguring clones is only available under `qemu`')
return
# TODO: Support other settings here too as these are not the only ones that can be modified after a clone operation
log.info('Configuring cloned VM')
# Modify the settings for the VM one at a time so we can see any problems with the values
# as quickly as possible
for setting in vm_:
if re.match(r'^(ide|sata|scsi)(\d+)$', setting):
postParams = {setting: vm_[setting]}
query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
elif re.match(r'^net(\d+)$', setting):
# net strings are a list of comma-separated settings. We need to merge the settings so that
# the setting in the profile only changes the settings it touches and the other settings
# are left alone. An example of why this is necessary is because the MAC address is set
# in here and generally you don't want to alter or have to know the MAC address of the new
# instance, but you may want to set the VLAN bridge
data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
# Generate a dictionary of settings from the existing string
new_setting = {}
if setting in data:
new_setting.update(_stringlist_to_dictionary(data[setting]))
# Merge the new settings (as a dictionary) into the existing dictionary to get the
# new merged settings
new_setting.update(_stringlist_to_dictionary(vm_[setting]))
# Convert the dictionary back into a string list
postParams = {setting: _dictionary_to_stringlist(new_setting)}
query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)
def create(vm_):
'''
Create a single VM from a data dict
@ -575,6 +640,9 @@ def create(vm_):
if not wait_for_created(data['upid'], timeout=300):
return {'Error': 'Unable to create {0}, command timed out'.format(name)}
if vm_.get('clone') is True:
_reconfigure_clone(vm_, vmid)
# VM has been created. Starting..
if not start(name, vmid, call='action'):
log.error('Node %s (%s) failed to start!', name, vmid)

File diff suppressed because it is too large Load diff

View file

@ -35,6 +35,7 @@ import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.verify
import salt.utils.mine
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
@ -548,6 +549,18 @@ class RemoteFuncs(object):
if not skip_verify:
if any(key not in load for key in ('id', 'tgt', 'fun')):
return {}
if isinstance(load['fun'], six.string_types):
functions = list(set(load['fun'].split(',')))
_ret_dict = len(functions) > 1
elif isinstance(load['fun'], list):
functions = load['fun']
_ret_dict = True
else:
return {}
functions_allowed = []
if 'mine_get' in self.opts:
# If master side acl defined.
if not isinstance(self.opts['mine_get'], dict):
@ -557,11 +570,18 @@ class RemoteFuncs(object):
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if not any(re.match(perm, load['fun']) for perm in perms):
for fun in functions:
if any(re.match(perm, fun) for perm in perms):
functions_allowed.append(fun)
if not functions_allowed:
return {}
else:
functions_allowed = functions
ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret
expr_form = load.get('expr_form')
# keep both expr_form and tgt_type to ensure
# compatibility between old versions of salt
@ -580,17 +600,43 @@ class RemoteFuncs(object):
greedy=False
)
minions = _res['minions']
minion_side_acl = {} # Cache minion-side ACL
for minion in minions:
fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine')
if isinstance(fdata, dict):
fdata = fdata.get(load['fun'])
if fdata:
ret[minion] = fdata
mine_data = self.cache.fetch('minions/{0}'.format(minion), 'mine')
if not isinstance(mine_data, dict):
continue
for function in functions_allowed:
if function not in mine_data:
continue
mine_entry = mine_data[function]
mine_result = mine_data[function]
if isinstance(mine_entry, dict) and salt.utils.mine.MINE_ITEM_ACL_ID in mine_entry:
mine_result = mine_entry[salt.utils.mine.MINE_ITEM_ACL_DATA]
# Check and fill minion-side ACL cache
if function not in minion_side_acl.get(minion, {}):
if 'allow_tgt' in mine_entry:
# Only determine allowed targets if any have been specified.
# This prevents having to add a list of all minions as allowed targets.
salt.utils.dictupdate.set_dict_key_value(
minion_side_acl,
'{}:{}'.format(minion, function),
checker.check_minions(
mine_entry['allow_tgt'],
mine_entry.get('allow_tgt_type', 'glob')
)['minions']
)
if salt.utils.mine.minion_side_acl_denied(minion_side_acl, minion, function, load['id']):
continue
if _ret_dict:
ret.setdefault(function, {})[minion] = mine_result
else:
# There is only one function in functions_allowed.
ret[minion] = mine_result
return ret
def _mine(self, load, skip_verify=False):
'''
Return the mine data
Store/update the mine data in cache.
'''
if not skip_verify:
if 'id' not in load or 'data' not in load:
@ -598,12 +644,12 @@ class RemoteFuncs(object):
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cbank = 'minions/{0}'.format(load['id'])
ckey = 'mine'
new_data = load['data']
if not load.get('clear', False):
data = self.cache.fetch(cbank, ckey)
if isinstance(data, dict):
data.update(load['data'])
load['data'] = data
self.cache.store(cbank, ckey, load['data'])
data.update(new_data)
self.cache.store(cbank, ckey, data)
return True
def _mine_delete(self, load):
@ -703,7 +749,6 @@ class RemoteFuncs(object):
'''
if any(key not in load for key in ('id', 'grains')):
return False
# pillar = salt.pillar.Pillar(
log.debug('Master _pillar using ext: %s', load.get('ext'))
pillar = salt.pillar.get_pillar(
self.opts,

View file

@ -2058,6 +2058,7 @@ def os_data():
else:
grains['os'] = grains['kernel']
if grains['kernel'] == 'FreeBSD':
grains['osfullname'] = grains['os']
try:
grains['osrelease'] = __salt__['cmd.run']('freebsd-version -u').split('-')[0]
except salt.exceptions.CommandExecutionError:

View file

@ -30,6 +30,7 @@ from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.transport.client
import salt.defaults.exitcodes
import salt.utils.crypt
from salt.utils.ctx import RequestContext
@ -1531,6 +1532,7 @@ class Minion(MinionBase):
name='ProcessPayload',
args=(instance, self.opts, data, self.connected)
)
process._after_fork_methods.append((salt.utils.crypt.reinit_crypto, [], {}))
else:
process = threading.Thread(
target=self._target,

View file

@ -2383,6 +2383,22 @@ def script(source,
on a Windows minion you must also use the ``password`` argument, and
the target user account must be in the Administrators group.
.. note::
For Windows users, specifically Server users, it may be necessary
to specify your runas user using the User Logon Name instead of the
legacy logon name. Traditionally, logons would be in the following
format.
``Domain/user``
In the event this causes issues when executing scripts, use the UPN
format which looks like the following.
``user@domain.local``
More information: https://github.com/saltstack/salt/issues/55080
:param str password: Windows only. Required when specifying ``runas``. This
parameter will be ignored on non-Windows platforms.

View file

@ -3775,6 +3775,10 @@ def remove(path):
.. code-block:: bash
salt '*' file.remove /tmp/foo
.. versionchanged:: Neon
The method now works on all types of file system entries, not just
files, directories and symlinks.
'''
path = os.path.expanduser(path)
@ -3782,7 +3786,7 @@ def remove(path):
raise SaltInvocationError('File path must be absolute: {0}'.format(path))
try:
if os.path.isfile(path) or os.path.islink(path):
if os.path.islink(path) or (os.path.exists(path) and not os.path.isdir(path)):
os.remove(path)
return True
elif os.path.isdir(path):

View file

@ -70,7 +70,7 @@ def run_command(jboss_config, command, fail_on_error=True):
salt '*' jboss7_cli.run_command '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
'''
cli_command_result = __call_cli(jboss_config, command)
cli_command_result = _call_cli(jboss_config, command)
if cli_command_result['retcode'] == 0:
cli_command_result['success'] = True
@ -104,7 +104,7 @@ def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
'''
cli_command_result = __call_cli(jboss_config, operation, retries)
cli_command_result = _call_cli(jboss_config, operation, retries)
if cli_command_result['retcode'] == 0:
if _is_cli_output(cli_command_result['stdout']):
@ -116,8 +116,19 @@ def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
if _is_cli_output(cli_command_result['stdout']):
cli_result = _parse(cli_command_result['stdout'])
cli_result['success'] = False
match = re.search(r'^(JBAS\d+):', cli_result['failure-description'])
cli_result['err_code'] = match.group(1)
# if match is None then check for wildfly error code
if match is None:
match = re.search(r'^(WFLYCTL\d+):', cli_result['failure-description'])
if match is not None:
cli_result['err_code'] = match.group(1)
else:
# Could not find err_code
log.error("Jboss 7 operation failed! Error Code could not be found!")
cli_result['err_code'] = '-1'
cli_result['stdout'] = cli_command_result['stdout']
else:
if fail_on_error:
@ -132,7 +143,7 @@ def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
return cli_result
def __call_cli(jboss_config, command, retries=1):
def _call_cli(jboss_config, command, retries=1):
command_segments = [
jboss_config['cli_path'],
'--connect',
@ -158,11 +169,11 @@ def __call_cli(jboss_config, command, retries=1):
if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:
raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))
# It may happen that even though the server is up it may not respond to the call
# TODO add WFLYCTL code
if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0: # Cannot connect to cli
log.debug('Command failed, retrying... (%d tries left)', retries)
time.sleep(3)
return __call_cli(jboss_config, command, retries - 1)
return _call_cli(jboss_config, command, retries - 1)
return cli_command_result

View file

@ -574,10 +574,13 @@ def _fix_cask_namespace(name=None, pkgs=None):
if pkgs:
pkgs_ = []
for pkg in pkgs:
if pkg.startswith('caskroom/cask/'):
if isinstance(pkg, str) and pkg.startswith('caskroom/cask/'):
show_warning = True
pkg = pkg.replace("caskroom/cask/", "homebrew/cask/")
pkgs_.append(pkg)
pkgs_.append(pkg)
else:
pkgs_.append(pkg)
continue
pkgs = pkgs_
if show_warning:

View file

@ -5,7 +5,6 @@ The function cache system allows for data to be stored on the master so it can b
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import time
import traceback
@ -17,6 +16,10 @@ import salt.utils.args
import salt.utils.event
import salt.utils.network
import salt.transport.client
import salt.utils.mine
import salt.utils.minions
import salt.utils.dictupdate
import salt.utils.functools
from salt.exceptions import SaltClientError
# Import 3rd-party libs
@ -79,10 +82,40 @@ def _mine_get(load, opts):
return channel.send(load)
def _mine_store(
mine_data,
clear=False):
'''
Helper function to store the provided mine data.
This will store either locally in the cache (for masterless setups), or in
the master's cache.
:param dict mine_data: Dictionary with function_name: function_data to store.
:param bool clear: Whether or not to clear (`True`) the mine data for the
function names present in ``mine_data``, or update it (`False`).
'''
# Store in the salt-minion's local cache
if __opts__['file_client'] == 'local':
if not clear:
old = __salt__['data.get']('mine_cache')
if isinstance(old, dict):
old.update(mine_data)
mine_data = old
return __salt__['data.update']('mine_cache', mine_data)
# Store on the salt master
load = {
'cmd': '_mine',
'data': mine_data,
'id': __opts__['id'],
'clear': clear,
}
return _mine_send(load, __opts__)
def update(clear=False, mine_functions=None):
'''
Execute the configured functions and send the data back up to the master.
The functions to be executed are merged from the master config, pillar and
Call the configured functions and send the data back up to the master.
The functions to be called are merged from the master config, pillar and
minion config under the option `mine_functions`:
.. code-block:: yaml
@ -94,14 +127,17 @@ def update(clear=False, mine_functions=None):
This function accepts the following arguments:
clear: False
Boolean flag specifying whether updating will clear the existing
mines, or will update. Default: `False` (update).
:param bool clear: Default: ``False``
Specifies whether updating will clear the existing values (``True``), or
whether it will update them (``False``).
:param dict mine_functions:
Update (or clear, see ``clear``) the mine data on these functions only.
This will need to have the structure as defined on
https://docs.saltstack.com/en/latest/topics/mine/index.html#mine-functions
mine_functions
Update the mine data on certain functions only.
This feature can be used when updating the mine for functions
that require refresh at different intervals than the rest of
that require a refresh at different intervals than the rest of
the functions specified under `mine_functions` in the
minion/master config or pillar.
A potential use would be together with the `scheduler`, for example:
@ -129,64 +165,68 @@ def update(clear=False, mine_functions=None):
salt '*' mine.update
'''
m_data = {}
if not mine_functions:
m_data = __salt__['config.merge']('mine_functions', {})
mine_functions = __salt__['config.merge']('mine_functions', {})
# If we don't have any mine functions configured, then we should just bail out
if not m_data:
if not mine_functions:
return
elif mine_functions and isinstance(mine_functions, list):
m_data = dict((fun, {}) for fun in mine_functions)
elif mine_functions and isinstance(mine_functions, dict):
m_data = mine_functions
elif isinstance(mine_functions, list):
mine_functions = dict((fun, {}) for fun in mine_functions)
elif isinstance(mine_functions, dict):
pass
else:
return
data = {}
for func in m_data:
mine_data = {}
for function_alias, function_data in six.iteritems(mine_functions):
function_name, function_args, function_kwargs, minion_acl = \
salt.utils.mine.parse_function_definition(function_data)
if not _mine_function_available(function_name or function_alias):
continue
try:
if m_data[func] and isinstance(m_data[func], dict):
mine_func = m_data[func].pop('mine_function', func)
if not _mine_function_available(mine_func):
continue
data[func] = __salt__[mine_func](**m_data[func])
elif m_data[func] and isinstance(m_data[func], list):
mine_func = func
if isinstance(m_data[func][0], dict) and 'mine_function' in m_data[func][0]:
mine_func = m_data[func][0]['mine_function']
m_data[func].pop(0)
if not _mine_function_available(mine_func):
continue
data[func] = __salt__[mine_func](*m_data[func])
else:
if not _mine_function_available(func):
continue
data[func] = __salt__[func]()
res = salt.utils.functools.call_function(
__salt__[function_name or function_alias],
*function_args,
**function_kwargs
)
except Exception: # pylint: disable=broad-except
trace = traceback.format_exc()
log.error('Function %s in mine_functions failed to execute', func)
log.error('Function %s in mine.update failed to execute', function_name or function_alias)
log.debug('Error: %s', trace)
continue
if __opts__['file_client'] == 'local':
if not clear:
old = __salt__['data.get']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
'clear': clear,
}
return _mine_send(load, __opts__)
mine_data[function_alias] = salt.utils.mine.wrap_acl_structure(
res,
**minion_acl
)
return _mine_store(mine_data, clear)
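For illustration, a minimal sketch (names and values invented) of how ``update`` reduces a ``mine_functions`` entry to a function name, args, kwargs and an optional minion-side ACL before executing and storing it:
.. code-block:: python

    # Hypothetical mine_functions pillar
    mine_functions = {
        'network.ip_addrs': [],               # alias equals the function name
        'eth0_addrs': {                       # alias with an explicit function
            'mine_function': 'network.ip_addrs',
            'interface': 'eth0',
            'allow_tgt': 'web*',              # minion-side ACL
        },
    }

    # parse_function_definition() yields, per entry, roughly:
    #   (None, [], {}, {})                            -> run network.ip_addrs()
    #   ('network.ip_addrs', [], {'interface': 'eth0'},
    #    {'allow_tgt': 'web*'})                       -> run network.ip_addrs(interface='eth0')
    # Each result is wrapped with wrap_acl_structure() and handed to _mine_store().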
def send(func, *args, **kwargs):
def send(name, *args, **kwargs):
'''
Send a specific function to the mine.
Send a specific function and its result to the salt mine.
This gets stored in either the local cache, or the salt master's cache.
:param str name: Name of the function to add to the mine.
The following parameters are extracted from kwargs if present:
:param str mine_function: The name of the execution_module.function to run
and whose value will be stored in the salt mine. Defaults to ``name``.
:param str allow_tgt: Targeting specification for ACL. Specifies which minions
are allowed to access this function.
:param str allow_tgt_type: Type of the targeting specification. This value will
be ignored if ``allow_tgt`` is not specified.
Remaining args and kwargs will be passed on to the function to run.
:rtype: bool
:return: Whether executing the function and storing the information was successful.
.. versionchanged:: Sodium
Added the ``allow_tgt`` and ``allow_tgt_type`` parameters to specify which
minions are allowed to access this function.
See :ref:`targeting` for more information about targeting.
CLI Example:
@ -194,48 +234,30 @@ def send(func, *args, **kwargs):
salt '*' mine.send network.ip_addrs eth0
salt '*' mine.send eth0_ip_addrs mine_function=network.ip_addrs eth0
salt '*' mine.send eth0_ip_addrs mine_function=network.ip_addrs eth0 allow_tgt='G@grain:value' allow_tgt_type=compound
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
mine_func = kwargs.pop('mine_function', func)
if mine_func not in __salt__:
return False
data = {}
arg_data = salt.utils.args.arg_lookup(__salt__[mine_func])
func_data = copy.deepcopy(kwargs)
for ind, _ in enumerate(arg_data.get('args', [])):
try:
func_data[arg_data['args'][ind]] = args[ind]
except IndexError:
# Safe error, arg may be in kwargs
pass
f_call = salt.utils.args.format_call(
__salt__[mine_func],
func_data,
expected_extra_kws=MINE_INTERNAL_KEYWORDS)
for arg in args:
if arg not in f_call['args']:
f_call['args'].append(arg)
mine_function = kwargs.pop('mine_function', None)
allow_tgt = kwargs.pop('allow_tgt', None)
allow_tgt_type = kwargs.pop('allow_tgt_type', None)
mine_data = {}
try:
if 'kwargs' in f_call:
data[func] = __salt__[mine_func](*f_call['args'], **f_call['kwargs'])
else:
data[func] = __salt__[mine_func](*f_call['args'])
res = salt.utils.functools.call_function(
__salt__[mine_function or name],
*args,
**kwargs
)
except Exception as exc: # pylint: disable=broad-except
log.error('Function %s in mine.send failed to execute: %s',
mine_func, exc)
trace = traceback.format_exc()
log.error('Function %s in mine.send failed to execute', mine_function or name)
log.debug('Error: %s', trace)
return False
if __opts__['file_client'] == 'local':
old = __salt__['data.get']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
}
return _mine_send(load, __opts__)
mine_data[name] = salt.utils.mine.wrap_acl_structure(
res,
allow_tgt=allow_tgt,
allow_tgt_type=allow_tgt_type
)
return _mine_store(mine_data)
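A short illustrative sketch (call and values invented) of how ``send`` splits its arguments:
.. code-block:: python

    # salt '*' mine.send eth0_addrs mine_function=network.ip_addrs eth0 allow_tgt='web*'
    name = 'eth0_addrs'
    args = ('eth0',)
    kwargs = {'mine_function': 'network.ip_addrs', 'allow_tgt': 'web*'}

    mine_function = kwargs.pop('mine_function', None)    # 'network.ip_addrs'
    allow_tgt = kwargs.pop('allow_tgt', None)             # 'web*'
    allow_tgt_type = kwargs.pop('allow_tgt_type', None)   # None
    # The remaining args ('eth0') are forwarded to network.ip_addrs;
    # the wrapped result is stored under the alias 'eth0_addrs'.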
def get(tgt,
@ -243,24 +265,17 @@ def get(tgt,
tgt_type='glob',
exclude_minion=False):
'''
Get data from the mine based on the target, function and tgt_type
Get data from the mine.
Targets can be matched based on any standard matching system that can be
matched on the master via these keywords:
- glob
- pcre
- grain
- grain_pcre
- compound
- pillar
- pillar_pcre
Note that all pillar matches, whether using the compound matching system or
the pillar matching system, will be exact matches, with globbing disabled.
exclude_minion
Excludes the current minion from the result set
:param str tgt: Target whose mine data to get.
:param fun: Function to get the mine data of. You can specify multiple functions
to retrieve using either a list or a comma-separated string of functions.
:type fun: str or list
:param str tgt_type: Default ``glob``. Target type to use with ``tgt``.
See :ref:`targeting` for more information.
Note that all pillar matches, whether using the compound matching system or
the pillar matching system, will be exact matches, with globbing disabled.
:param bool exclude_minion: Excludes the current minion from the result set.
CLI Example:
@ -286,6 +301,7 @@ def get(tgt,
fun='network.ip_addrs',
tgt_type='glob') %}
'''
# Load from local minion's cache
if __opts__['file_client'] == 'local':
ret = {}
is_target = {'glob': __salt__['match.glob'],
@ -298,28 +314,58 @@ def get(tgt,
'pillar': __salt__['match.pillar'],
'pillar_pcre': __salt__['match.pillar_pcre'],
}[tgt_type](tgt)
if is_target:
data = __salt__['data.get']('mine_cache')
if isinstance(data, dict) and fun in data:
ret[__opts__['id']] = data[fun]
if not is_target:
return ret
data = __salt__['data.get']('mine_cache')
if not isinstance(data, dict):
return ret
if isinstance(fun, six.string_types):
functions = list(set(fun.split(',')))
_ret_dict = len(functions) > 1
elif isinstance(fun, list):
functions = fun
_ret_dict = True
else:
return ret
for function in functions:
if function not in data:
continue
# If this is a mine item with minion_side_ACL, get its data
if salt.utils.mine.MINE_ITEM_ACL_ID in data[function]:
res = data[function][salt.utils.mine.MINE_ITEM_ACL_DATA]
else:
# Backwards compatibility with non-ACL mine data.
res = data[function]
if _ret_dict:
ret.setdefault(function, {})[__opts__['id']] = res
else:
ret[__opts__['id']] = res
return ret
# Load from master
load = {
'cmd': '_mine_get',
'id': __opts__['id'],
'tgt': tgt,
'fun': fun,
'tgt_type': tgt_type,
'cmd': '_mine_get',
'id': __opts__['id'],
'tgt': tgt,
'fun': fun,
'tgt_type': tgt_type,
}
ret = _mine_get(load, __opts__)
if exclude_minion:
if __opts__['id'] in ret:
del ret[__opts__['id']]
if exclude_minion and __opts__['id'] in ret:
del ret[__opts__['id']]
return ret
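For illustration (minion names and values invented), the return shape depends on how many functions are requested:
.. code-block:: python

    # salt-call mine.get '*' network.ip_addrs                  -> keyed by minion id
    {'web1': ['10.0.0.1'], 'web2': ['10.0.0.2']}

    # salt-call mine.get '*' network.ip_addrs,status.loadavg   -> nested: function, then minion id
    {'network.ip_addrs': {'web1': ['10.0.0.1'], 'web2': ['10.0.0.2']},
     'status.loadavg': {'web1': {'1-min': 0.1}, 'web2': {'1-min': 0.3}}}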
def delete(fun):
'''
Remove specific function contents of minion. Returns True on success.
Remove specific function contents of minion.
:param str fun: The name of the function.
:rtype: bool
:return: True on success.
CLI Example:
@ -333,16 +379,19 @@ def delete(fun):
del data[fun]
return __salt__['data.update']('mine_cache', data)
load = {
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
}
return _mine_send(load, __opts__)
def flush():
'''
Remove all mine contents of minion. Returns True on success.
Remove all mine contents of minion.
:rtype: bool
:return: True on success
CLI Example:
@ -353,8 +402,8 @@ def flush():
if __opts__['file_client'] == 'local':
return __salt__['data.update']('mine_cache', {})
load = {
'cmd': '_mine_flush',
'id': __opts__['id'],
'cmd': '_mine_flush',
'id': __opts__['id'],
}
return _mine_send(load, __opts__)
@ -477,30 +526,21 @@ def valid():
salt '*' mine.valid
'''
m_data = __salt__['config.merge']('mine_functions', {})
mine_functions = __salt__['config.merge']('mine_functions', {})
# If we don't have any mine functions configured, then we should just bail out
if not m_data:
if not mine_functions:
return
data = {}
for func in m_data:
if m_data[func] and isinstance(m_data[func], dict):
mine_func = m_data[func].pop('mine_function', func)
if not _mine_function_available(mine_func):
continue
data[func] = {mine_func: m_data[func]}
elif m_data[func] and isinstance(m_data[func], list):
mine_func = func
if isinstance(m_data[func][0], dict) and 'mine_function' in m_data[func][0]:
mine_func = m_data[func][0]['mine_function']
m_data[func].pop(0)
if not _mine_function_available(mine_func):
continue
data[func] = {mine_func: m_data[func]}
mine_data = {}
for function_alias, function_data in six.iteritems(mine_functions):
function_name, function_args, function_kwargs, minion_acl = \
salt.utils.mine.parse_function_definition(function_data)
if not _mine_function_available(function_name or function_alias):
continue
if function_name:
mine_data[function_alias] = {
function_name: function_args + [{key: value} for key, value in six.iteritems(function_kwargs)]
}
else:
if not _mine_function_available(func):
continue
data[func] = m_data[func]
return data
mine_data[function_alias] = function_data
return mine_data

View file

@ -3151,6 +3151,7 @@ def datadir_init(name,
password=password,
encoding=encoding,
locale=locale,
checksums=checksums,
runas=runas)
return ret['retcode'] == 0

File diff suppressed because it is too large

View file

@ -4584,6 +4584,30 @@ def network_info(name=None, **kwargs):
return result
def network_get_xml(name, **kwargs):
'''
Return the XML definition of a virtual network
:param name: libvirt network name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' virt.network_get_xml default
'''
conn = __get_conn(**kwargs)
try:
return conn.networkLookupByName(name).XMLDesc()
finally:
conn.close()
def network_start(name, **kwargs):
'''
Start a defined virtual network.
@ -5328,6 +5352,30 @@ def pool_info(name=None, **kwargs):
return result
def pool_get_xml(name, **kwargs):
'''
Return the XML definition of a virtual storage pool
:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' virt.pool_get_xml default
'''
conn = __get_conn(**kwargs)
try:
return conn.storagePoolLookupByName(name).XMLDesc()
finally:
conn.close()
def pool_start(name, **kwargs):
'''
Start a defined libvirt storage pool.

View file

@ -131,9 +131,17 @@ def create(path,
CLI Example:
.. code-block:: bash
.. code-block:: console
salt '*' virtualenv.create /path/to/new/virtualenv
Example of using --always-copy environment variable (in case your fs doesn't support symlinks).
This will copy files into the virtualenv instead of symlinking them.
.. code-block:: yaml
- env:
- VIRTUALENV_ALWAYS_COPY: 1
'''
if venv_bin is None:
venv_bin = __opts__.get('venv_bin') or __pillar__.get('venv_bin')

View file

@ -5639,7 +5639,9 @@ def _getAdmlDisplayName(adml_xml_data, display_name):
displayNameId=displayname_id)
if search_results:
for result in search_results:
return result.text
# Needs the `strip()` because some adml data has an extra space
# at the end
return result.text.strip()
return None

View file

@ -11,7 +11,6 @@ from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
import os
import re
# Import Salt libs
import salt.utils.args
@ -50,7 +49,7 @@ def _normalize_dir(string_):
'''
Normalize the directory to make comparison possible
'''
return re.sub(r'\\$', '', salt.utils.stringutils.to_unicode(string_))
return os.path.normpath(salt.utils.stringutils.to_unicode(string_))
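A quick illustration of why ``os.path.normpath`` is a better fit than the old regex, which only stripped a single trailing backslash (``ntpath`` is used here so the snippet behaves the same off Windows):
.. code-block:: python

    import ntpath

    ntpath.normpath('C:\\Program Files\\')      # 'C:\\Program Files'
    ntpath.normpath('C:\\Program Files\\\\')    # 'C:\\Program Files'
    ntpath.normpath('C:\\foo\\.\\bin\\')        # 'C:\\foo\\bin'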
def rehash():
@ -200,7 +199,7 @@ def add(path, index=None, **kwargs):
elif index <= -num_dirs:
# Negative index is too large, shift index to beginning of list
index = pos = 0
elif index <= 0:
elif index < 0:
# Negative indexes (other than -1 which is handled above) must
# be inserted at index + 1 for the item to end up in the
# position you want, since list.insert() inserts before the

View file

@ -126,7 +126,7 @@ def _fix_ctx(m2_ctx, issuer=None):
def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
'''
Create new X509_Extension, This is required because M2Crypto
Create new X509_Extension, this is required because M2Crypto
doesn't support getting the publickeyidentifier from the issuer
to create the authoritykeyidentifier extension.
'''
@ -595,7 +595,7 @@ def read_certificate(certificate):
def read_certificates(glob_path):
'''
Returns a dict containing details of a all certificates matching a glob
Returns a dict containing details of all certificates matching a glob
glob_path:
A path to certificates to be read and returned.
@ -658,8 +658,8 @@ def read_crl(crl):
:depends: - OpenSSL command line tool
csl:
A path or PEM encoded string containing the CSL to read.
crl:
A path or PEM encoded string containing the CRL to read.
CLI Example:
@ -754,17 +754,17 @@ def write_pem(text, path, overwrite=True, pem_type=None):
PEM string input to be written out.
path:
Path of the file to write the pem out to.
Path of the file to write the PEM out to.
overwrite:
If True(default), write_pem will overwrite the entire pem file.
If ``True`` (default), write_pem will overwrite the entire PEM file.
Set False to preserve existing private keys and dh params that may
exist in the pem file.
exist in the PEM file.
pem_type:
The PEM type to be saved, for example ``CERTIFICATE`` or
``PUBLIC KEY``. Adding this will allow the function to take
input that may contain multiple pem types.
input that may contain multiple PEM types.
CLI Example:
@ -818,10 +818,10 @@ def create_private_key(path=None,
Length of the private key in bits. Default 2048
passphrase:
Passphrase for encryting the private key
Passphrase for encrypting the private key
cipher:
Cipher for encrypting the private key. Has no effect if passhprase is None.
Cipher for encrypting the private key. Has no effect if passphrase is None.
verbose:
Provide visual feedback on stdout. Default True
@ -878,7 +878,7 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
:depends: - PyOpenSSL Python module
path:
Path to write the crl to.
Path to write the CRL to.
text:
If ``True``, return the PEM text without writing to a file.
@ -886,14 +886,14 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
signing_private_key:
A path or string of the private key in PEM format that will be used
to sign this crl. This is required.
to sign the CRL. This is required.
signing_private_key_passphrase:
Passphrase to decrypt the private key.
signing_cert:
A certificate matching the private key that will be used to sign
this crl. This is required.
the CRL. This is required.
revoked:
A list of dicts containing all the certificates to revoke. Each dict
@ -1127,9 +1127,9 @@ def create_certificate(
Default ``False``.
overwrite:
If True(default), create_certificate will overwrite the entire pem
If ``True`` (default), create_certificate will overwrite the entire PEM
file. Set False to preserve existing private keys and dh params that
may exist in the pem file.
may exist in the PEM file.
kwargs:
Any of the properties below can be included as additional
@ -1139,7 +1139,7 @@ def create_certificate(
Request a remotely signed certificate from ca_server. For this to
work, a ``signing_policy`` must be specified, and that same policy
must be configured on the ca_server. See ``signing_policy`` for
details. Also the salt master must permit peers to call the
details. Also, the salt master must permit peers to call the
``sign_remote_certificate`` function.
Example:
@ -1200,7 +1200,7 @@ def create_certificate(
public_key:
The public key to be included in this certificate. This can be sourced
from a public key, certificate, csr or private key. If a private key
from a public key, certificate, CSR or private key. If a private key
is used, the matching public key from the private key will be
generated before any processing is done. This means you can request a
certificate from a remote CA using a private key file as your
@ -1264,7 +1264,7 @@ def create_certificate(
X509v3 Subject Alternative Name
crlDistributionPoints:
X509v3 CRL distribution points
X509v3 CRL Distribution Points
issuingDistributionPoint:
X509v3 Issuing Distribution Point
@ -1324,7 +1324,7 @@ def create_certificate(
signing_policy:
A signing policy that should be used to create this certificate.
Signing policies should be defined in the minion configuration, or in
a minion pillar. It should be a yaml formatted list of arguments
a minion pillar. It should be a YAML formatted list of arguments
which will override any arguments passed to this function. If the
``minions`` key is included in the signing policy, only minions
matching that pattern (see match.glob and match.compound) will be
@ -1385,11 +1385,11 @@ def create_certificate(
passphrase=kwargs['public_key_passphrase'])).replace('\n', '')
# Remove system entries in kwargs
# Including listen_in and preqreuired because they are not included
# Including listen_in and prerequired because they are not included
# in STATE_INTERNAL_KEYWORDS
# for salt 2014.7.2
for ignore in list(_STATE_INTERNAL_KEYWORDS) + \
['listen_in', 'preqrequired', '__prerequired__']:
['listen_in', 'prerequired', '__prerequired__']:
kwargs.pop(ignore, None)
# TODO: Make timeout configurable in Neon
certs = __salt__['publish.publish'](
@ -1712,7 +1712,7 @@ def verify_private_key(private_key, public_key, passphrase=None):
public_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or another private key.
certificate, CSR, or another private key.
passphrase:
Passphrase to decrypt the private key.
@ -1739,7 +1739,7 @@ def verify_signature(certificate, signing_pub_key=None,
signing_pub_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or private key.
certificate, CSR, or private key.
signing_pub_key_passphrase:
Passphrase to the signing_pub_key if it is an encrypted private key.

103
salt/modules/xml.py Normal file
View file

@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
'''
XML file manager
.. versionadded:: Neon
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import xml.etree.ElementTree as ET
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'xml'
def __virtual__():
'''
Only load the module if all modules are imported correctly.
'''
return __virtualname__
def get_value(file, element):
'''
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.text
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_value(file, element, value):
'''
Sets the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.set_value /tmp/test.xml ".//element" "new value"
'''
try:
root = ET.parse(file)
relement = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
relement.text = str(value)
root.write(file)
return True
def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
def set_attribute(file, element, key, value):
'''
Set the requested attribute key and value for matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby "gal"
'''
try:
root = ET.parse(file)
element = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
element.set(key, str(value))
root.write(file)
return True
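Purely illustrative standalone usage of what the new module wraps (file path and XML content invented):
.. code-block:: python

    import xml.etree.ElementTree as ET

    with open('/tmp/test.xml', 'w') as fh_:
        fh_.write("<root><element id='3'>old value</element></root>")

    # Equivalent to: salt '*' xml.get_value /tmp/test.xml ".//element"
    print(ET.parse('/tmp/test.xml').find('.//element').text)        # old value

    # Equivalent to: salt '*' xml.set_attribute /tmp/test.xml ".//element[@id='3']" editedby gal
    tree = ET.parse('/tmp/test.xml')
    tree.find(".//element[@id='3']").set('editedby', 'gal')
    tree.write('/tmp/test.xml')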

View file

@ -3180,12 +3180,18 @@ def _get_patches(installed_only=False):
for line in salt.utils.itertools.split(ret, os.linesep):
inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
line).groups()
if inst != 'i' and installed_only:
continue
patches[advisory_id] = {
'installed': True if inst == 'i' else False,
'summary': pkg
}
if advisory_id not in patches:
patches[advisory_id] = {
'installed': True if inst == 'i' else False,
'summary': [pkg]
}
else:
patches[advisory_id]['summary'].append(pkg)
if inst != 'i':
patches[advisory_id]['installed'] = False
if installed_only:
patches = {k: v for k, v in patches.items() if v['installed']}
return patches
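For reference, with the accumulation above an advisory covering several packages now keeps every package name instead of only the last one parsed (example data invented):
.. code-block:: python

    patches = {
        'FEDORA-2019-1234': {
            # False as soon as any of its packages is not installed
            'installed': False,
            'summary': ['kernel-5.3.7-301.fc31', 'kernel-core-5.3.7-301.fc31'],
        },
    }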

View file

@ -273,7 +273,13 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
'''
# helper s3 query function
def __get_s3_meta():
def __get_s3_meta(continuation_token=None):
# We want to use ListObjectsV2 so we get the NextContinuationToken
params = {'prefix': prefix, 'list-type': 2}
if continuation_token:
params['continuation-token'] = continuation_token
return __utils__['s3.query'](
key=creds.key,
keyid=creds.keyid,
@ -283,7 +289,7 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
verify_ssl=creds.verify_ssl,
location=creds.location,
return_bin=False,
params={'prefix': prefix},
params=params,
path_style=creds.path_style,
https_enable=creds.https_enable)
@ -296,6 +302,12 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
environments = [(os.path.dirname(k['Key']).split('/', 1))[0] for k in files]
return set(environments)
def __get_continuation_token(s3_meta):
return next((item.get('NextContinuationToken')
for item in s3_meta
if item.get('NextContinuationToken')),
None)
log.debug('Refreshing S3 buckets pillar cache file')
metadata = {}
@ -312,6 +324,14 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
if s3_meta:
bucket_files[bucket] = __get_pillar_files_from_s3_meta(s3_meta)
# Check if we have a NextContinuationToken and loop until we don't
while True:
continuation_token = __get_continuation_token(s3_meta)
if not continuation_token:
break
s3_meta = __get_s3_meta(continuation_token)
bucket_files[bucket] += __get_pillar_files_from_s3_meta(s3_meta)
metadata[environment] = bucket_files
else:
@ -322,6 +342,15 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
# s3 query returned data
if s3_meta:
files = __get_pillar_files_from_s3_meta(s3_meta)
# Check if we have a NextContinuationToken and loop until we don't
while True:
continuation_token = __get_continuation_token(s3_meta)
if not continuation_token:
break
s3_meta = __get_s3_meta(continuation_token)
files += __get_pillar_files_from_s3_meta(s3_meta)
environments = __get_pillar_environments(files)
# pull out the files for the environment
@ -343,7 +372,7 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
log.debug('Writing S3 buckets pillar cache file')
with salt.utils.files.fopen(cache_file, 'w') as fp_:
with salt.utils.files.fopen(cache_file, 'wb') as fp_:
pickle.dump(metadata, fp_)
return metadata
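A self-contained sketch of the pagination pattern introduced above, assuming a hypothetical ``query`` callable that mimics paged S3 ListObjectsV2 responses:
.. code-block:: python

    def list_all_keys(query):
        '''Collect every page of a ListObjectsV2-style listing.'''
        s3_meta = query()
        files = [item for item in s3_meta if 'Key' in item]
        while True:
            token = next((item.get('NextContinuationToken')
                          for item in s3_meta
                          if item.get('NextContinuationToken')), None)
            if not token:
                break
            s3_meta = query(continuation_token=token)
            files += [item for item in s3_meta if 'Key' in item]
        return files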

View file

@ -8,7 +8,7 @@ from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.minions
import salt.daemons.masterapi
log = logging.getLogger(__name__)
@ -24,7 +24,14 @@ def get(tgt, fun, tgt_type='glob'):
salt-run mine.get '*' network.interfaces
'''
ret = salt.utils.minions.mine_get(tgt, fun, tgt_type, __opts__)
masterapi = salt.daemons.masterapi.RemoteFuncs(__opts__)
load = {
'id': __opts__['id'],
'fun': fun,
'tgt': tgt,
'tgt_type': tgt_type,
}
ret = masterapi._mine_get(load)
return ret

View file

@ -1609,10 +1609,10 @@ def symlink(
Create a symbolic link (symlink, soft link)
If the file already exists and is a symlink pointing to any location other
than the specified target, the symlink will be replaced. If the symlink is
a regular file or directory then the state will return False. If the
regular file or directory is desired to be replaced with a symlink pass
force: True, if it is to be renamed, pass a backupname.
than the specified target, the symlink will be replaced. If an entry with
the same name exists then the state will return False. If the existing
entry is desired to be replaced with a symlink pass force: True, if it is
to be renamed, pass a backupname.
name
The location of the symlink to create
@ -1623,10 +1623,14 @@ def symlink(
force
If the name of the symlink exists and is not a symlink and
force is set to False, the state will fail. If force is set to
True, the file or directory in the way of the symlink file
True, the existing entry in the way of the symlink file
will be deleted to make room for the symlink, unless
backupname is set, when it will be renamed
.. versionchanged:: Neon
Force will now remove all types of existing file system entries,
not just files, directories and symlinks.
backupname
If the name of the symlink exists and is not a symlink, it will be
renamed to the backupname. If the backupname already
@ -1845,8 +1849,8 @@ def symlink(
'{1}:{2}'.format(name, user, group))
return ret
elif os.path.isfile(name) or os.path.isdir(name):
# It is not a link, but a file or dir
elif os.path.exists(name):
# It is not a link, but a file, dir, socket, FIFO etc.
if backupname is not None:
if not os.path.isabs(backupname):
if backupname == os.path.basename(backupname):
@ -1883,14 +1887,12 @@ def symlink(
__salt__['file.remove'](name)
else:
# Otherwise throw an error
if os.path.isfile(name):
return _error(ret,
('File exists where the symlink {0} should be'
.format(name)))
else:
return _error(ret, ((
'Directory exists where the symlink {0} should be'
).format(name)))
fs_entry_type = 'File' if os.path.isfile(name) else \
'Directory' if os.path.isdir(name) else \
'File system entry'
return _error(ret,
('{0} exists where the symlink {1} should be'
.format(fs_entry_type, name)))
if not os.path.exists(name):
# The link is not present, make it
@ -5186,15 +5188,9 @@ def comment(name, regex, char='#', backup='.bak'):
comment_regex = char + unanchor_regex
# Check if the line is already commented
if __salt__['file.search'](name, comment_regex, multiline=True):
commented = True
else:
commented = False
# Make sure the pattern appears in the file before continuing
if commented or not __salt__['file.search'](name, regex, multiline=True):
if __salt__['file.search'](name, unanchor_regex, multiline=True):
if not __salt__['file.search'](name, regex, multiline=True):
if __salt__['file.search'](name, comment_regex, multiline=True):
ret['comment'] = 'Pattern already commented'
ret['result'] = True
return ret
@ -5292,18 +5288,18 @@ def uncomment(name, regex, char='#', backup='.bak'):
# Make sure the pattern appears in the file
if __salt__['file.search'](
name,
'{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
multiline=True):
# Line exists and is commented
pass
elif __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True):
ret['comment'] = 'Pattern already uncommented'
ret['result'] = True
return ret
elif __salt__['file.search'](
name,
'{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
multiline=True):
# Line exists and is commented
pass
else:
return _error(ret, '{0}: Pattern not found'.format(regex))

View file

@ -138,7 +138,7 @@ def datasource_exists(name, jboss_config, datasource_properties, recreate=False,
read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
ds_new_properties = read_result['result']
else:
if ds_result['err_code'] == 'JBAS014807': # ok, resource not exists:
if ds_result['err_code'] in ('JBAS014807', 'WFLYCTL0216'): # ok, resource not exists:
create_result = __salt__['jboss7.create_datasource'](jboss_config=jboss_config, name=name, datasource_properties=datasource_properties, profile=profile)
if create_result['success']:
read_result = __salt__['jboss7.read_datasource'](jboss_config=jboss_config, name=name, profile=profile)
@ -246,7 +246,7 @@ def bindings_exist(name, jboss_config, bindings, profile=None):
else:
raise CommandExecutionError(update_result['failure-description'])
else:
if query_result['err_code'] == 'JBAS014807': # ok, resource not exists:
if query_result['err_code'] in ('JBAS014807', 'WFLYCTL0216'): # ok, resource not exists:
create_result = __salt__['jboss7.create_simple_binding'](binding_name=key, value=value, jboss_config=jboss_config, profile=profile)
if create_result['success']:
has_changed = True

View file

@ -309,7 +309,6 @@ import salt.utils.functools
import salt.utils.jid
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
from salt.exceptions import SaltInvocationError
from salt.utils.decorators import with_deprecated
@ -436,63 +435,24 @@ def run(**kwargs):
return ret
def _call_function(name, returner=None, **kwargs):
def _call_function(name, returner=None, func_args=None):
'''
Calls a function from the specified module.
:param name:
:param kwargs:
:return:
:param str name: module.function of the function to call
:param dict returner: Returner specification to use.
:param list func_args: List with args and dicts of kwargs (one dict per kwarg)
to pass to the function.
:return: Result of the function call
'''
argspec = salt.utils.args.get_function_argspec(__salt__[name])
# func_kw is initialized to a dictionary of keyword arguments the function to be run accepts
func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code
argspec.defaults or []))
# func_args is initialized to a list of positional arguments that the function to be run accepts
func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
arg_type, kw_to_arg_type, na_type, kw_type = [], {}, {}, False
for funcset in reversed(kwargs.get('func_args') or []):
if not isinstance(funcset, dict):
# We are just receiving a list of args to the function to be run, so just append
# those to the arg list that we will pass to the func.
arg_type.append(funcset)
else:
for kwarg_key in six.iterkeys(funcset):
# We are going to pass in a keyword argument. The trick here is to make certain
# that if we find that in the *args* list that we pass it there and not as a kwarg
if kwarg_key in func_args:
kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
continue
else:
# Otherwise, we're good and just go ahead and pass the keyword/value pair into
# the kwargs list to be run.
func_kw.update(funcset)
arg_type.reverse()
for arg in func_args:
if arg in kw_to_arg_type:
arg_type.append(kw_to_arg_type[arg])
_exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
_passed_prm = len(arg_type)
missing = []
if na_type and _exp_prm > _passed_prm:
for arg in argspec.args:
if arg not in func_kw:
missing.append(arg)
if missing:
raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing)))
elif _exp_prm > _passed_prm:
raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format(
_exp_prm, _passed_prm))
mret = __salt__[name](*arg_type, **func_kw)
if func_args is None:
func_args = []
mret = salt.utils.functools.call_function(__salt__[name], *func_args)
if returner is not None:
returners = salt.loader.returners(__opts__, __salt__)
if returner in returners:
returners[returner]({'id': __opts__['id'], 'ret': mret,
'fun': name, 'jid': salt.utils.jid.gen_jid(__opts__)})
return mret
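An illustrative call (values invented) showing the ``func_args`` format this helper now expects: plain items become positional arguments, single-key dicts become keyword arguments:
.. code-block:: python

    # Roughly what a module.run state entry like
    #   test.arg:
    #     - func_args:
    #       - fred
    #       - male: true
    # hands to _call_function:
    _call_function('test.arg', func_args=['fred', {'male': True}])
    # ...ends up calling __salt__['test.arg']('fred', male=True)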

View file

@ -401,9 +401,9 @@ def running(name,
type: network
source: admin
- graphics:
- type: spice
type: spice
listen:
- type: address
type: address
address: 192.168.0.125
'''

View file

@ -4,11 +4,12 @@ Manage the Windows System PATH
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.stringutils
# Import Python libs
import os
# Import 3rd-party libs
# Import Salt libs
from salt.ext import six
import salt.utils.stringutils
def __virtual__():
@ -89,7 +90,7 @@ def exists(name, index=None):
- index: -1
'''
try:
name = salt.utils.stringutils.to_unicode(name)
name = os.path.normpath(salt.utils.stringutils.to_unicode(name))
except TypeError:
name = six.text_type(name)
@ -221,7 +222,7 @@ def exists(name, index=None):
'{0} {1} to the PATH{2}.'.format(
'Added' if ret['result'] else 'Failed to add',
name,
' at index {0}'.format(index) if index else ''
' at index {0}'.format(index) if index is not None else ''
)
)

View file

@ -6,7 +6,7 @@ Manage X509 Certificates
:depends: M2Crypto
This module can enable managing a complete PKI infrastructure including creating private keys, CA's,
This module can enable managing a complete PKI infrastructure including creating private keys, CAs,
certificates and CRLs. It includes the ability to generate a private key on a server, and have the
corresponding public key sent to a remote CA to create a CA signed certificate. This can be done in
a secure manner, where private keys are always generated locally and never moved across the network.
@ -94,7 +94,7 @@ the mine where it can be easily retrieved by other minions.
The signing policy defines properties that override any property requested or included in a CRL. It also
can define a restricted list of minons which are allowed to remotely invoke this signing policy.
can define a restricted list of minions which are allowed to remotely invoke this signing policy.
/srv/salt/signing_policies.conf
@ -117,7 +117,7 @@ can define a restricted list of minons which are allowed to remotely invoke this
This state will instruct all minions to trust certificates signed by our new CA.
Using jinja to strip newlines from the text avoids dealing with newlines in the rendered yaml,
Using Jinja to strip newlines from the text avoids dealing with newlines in the rendered YAML,
and the :mod:`sign_remote_certificate <salt.states.x509.sign_remote_certificate>` state will
handle properly formatting the text before writing the output.
@ -266,8 +266,8 @@ def private_key_managed(name,
Cipher for encrypting the private key.
new:
Always create a new key. Defaults to False.
Combining new with :mod:`prereq <salt.states.requsities.preqreq>`, or when used as part of a `managed_private_key` can allow key rotation whenever a new certificiate is generated.
Always create a new key. Defaults to ``False``.
Combining new with :mod:`prereq <salt.states.requsities.preqreq>`, or when used as part of a `managed_private_key` can allow key rotation whenever a new certificate is generated.
overwrite:
Overwrite an existing private key if the provided passphrase cannot decrypt it.
@ -283,7 +283,7 @@ def private_key_managed(name,
Example:
The jinja templating in this example ensures a private key is generated if the file doesn't exist
The JINJA templating in this example ensures a private key is generated if the file doesn't exist
and that a new private key is generated whenever the certificate that uses it is to be renewed.
.. code-block:: jinja
@ -382,7 +382,7 @@ def certificate_managed(name,
Manages the private key corresponding to the certificate. All of the
arguments supported by :py:func:`x509.private_key_managed
<salt.states.x509.private_key_managed>` are supported. If `name` is not
speicified or is the same as the name of the certificate, the private
specified or is the same as the name of the certificate, the private
key and certificate will be written together in the same file.
append_certs:
@ -595,14 +595,14 @@ def crl_managed(name,
Path to the certificate
signing_private_key
The private key that will be used to sign this crl. This is
The private key that will be used to sign the CRL. This is
usually your CA's private key.
signing_private_key_passphrase
Passphrase to decrypt the private key.
signing_cert
The certificate of the authority that will be used to sign this crl.
The certificate of the authority that will be used to sign the CRL.
This is usually your CA's certificate.
revoked
@ -618,8 +618,8 @@ def crl_managed(name,
of pyOpenSSL less than 0.14.
days_remaining : 30
The crl should be automatically recreated if there are less than
``days_remaining`` days until the crl expires. Set to 0 to disable
The CRL should be automatically recreated if there are less than
``days_remaining`` days until the CRL expires. Set to 0 to disable
automatic renewal.
include_expired : False

76
salt/states/xml.py Normal file
View file

@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
'''
XML Manager
===========
State management of XML files
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if the XML execution module is available.
'''
if 'xml.get_value' in __salt__:
return 'xml'
else:
return False, "The xml execution module is not available"
def value_present(name, xpath, value, **kwargs):
'''
.. versionadded:: Neon
Manages a given XML file
name : string
The location of the XML file to manage, as an absolute path.
xpath : string
xpath location to manage
value : string
value to ensure present
.. code-block:: yaml
ensure_value_true:
xml.value_present:
- name: /tmp/test.xml
- xpath: .//playwright[@id='1']
- value: William Shakespeare
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
current_value = __salt__['xml.get_value'](name, xpath)
if not current_value:
ret['result'] = False
ret['comment'] = 'xpath query {0} not found in {1}'.format(xpath, name)
return ret
if current_value != value:
if kwargs['test']:
ret['result'] = None
ret['comment'] = '{0} will be updated'.format(name)
ret['changes'] = {name: {'old': current_value, 'new': value}}
else:
results = __salt__['xml.set_value'](name, xpath, value)
ret['result'] = results
ret['comment'] = '{0} updated'.format(name)
ret['changes'] = {name: {'old': current_value, 'new': value}}
else:
ret['comment'] = '{0} is already present'.format(value)
return ret

View file

@ -1,8 +1,5 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
salt.syspaths
~~~~~~~~~~~~~
@ -21,9 +18,17 @@
from __future__ import absolute_import, print_function, unicode_literals
import sys
import os.path
import logging
__PLATFORM = sys.platform.lower()
typo_warning = True
log = logging.getLogger(__name__)
EXPECTED_VARIABLES = ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR',
'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'HOME_DIR',
'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR',
'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR',
'SPM_PARENT_PATH', 'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH',
'SPM_REACTOR_PATH', 'SHARE_DIR')
try:
# Let's try loading the system paths from the generated module at
@ -32,14 +37,19 @@ try:
except ImportError:
import types
__generated_syspaths = types.ModuleType(str('salt._syspaths')) # future lint: blacklisted-function
for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR',
'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'HOME_DIR',
'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR',
'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR',
'SPM_PARENT_PATH', 'SPM_FORMULA_PATH',
'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH', 'SHARE_DIR'):
for key in EXPECTED_VARIABLES:
setattr(__generated_syspaths, key, None)
else:
for key in EXPECTED_VARIABLES:
if hasattr(__generated_syspaths, key):
continue
else:
if typo_warning:
log.warning('Possible Typo?')
log.warning('To dissolve this warning add `[variable] = None` to _syspaths.py')
typo_warning = False
log.warning('Variable %s is missing, value set to None', key)
setattr(__generated_syspaths, key, None) # missing variables defaulted to None
# Let's find out the path of this module
if 'SETUP_DIRNAME' in globals():

View file

@ -434,16 +434,13 @@ class IPCMessageServer(IPCServer):
# Import Salt libs
import salt.transport.ipc
import salt.config
opts = salt.config.master_opts()
io_loop = tornado.ioloop.IOLoop.current()
ipc_server_socket_path = '/var/run/ipc_server.ipc'
ipc_server = salt.transport.ipc.IPCMessageServer(opts, io_loop=io_loop
stream_handler=print_to_console)
ipc_server = salt.transport.ipc.IPCMessageServer(ipc_server_socket_path, io_loop=io_loop,
payload_handler=print_to_console)
# Bind to the socket and prepare to run
ipc_server.start(ipc_server_socket_path)
ipc_server.start()
# Start the server
io_loop.start()

View file

@ -7,10 +7,18 @@ from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import types
import logging
# Import salt libs
from salt.exceptions import SaltInvocationError
import salt.utils.args
from salt.ext.six.moves import zip
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
'''
@ -59,3 +67,76 @@ def alias_function(fun, name, doc=None):
alias_fun.__doc__ = alias_msg + (fun.__doc__ or '')
return alias_fun
def parse_function(function_arguments):
'''
Helper function to parse function_arguments (module.run format)
into args and kwargs.
This function is similar to salt.utils.data.repack_dictlist, except that this
handles mixed (i.e. dict and non-dict) arguments in the input list.
:param list function_arguments: List of items and dicts with kwargs.
:rtype: dict
:return: Dictionary with ``args`` and ``kwargs`` keys.
'''
function_args = []
function_kwargs = {}
for item in function_arguments:
if isinstance(item, dict):
function_kwargs.update(item)
else:
function_args.append(item)
return {'args': function_args, 'kwargs': function_kwargs}
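Illustrative input and output for the helper (values invented):
.. code-block:: python

    parse_function(['eth0', {'cidr': '10.0.0.0/8'}, 42])
    # {'args': ['eth0', 42], 'kwargs': {'cidr': '10.0.0.0/8'}}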
def call_function(salt_function, *args, **kwargs):
'''
Calls a function from the specified module.
:param function salt_function: Function reference to call
:return: The result of the function call
'''
argspec = salt.utils.args.get_function_argspec(salt_function)
# function_kwargs is initialized to a dictionary of keyword arguments the function to be run accepts
function_kwargs = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code
argspec.defaults or []))
# expected_args is initialized to a list of positional arguments that the function to be run accepts
expected_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
function_args, kw_to_arg_type = [], {}
for funcset in reversed(args or []):
if not isinstance(funcset, dict):
# We are just receiving a list of args to the function to be run, so just append
# those to the arg list that we will pass to the func.
function_args.append(funcset)
else:
for kwarg_key in six.iterkeys(funcset):
# We are going to pass in a keyword argument. The trick here is to make certain
# that if we find that in the *args* list that we pass it there and not as a kwarg
if kwarg_key in expected_args:
kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
else:
# Otherwise, we're good and just go ahead and pass the keyword/value pair into
# the kwargs list to be run.
function_kwargs.update(funcset)
function_args.reverse()
# Add kwargs passed as kwargs :)
function_kwargs.update(kwargs)
for arg in expected_args:
if arg in kw_to_arg_type:
function_args.append(kw_to_arg_type[arg])
_exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
_passed_prm = len(function_args)
missing = []
if _exp_prm > _passed_prm:
for arg in argspec.args[_passed_prm:]:
if arg not in function_kwargs:
missing.append(arg)
if missing:
raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing)))
elif _exp_prm > _passed_prm:
raise SaltInvocationError('Function expects {0} positional parameters, '
'got only {1}'.format(_exp_prm, _passed_prm))
return salt_function(*function_args, **function_kwargs)
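A minimal usage sketch of ``call_function`` with a plain Python function standing in for a loader entry:
.. code-block:: python

    import salt.utils.functools

    def connect(host, port=22, timeout=10):
        return (host, port, timeout)

    # Positional items and dict-style keyword items may be mixed freely.
    salt.utils.functools.call_function(connect, 'example.com', {'timeout': 5})
    # -> ('example.com', 22, 5)

    # A keyword naming a positional parameter is re-routed into *args.
    salt.utils.functools.call_function(connect, {'host': 'example.com'}, {'port': 2222})
    # -> ('example.com', 2222, 10)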

117
salt/utils/mine.py Normal file
View file

@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
'''
This module contains routines used for the salt mine
'''
# Import python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.data
# Import 3rd-party libs
log = logging.getLogger(__name__)
MINE_ITEM_ACL_ID = '__saltmine_acl__'
MINE_ITEM_ACL_VERSION = 1
MINE_ITEM_ACL_DATA = '__data__'
def minion_side_acl_denied(
minion_acl_cache,
mine_minion,
mine_function,
req_minion):
'''
Helper function to determine if a ``req_minion`` is not allowed to retrieve
``mine_function``-data from the mine of ``mine_minion``.
:param dict minion_acl_cache: Contains minion_id as first level key, and mine
function as 2nd level key. Value of 2nd level is a list of minions that
are allowed to retrieve the function from the mine of the minion.
:param str mine_minion: The minion that the mine value originated from.
:param str mine_function: The mine function that is requested.
:param str req_minion: The minion that is requesting the mine data.
:rtype: bool
:return:
False if no ACL has been defined for ``mine_minion``, ``mine_function``.
False if an ACL has been defined and it grants access.
True if an ACL has been defined and does not grant access.
'''
minion_acl_entry = minion_acl_cache.get(mine_minion, {}).get(mine_function, [])
ret = minion_acl_entry and req_minion not in minion_acl_entry
if ret:
log.debug('Salt mine request from %s for function %s on minion %s denied.',
req_minion,
mine_function,
mine_minion)
return ret
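A small illustration of the deny logic (minion and function names invented):
.. code-block:: python

    acl_cache = {'db1': {'network.ip_addrs': ['web1', 'web2']}}

    minion_side_acl_denied(acl_cache, 'db1', 'network.ip_addrs', 'web1')   # falsey: allowed
    minion_side_acl_denied(acl_cache, 'db1', 'network.ip_addrs', 'mail1')  # True: denied
    minion_side_acl_denied(acl_cache, 'db1', 'status.loadavg', 'mail1')    # falsey: no ACL defined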
def wrap_acl_structure(
function_data,
allow_tgt=None,
allow_tgt_type=None):
'''
Helper function to convert a non-ACL mine entry into the new entry which
includes ACL data.
:param dict function_data: The function data to wrap.
:param str allow_tgt: The targeting string that designates which minions can
request this mine entry.
:param str allow_tgt_type: The type of targeting string.
.. seealso:: :ref:`targeting`
:rtype: dict
:return: Mine entry structured to include ACL data.
'''
res = {
MINE_ITEM_ACL_DATA: function_data,
MINE_ITEM_ACL_ID: MINE_ITEM_ACL_VERSION,
}
# Add minion-side ACL
if allow_tgt:
res.update(salt.utils.data.filter_falsey({
'allow_tgt': allow_tgt,
'allow_tgt_type': allow_tgt_type,
}))
return res
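The resulting mine entry looks roughly like this (data invented):
.. code-block:: python

    wrap_acl_structure(['10.0.0.1'], allow_tgt='web*', allow_tgt_type='glob')
    # {'__data__': ['10.0.0.1'],
    #  '__saltmine_acl__': 1,
    #  'allow_tgt': 'web*',
    #  'allow_tgt_type': 'glob'}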
def parse_function_definition(function_definition):
'''
Helper function to parse the mine_function definition as provided in config,
or pillar.
:param function_definition: The function definition to parse.
:type function_definition: list or dict
:rtype: tuple
:return: Tuple with function_name, function_args, function_kwargs, minion_acl (dict)
'''
function_name = None
function_args = []
function_kwargs = {}
minion_acl = {}
if isinstance(function_definition, dict):
# dictionary format for specifying mine function
function_name = function_definition.pop('mine_function', None)
function_kwargs = function_definition
elif isinstance(function_definition, list):
for item in function_definition:
if isinstance(item, dict):
# A dict item may carry one or more kwargs; merge them all
function_kwargs.update(item)
else:
function_args.append(item)
function_name = function_kwargs.pop('mine_function', None)
minion_acl = salt.utils.data.filter_falsey({
'allow_tgt': function_kwargs.pop('allow_tgt', None),
'allow_tgt_type': function_kwargs.pop('allow_tgt_type', None),
})
return (function_name, function_args, function_kwargs, minion_acl)
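Two illustrative parses (values invented):
.. code-block:: python

    # Dictionary form
    parse_function_definition({'mine_function': 'network.ip_addrs', 'interface': 'eth0'})
    # ('network.ip_addrs', [], {'interface': 'eth0'}, {})

    # List form with a minion-side ACL
    parse_function_definition(['eth0', {'mine_function': 'network.ip_addrs'}, {'allow_tgt': 'web*'}])
    # ('network.ip_addrs', ['eth0'], {}, {'allow_tgt': 'web*'})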

View file

@ -1112,26 +1112,3 @@ class CkMinions(object):
if good:
return True
return False
def mine_get(tgt, fun, tgt_type='glob', opts=None):
'''
Gathers the data from the specified minions' mine, pass in the target,
function to look up and the target type
'''
ret = {}
serial = salt.payload.Serial(opts)
checker = CkMinions(opts)
_res = checker.check_minions(
tgt,
tgt_type)
minions = _res['minions']
cache = salt.cache.factory(opts)
for minion in minions:
mdata = cache.fetch('minions/{0}'.format(minion), 'mine')
if mdata is None:
continue
fdata = mdata.get(fun)
if fdata:
ret[minion] = fdata
return ret

View file

@ -46,12 +46,20 @@ def _get_config(**kwargs):
'''
Return configuration
'''
sk_file = kwargs.get('sk_file')
if not sk_file:
sk_file = os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl')
pk_file = kwargs.get('pk_file')
if not pk_file:
pk_file = os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl.pub')
config = {
'box_type': 'sealedbox',
'box_type': kwargs.get('box_type', 'sealedbox'),
'sk': None,
'sk_file': os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl'),
'sk_file': sk_file,
'pk': None,
'pk_file': os.path.join(kwargs['opts'].get('pki_dir'), 'master/nacl.pub'),
'pk_file': pk_file,
}
config_key = '{0}.config'.format(__virtualname__)
@ -122,13 +130,16 @@ def keygen(sk_file=None, pk_file=None, **kwargs):
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl
salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.keygen
sk_file
Path to where the secret key exists.
The argument ``keyfile`` was deprecated
in favor of ``sk_file``. ``keyfile`` will
continue to work to ensure backwards
compatibility, but please use the preferred
``sk_file``.
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
sk_file = kwargs['keyfile']
if sk_file is None:
@ -190,23 +201,33 @@ def enc(data, **kwargs):
Alias to `{box_type}_encrypt`
box_type: secretbox, sealedbox(default)
sk_file
Path to where the secret key exists.
The argument ``keyfile`` was deprecated
in favor of ``sk_file``. ``keyfile`` will
continue to work to ensure backwards
compatibility, but please use the preferred
``sk_file``.
sk
Secret key contents. The argument ``key``
was deprecated in favor of ``sk``. ``key``
will continue to work to ensure backwards
compatibility, but please use the preferred
``sk``.
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_encrypt(data, **kwargs)
@ -251,24 +272,28 @@ def dec(data, **kwargs):
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
sk_file
Path to where the secret key exists.
The argument ``keyfile`` was deprecated
in favor of ``sk_file``. ``keyfile`` will
continue to work to ensure backwards
compatibility, but please use the preferred
``sk_file``.
sk
Secret key contents. The argument ``key``
was deprecated in favor of ``sk``. ``key``
will continue to work to ensure backwards
compatibility, but please use the preferred
``sk``.
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility

View file

@ -684,8 +684,8 @@ def system_information():
version = item
release = version
_, ver, _sp, extra = platform.win32_ver()
version = ' '.join([release, ver, _sp, extra])
_, ver, service_pack, extra = platform.win32_ver()
version = ' '.join([release, ver, service_pack, extra])
else:
version = system_version()
release = platform.release()

View file

@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Li Kexian <doyenli@tencent.com>
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import Salt Libs
from salt.config import cloud_providers_config
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.runtests import RUNTIME_VARS
from tests.support.helpers import expensiveTest, generate_random_name
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = generate_random_name('CLOUD-TEST-')
PROVIDER_NAME = 'tencentcloud'
@expensiveTest
class TencentCloudTest(ShellCase):
'''
Integration tests for the Tencent Cloud cloud provider in Salt-Cloud
'''
def setUp(self):
'''
Sets up the test requirements
'''
super(TencentCloudTest, self).setUp()
# check if appropriate cloud provider and profile files are present
profile_str = 'tencentcloud-config'
providers = self.run_cloud('--list-providers')
if profile_str + ':' not in providers:
self.skipTest(
'Configuration file for {0} was not found. Check {0}.conf files '
'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
.format(PROVIDER_NAME)
)
# check if the required api id and key are present
config = cloud_providers_config(
os.path.join(
RUNTIME_VARS.FILES,
'conf',
'cloud.providers.d',
PROVIDER_NAME + '.conf'
)
)
tid = config[profile_str][PROVIDER_NAME]['id']
key = config[profile_str][PROVIDER_NAME]['key']
if tid == '' or key == '':
self.skipTest(
'An api id and key must be provided to run these tests. Check '
'tests/integration/files/conf/cloud.providers.d/{0}.conf'.format(
PROVIDER_NAME
)
)
def test_instance(self):
'''
Test creating an instance on Tencent Cloud
'''
# check if instance with salt installed returned
try:
self.assertIn(
INSTANCE_NAME,
[i.strip() for i in self.run_cloud(
'-p tencentcloud-test {0}'.format(INSTANCE_NAME), timeout=500)]
)
except AssertionError:
self.run_cloud(
'-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)
raise
# delete the instance
self.assertIn(
INSTANCE_NAME + ':',
[i.strip() for i in self.run_cloud(
'-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)]
)
def tearDown(self):
'''
Clean up after tests
'''
query = self.run_cloud('--query')
ret_str = ' {0}:'.format(INSTANCE_NAME)
# if test instance is still present, delete it
if ret_str in query:
self.run_cloud(
'-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500)

View file

@ -0,0 +1,8 @@
tencentcloud-test:
provider: tencentcloud-config
availability_zone: ap-beijing-1
image: img-31tjrtph
size: S1.SMALL1
allocate_public_ip: False
internet_max_bandwidth_out: 1
password: ''

View file

@ -0,0 +1,5 @@
tencentcloud-config:
driver: tencentcloud
id: ''
key: ''
location: 'ap-beijing'

View file

@ -0,0 +1,3 @@
saltcheck-test-pass:
test.succeed_without_changes:
- name: testing-saltcheck

View file

@ -0,0 +1,7 @@
check_all_validate:
module_and_function: test.echo
args:
- "check"
kwargs:
assertion: assertEqual
expected_return: 'check'

View file

@ -0,0 +1,7 @@
echo_test_hello:
module_and_function: test.echo
args:
- "hello"
kwargs:
assertion: assertEqual
expected_return: 'hello'

View file

@ -0,0 +1,3 @@
saltcheck-prod-test-pass:
test.succeed_without_changes:
- name: testing-saltcheck-prodenv

View file

@ -0,0 +1,7 @@
check_all_validate_prod:
module_and_function: test.echo
args:
- "check-prod"
kwargs:
assertion: assertEqual
expected_return: 'check-prod'

View file

@ -0,0 +1,7 @@
echo_test_prod_env:
module_and_function: test.echo
args:
- "test-prod"
kwargs:
assertion: assertEqual
expected_return: 'test-prod'

View file

@ -12,12 +12,12 @@ from tests.support.helpers import (
destructiveTest,
requires_network,
requires_salt_modules,
requires_salt_states,
requires_system_grains,
skip_if_not_root)
from tests.support.unit import skipIf
# Import Salt libs
from salt.ext import six
import salt.utils.path
import salt.utils.pkg
import salt.utils.platform
@ -181,37 +181,47 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
test_remove()
@destructiveTest
@requires_salt_modules('pkg.hold', 'pkg.unhold', 'pkg.install', 'pkg.version', 'pkg.remove')
@requires_salt_modules('pkg.hold', 'pkg.unhold', 'pkg.install', 'pkg.version', 'pkg.remove', 'pkg.list_pkgs')
@requires_salt_states('pkg.installed')
@requires_network()
@requires_system_grains
def test_hold_unhold(self, grains):
'''
test holding and unholding a package
'''
ret = None
versionlock_pkg = None
if grains['os_family'] == 'RedHat':
# get correct plugin for dnf packages following the logic in `salt.modules.yumpkg._yum`
lock_pkg = 'yum-versionlock' if grains['osmajorrelease'] == '5' else 'yum-plugin-versionlock'
if 'fedora' in grains['os'].lower() and int(grains['osrelease']) >= 22:
if int(grains['osmajorrelease']) >= 26:
lock_pkg = 'python{py}-dnf-plugin-versionlock'.format(py=3 if six.PY3 else 2)
else:
lock_pkg = 'python{py}-dnf-plugins-extras-versionlock'.format(py=3 if six.PY3 else '')
ret = self.run_state('pkg.installed', name=lock_pkg)
pkgs = {p for p in self.run_function('pkg.list_pkgs') if '-versionlock' in p}
if not pkgs:
self.skipTest('No versionlock package found in repositories')
for versionlock_pkg in pkgs:
ret = self.run_state('pkg.installed', name=versionlock_pkg, refresh=False)
# Exit loop if a versionlock package installed correctly
try:
self.assertSaltTrueReturn(ret)
break
except AssertionError:
pass
else:
self.fail('Could not install versionlock package from {}'.format(pkgs))
self.run_function('pkg.install', [self.pkg])
hold_ret = self.run_function('pkg.hold', [self.pkg])
if 'versionlock is not installed' in hold_ret:
self.run_function('pkg.remove', [self.pkg])
self.skipTest('Versionlock could not be installed on this system: {}'.format(ret))
self.assertIn(self.pkg, hold_ret)
self.assertTrue(hold_ret[self.pkg]['result'])
try:
hold_ret = self.run_function('pkg.hold', [self.pkg])
if versionlock_pkg and '-versionlock is not installed' in str(hold_ret):
self.skipTest('versionlock reported as not installed even though `{}` was installed: {}'.format(versionlock_pkg, hold_ret))
self.assertIn(self.pkg, hold_ret)
self.assertTrue(hold_ret[self.pkg]['result'])
unhold_ret = self.run_function('pkg.unhold', [self.pkg])
self.assertIn(self.pkg, unhold_ret)
self.assertTrue(unhold_ret[self.pkg]['result'])
self.run_function('pkg.remove', [self.pkg])
unhold_ret = self.run_function('pkg.unhold', [self.pkg])
self.assertIn(self.pkg, unhold_ret)
self.assertTrue(unhold_ret[self.pkg]['result'])
self.run_function('pkg.remove', [self.pkg])
finally:
if versionlock_pkg:
ret = self.run_state('pkg.removed', name=versionlock_pkg)
self.assertSaltTrueReturn(ret)
@destructiveTest
@requires_salt_modules('pkg.refresh_db')
@ -345,6 +355,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
@destructiveTest
@skipIf(salt.utils.platform.is_darwin(), 'The jenkins user is equivalent to root on mac, causing the test to be unrunnable')
@requires_salt_modules('pkg.remove', 'pkg.latest_version')
@requires_salt_states('pkg.removed')
@requires_system_grains
def test_pkg_latest_version(self, grains):
'''

View file

@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
'''
Test the saltcheck module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase
class SaltcheckModuleTest(ModuleCase):
'''
Test the saltcheck module
'''
def test_saltcheck_run(self):
'''
saltcheck.run_test
'''
saltcheck_test = {"module_and_function": "test.echo",
"assertion": "assertEqual",
"expected_return": "This works!",
"args": ["This works!"]}
ret = self.run_function('saltcheck.run_test', test=saltcheck_test)
self.assertDictContainsSubset({'status': 'Pass'}, ret)
def test_saltcheck_state(self):
'''
saltcheck.run_state_tests
'''
saltcheck_test = 'validate-saltcheck'
ret = self.run_function('saltcheck.run_state_tests', [saltcheck_test])
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['echo_test_hello'])
def test_topfile_validation(self):
'''
saltcheck.run_highstate_tests
'''
expected_top_states = self.run_function('state.show_top').get('base', [])
expected_top_states.append('TEST RESULTS')
ret = self.run_function('saltcheck.run_highstate_tests')
for top_state_dict in ret:
self.assertIn(list(top_state_dict)[0], expected_top_states)
def test_saltcheck_checkall(self):
'''
Validate saltcheck.run_state_tests check_all for the default saltenv of base.
validate-saltcheck state hosts a saltcheck-tests directory with 2 .tst files. By running
check_all=True, both files should be found and show passed results.
'''
saltcheck_test = 'validate-saltcheck'
ret = self.run_function('saltcheck.run_state_tests', [saltcheck_test], check_all=True)
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['echo_test_hello'])
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['check_all_validate'])
def test_saltcheck_checkall_saltenv(self):
'''
Validate saltcheck.run_state_tests check_all for the prod saltenv
validate-saltcheck state hosts a saltcheck-tests directory with 2 .tst files. By running
check_all=True, both files should be found and show passed results.
'''
saltcheck_test = 'validate-saltcheck'
ret = self.run_function('saltcheck.run_state_tests', [saltcheck_test], saltenv='prod', check_all=True)
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['echo_test_prod_env'])
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['check_all_validate_prod'])
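For orientation only, the same saltcheck calls can be made outside the test harness with Salt's Python client; this is a rough sketch assuming a configured master and a hypothetical minion id 'minion1', and is not part of the test suite.

import salt.client

client = salt.client.LocalClient()

# Single inline test, mirroring test_saltcheck_run above.
single = client.cmd(
    'minion1', 'saltcheck.run_test',
    kwarg={'test': {'module_and_function': 'test.echo',
                    'assertion': 'assertEqual',
                    'expected_return': 'This works!',
                    'args': ['This works!']}})

# check_all run, mirroring test_saltcheck_checkall above.
all_tests = client.cmd(
    'minion1', 'saltcheck.run_state_tests',
    arg=['validate-saltcheck'], kwarg={'check_all': True})
print(single, all_tests)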

View file

@ -12,8 +12,8 @@ import textwrap
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest, skip_if_not_root, flaky
from tests.support.unit import skipIf, SkipTest
from tests.support.helpers import destructiveTest, skip_if_not_root, flaky, requires_system_grains
# Import Salt libs
import salt.utils.files
@ -32,23 +32,33 @@ class SystemModuleTest(ModuleCase):
'''
Validate the date/time functions in the system module
'''
fmt_str = "%Y-%m-%d %H:%M:%S"
_hwclock_has_compare_ = None
_systemd_timesyncd_available_ = None
@classmethod
def setUpClass(cls):
@requires_system_grains
def setUpClass(cls, grains): # pylint: disable=arguments-differ
if grains['kernel'] != 'Linux':
raise SkipTest(
'Test not applicable to \'{kernel}\' kernel'.format(
**grains
)
)
cls.fmt_str = "%Y-%m-%d %H:%M:%S"
cls._orig_time = None
cls._machine_info = True
@classmethod
def tearDownClass(cls):
for name in ('fmt_str', '_orig_time', '_machine_info'):
delattr(cls, name)
def setUp(self):
super(SystemModuleTest, self).setUp()
os_grain = self.run_function('grains.item', ['kernel'])
if os_grain['kernel'] not in 'Linux':
self.skipTest(
'Test not applicable to \'{kernel}\' kernel'.format(
**os_grain
)
)
if self.run_function('service.available', ['systemd-timesyncd']):
if self._systemd_timesyncd_available_ is None:
SystemModuleTest._systemd_timesyncd_available_ = self.run_function('service.available', ['systemd-timesyncd'])
if self._systemd_timesyncd_available_:
self.run_function('service.stop', ['systemd-timesyncd'])
def tearDown(self):
@ -58,7 +68,7 @@ class SystemModuleTest(ModuleCase):
if self._machine_info is not True:
self._restore_machine_info()
self._machine_info = True
if self.run_function('service.available', ['systemd-timesyncd']):
if self._systemd_timesyncd_available_:
self.run_function('service.start', ['systemd-timesyncd'])
def _save_time(self):
@ -87,8 +97,12 @@ class SystemModuleTest(ModuleCase):
systems where it's not present so that we can skip the
comparison portion of the test.
'''
res = self.run_function('cmd.run_all', cmd='hwclock -h')
return res['retcode'] == 0 and res['stdout'].find('--compare') > 0
if self._hwclock_has_compare_ is None:
res = self.run_function('cmd.run_all', cmd='hwclock -h')
SystemModuleTest._hwclock_has_compare_ = (
res['retcode'] == 0 and res['stdout'].find('--compare') > 0
)
return self._hwclock_has_compare_
def _test_hwclock_sync(self):
'''

View file

@ -10,7 +10,7 @@ import time
# Import Salt Testing Libs
from tests.support.unit import skipIf
from tests.support.case import ModuleCase, ShellCase
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, flaky
from tests.support.paths import FILES
@ -24,7 +24,7 @@ log = logging.getLogger(__name__)
@destructiveTest
@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed')
@skipIf(not salt.utils.path.which('vault'), 'Vault not installed')
class VaultTestCase(ModuleCase, ShellCase):
class VaultTestCase(ModuleCase):
'''
Test vault module
'''

View file

@ -15,7 +15,7 @@ try:
import libnacl.secret # pylint: disable=unused-import
import libnacl.sealed # pylint: disable=unused-import
HAS_LIBNACL = True
except ImportError:
except (ImportError, OSError, AttributeError):
HAS_LIBNACL = False
log = logging.getLogger(__name__)

View file

@ -7,12 +7,11 @@ Simple Smoke Tests for Connected SSH minions
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import requires_sshd_server
from tests.support.case import SSHCase
from tests.support.helpers import skip_if_not_root, requires_system_grains
@requires_sshd_server
class SSHMasterTestCase(ModuleCase):
class SSHMasterTestCase(SSHCase):
'''
Test ssh master functionality
'''
@ -20,13 +19,15 @@ class SSHMasterTestCase(ModuleCase):
'''
Ensure the proxy can ping
'''
ret = self.run_function('test.ping', minion_tgt='localhost')
ret = self.run_function('test.ping')
self.assertEqual(ret, True)
def test_service(self):
@requires_system_grains
@skip_if_not_root
def test_service(self, grains):
service = 'cron'
os_family = self.run_function('grains.get', ['os_family'], minion_tgt='localhost')
os_release = self.run_function('grains.get', ['osrelease'], minion_tgt='localhost')
os_family = grains['os_family']
os_release = grains['osrelease']
if os_family == 'RedHat':
service = 'crond'
elif os_family == 'Arch':
@ -35,29 +36,30 @@ class SSHMasterTestCase(ModuleCase):
service = 'org.ntp.ntpd'
if int(os_release.split('.')[1]) >= 13:
service = 'com.apple.AirPlayXPCHelper'
ret = self.run_function('service.get_all', minion_tgt='localhost')
ret = self.run_function('service.get_all')
self.assertIn(service, ret)
self.run_function('service.stop', [service], minion_tgt='localhost')
ret = self.run_function('service.status', [service], minion_tgt='localhost')
self.run_function('service.stop', [service])
ret = self.run_function('service.status', [service])
self.assertFalse(ret)
self.run_function('service.start', [service], minion_tgt='localhost')
ret = self.run_function('service.status', [service], minion_tgt='localhost')
self.run_function('service.start', [service])
ret = self.run_function('service.status', [service])
self.assertTrue(ret)
def test_grains_items(self):
os_family = self.run_function('grains.get', ['os_family'], minion_tgt='localhost')
ret = self.run_function('grains.items', minion_tgt='localhost')
@requires_system_grains
def test_grains_items(self, grains):
os_family = grains['os_family']
ret = self.run_function('grains.items')
if os_family == 'MacOS':
self.assertEqual(ret['kernel'], 'Darwin')
else:
self.assertEqual(ret['kernel'], 'Linux')
def test_state_apply(self):
ret = self.run_function('state.apply', ['core'], minion_tgt='localhost')
ret = self.run_function('state.apply', ['core'])
for key, value in ret.items():
self.assertTrue(value['result'])
def test_state_highstate(self):
ret = self.run_function('state.highstate', minion_tgt='localhost')
ret = self.run_function('state.highstate')
for key, value in ret.items():
self.assertTrue(value['result'])

View file

@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.case import SSHCase
from tests.support.unit import skipIf
# Import Salt Libs
import salt.utils.platform
@skipIf(salt.utils.platform.is_windows(), 'salt-ssh not available on Windows')
class SSHSaltcheckTest(SSHCase):
'''
testing saltcheck with salt-ssh
'''
def test_saltcheck_run_test(self):
'''
test saltcheck.run_test with salt-ssh
'''
saltcheck_test = {"module_and_function": "test.echo",
"assertion": "assertEqual",
"expected-return": "Test Works",
"args": ["Test Works"]}
ret = self.run_function('saltcheck.run_test', test=saltcheck_test)
self.assertDictContainsSubset({'status': 'Pass'}, ret)
def test_saltcheck_state(self):
'''
saltcheck.run_state_tests
'''
saltcheck_test = 'validate-saltcheck'
ret = self.run_function('saltcheck.run_state_tests', [saltcheck_test])
self.assertDictContainsSubset({'status': 'Pass'}, ret[0]['validate-saltcheck']['echo_test_hello'])

View file

@ -5,7 +5,6 @@ tests for pkg state
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import time
@ -17,7 +16,10 @@ from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_salt_modules,
)
requires_salt_states,
requires_system_grains,
runs_on,
not_runs_on)
# Import Salt libs
import salt.utils.files
@ -29,73 +31,50 @@ import salt.utils.platform
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
try:
from distro import LinuxDistribution
pre_grains = LinuxDistribution()
except ImportError:
pre_grains = None
log = logging.getLogger(__name__)
_PKG_EPOCH_TARGETS = []
_PKG_TARGETS = ['figlet', 'sl']
_PKG_32_TARGETS = []
_PKG_CAP_TARGETS = []
_PKG_DOT_TARGETS = []
_WILDCARDS_SUPPORTED = False
_VERSION_SPEC_SUPPORTED = True
if salt.utils.platform.is_windows():
_PKG_TARGETS = ['7zip', 'putty']
elif salt.utils.platform.is_freebsd():
_VERSION_SPEC_SUPPORTED = False
elif pre_grains:
if any(arch in pre_grains.like() for arch in ('arch', 'archlinux')):
_WILDCARDS_SUPPORTED = True
elif 'debian' in pre_grains.like():
_WILDCARDS_SUPPORTED = True
elif 'rhel' in pre_grains.like():
_PKG_TARGETS = ['units', 'zsh-html']
_WILDCARDS_SUPPORTED = True
if pre_grains.id() == 'centos':
if pre_grains.major_version() == 5:
_PKG_32_TARGETS = ['xz-devel.i386']
else:
_PKG_32_TARGETS.append('xz-devel.i686')
if pre_grains.major_version() == 5:
_PKG_DOT_TARGETS = ['python-migrate0.5']
elif pre_grains.major_version() == 6:
_PKG_DOT_TARGETS = ['tomcat6-el-2.1-api']
elif pre_grains.major_version() == 7:
_PKG_DOT_TARGETS = ['tomcat-el-2.2-api']
_PKG_EPOCH_TARGETS = ['comps-extras']
elif pre_grains.id() in ('sles', 'opensuse'):
_PKG_TARGETS = ['figlet', 'htop']
_PKG_CAP_TARGETS = [('perl(ZNC)', 'znc-perl')]
def runs_on(platforms=None, os_like=None, reason=''):
def decorator(caller):
@functools.wraps(caller)
def wrapper(cls):
if platforms:
if not any(getattr(salt.utils.platform, 'is_' + platform)() for platform in platforms):
cls.skipTest(reason if reason else 'OS not in [{}]'.format(', '.join(platforms)))
if pre_grains and os_like:
if not any(x in pre_grains.like() for x in os_like):
cls.skipTest(reason if reason else 'OS not similar to [{}]'.format(', '.join(os_like)))
return caller(cls)
return wrapper
return decorator
@destructiveTest
class PkgTest(ModuleCase, SaltReturnAssertsMixin):
_PKG_EPOCH_TARGETS = []
_PKG_32_TARGETS = []
_PKG_CAP_TARGETS = []
_PKG_DOT_TARGETS = []
_WILDCARDS_SUPPORTED = False
_VERSION_SPEC_SUPPORTED = True
@classmethod
def setUpClass(cls):
@requires_system_grains
def setUpClass(cls, grains=None): # pylint:disable=W0221
cls.ctx = {}
cls._PKG_TARGETS = ['figlet', 'sl']
if grains['os'] == 'Windows':
cls._PKG_TARGETS = ['7zip', 'putty']
elif grains['os'] == 'FreeBSD':
cls._VERSION_SPEC_SUPPORTED = False
elif grains['os_family'] in ('Arch', 'Debian'):
cls._WILDCARDS_SUPPORTED = True
elif grains['os'] == 'Amazon':
cls._PKG_TARGETS = ['lynx', 'gnuplot']
elif grains['os_family'] == 'RedHat':
cls._PKG_TARGETS = ['units', 'zsh-html']
cls._WILDCARDS_SUPPORTED = True
if grains['os'] == 'CentOS':
if grains['osmajorrelease'] == 5:
cls._PKG_32_TARGETS = ['xz-devel.i386']
else:
cls._PKG_32_TARGETS.append('xz-devel.i686')
if grains['osmajorrelease'] == 5:
cls._PKG_DOT_TARGETS = ['python-migrate0.5']
elif grains['osmajorrelease'] == 6:
cls._PKG_DOT_TARGETS = ['tomcat6-el-2.1-api']
elif grains['osmajorrelease'] == 7:
cls._PKG_DOT_TARGETS = ['tomcat-el-2.2-api']
cls._PKG_EPOCH_TARGETS = ['comps-extras']
elif grains['os_family'] == 'Suse':
cls._PKG_TARGETS = ['lynx', 'htop']
if grains['os'] == 'SUSE':
cls._PKG_CAP_TARGETS = [('perl(ZNC)', 'znc-perl')]
@classmethod
def tearDownClass(cls):
@ -126,18 +105,30 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
return ret[names[0]]
return ret
def setUp(self):
@requires_system_grains
def setUp(self, grains=None): # pylint:disable=W0221
super(PkgTest, self).setUp()
if 'refresh' not in self.ctx:
self.run_function('pkg.refresh_db')
self.ctx['refresh'] = True
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed')
# If this is Arch Linux, check if pacman is in use by another process
if grains['os_family'] == 'Arch':
for _ in range(12):
if not os.path.isfile('/var/lib/pacman/db.lck'):
break
else:
time.sleep(5)
else:
raise Exception('Package database locked after 60 seconds, bailing out')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_001_installed(self):
'''
This is a destructive test as it installs and then removes a package
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.run_function('pkg.version', [target])
# If this assert fails, we need to find new targets, this test needs to
@ -151,21 +142,12 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _VERSION_SPEC_SUPPORTED, 'Version specification not supported')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_002_installed_with_version(self):
'''
This is a destructive test as it installs and then removes a package
'''
if pre_grains and 'arch' in pre_grains.like():
for idx in range(13):
if idx == 12:
raise Exception('Package database locked after 60 seconds, '
'bailing out')
if not os.path.isfile('/var/lib/pacman/db.lck'):
break
time.sleep(5)
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.latest_version(target)
# If this assert fails, we need to find new targets, this test needs to
@ -181,52 +163,43 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_003_installed_multipkg(self):
'''
This is a destructive test as it installs and then removes two packages
'''
version = self.run_function('pkg.version', _PKG_TARGETS)
version = self.run_function('pkg.version', self._PKG_TARGETS)
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so these
# packages need to not be installed before we run the states below
self.assertFalse(any(version.values()))
self.assertSaltTrueReturn(self.run_state('pkg.removed', name=None, pkgs=_PKG_TARGETS))
self.assertSaltTrueReturn(self.run_state('pkg.removed', name=None, pkgs=self._PKG_TARGETS))
try:
ret = self.run_state('pkg.installed',
name=None,
pkgs=_PKG_TARGETS,
pkgs=self._PKG_TARGETS,
refresh=False)
self.assertSaltTrueReturn(ret)
finally:
ret = self.run_state('pkg.removed', name=None, pkgs=_PKG_TARGETS)
ret = self.run_state('pkg.removed', name=None, pkgs=self._PKG_TARGETS)
self.assertSaltTrueReturn(ret)
@skipIf(not _VERSION_SPEC_SUPPORTED, 'Version specification not supported')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_004_installed_multipkg_with_version(self):
'''
This is a destructive test as it installs and then removes two packages
'''
if pre_grains and 'arch' in pre_grains.like():
for idx in range(13):
if idx == 12:
raise Exception('Package database locked after 60 seconds, '
'bailing out')
if not os.path.isfile('/var/lib/pacman/db.lck'):
break
time.sleep(5)
version = self.latest_version(_PKG_TARGETS[0])
version = self.latest_version(self._PKG_TARGETS[0])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so these
# packages need to not be installed before we run the states below
self.assertTrue(bool(version))
pkgs = [{_PKG_TARGETS[0]: version}, _PKG_TARGETS[1]]
pkgs = [{self._PKG_TARGETS[0]: version}, self._PKG_TARGETS[1]]
try:
ret = self.run_state('pkg.installed',
@ -235,16 +208,17 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
refresh=False)
self.assertSaltTrueReturn(ret)
finally:
ret = self.run_state('pkg.removed', name=None, pkgs=_PKG_TARGETS)
ret = self.run_state('pkg.removed', name=None, pkgs=self._PKG_TARGETS)
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_32_TARGETS, 'No 32 bit packages have been specified for testing')
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_005_installed_32bit(self):
'''
This is a destructive test as it installs and then removes a package
'''
target = _PKG_32_TARGETS[0]
target = self._PKG_32_TARGETS[0]
# _PKG_TARGETS_32 is only populated for platforms for which Salt has to
# munge package names for 32-bit-on-x86_64 (Currently only Ubuntu and
@ -265,25 +239,16 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_32_TARGETS, 'No 32 bit packages have been specified for testing')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_006_installed_32bit_with_version(self):
'''
This is a destructive test as it installs and then removes a package
'''
target = _PKG_32_TARGETS[0]
target = self._PKG_32_TARGETS[0]
# _PKG_TARGETS_32 is only populated for platforms for which Salt has to
# munge package names for 32-bit-on-x86_64 (Currently only Ubuntu and
# RHEL-based). Don't actually perform this test on other platforms.
if pre_grains and 'arch' in pre_grains.like():
for idx in range(13):
if idx == 12:
raise Exception('Package database locked after 60 seconds, '
'bailing out')
if not os.path.isfile('/var/lib/pacman/db.lck'):
break
time.sleep(5)
version = self.latest_version(target)
# If this assert fails, we need to find a new target. This test
@ -301,7 +266,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_DOT_TARGETS, 'No packages with "." in their name have been configured for')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_007_with_dot_in_pkgname(self=None):
'''
This tests for the regression found in the following issue:
@ -309,7 +274,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
This is a destructive test as it installs a package
'''
target = _PKG_DOT_TARGETS[0]
target = self._PKG_DOT_TARGETS[0]
version = self.latest_version(target)
# If this assert fails, we need to find a new target. This test
@ -323,7 +288,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_EPOCH_TARGETS, 'No targets have been configured with "epoch" in the version')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_008_epoch_in_version(self):
'''
This tests for the regression found in the following issue:
@ -331,7 +296,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
This is a destructive test as it installs a package
'''
target = _PKG_EPOCH_TARGETS[0]
target = self._PKG_EPOCH_TARGETS[0]
version = self.latest_version(target)
# If this assert fails, we need to find a new target. This test
@ -347,8 +312,10 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.version', 'pkg.info_installed', 'pkg.installed', 'pkg.removed')
@runs_on(platforms=['linux'], reason='This test only runs on linux')
@requires_salt_modules('pkg.version', 'pkg.info_installed')
@requires_salt_states('pkg.installed', 'pkg.removed')
@runs_on(kernel='linux')
@not_runs_on(os='Amazon')
def test_pkg_009_latest_with_epoch(self):
'''
This tests for the following issue:
@ -367,13 +334,13 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_function('pkg.info_installed', [package])
self.assertTrue(pkgquery in six.text_type(ret))
@requires_salt_modules('pkg.latest', 'pkg.removed')
@requires_salt_states('pkg.latest', 'pkg.removed')
def test_pkg_010_latest(self):
'''
This tests pkg.latest with a package that has no epoch (or a zero
epoch).
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.latest_version(target)
# If this assert fails, we need to find new targets, this test needs to
@ -386,14 +353,15 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.latest', 'pkg.list_pkgs', 'pkg.list_upgrades', 'pkg.version')
@runs_on(platforms=['linux'], os_like=['debian'], reason='This test only runs on Debian based linux distributions')
@requires_salt_modules('pkg.list_pkgs', 'pkg.list_upgrades', 'pkg.version')
@requires_salt_states('pkg.latest')
@runs_on(kernel='linux', os_family='Debian')
def test_pkg_011_latest_only_upgrade(self):
'''
WARNING: This test will pick a package with an available upgrade (if
there is one) and upgrade it to the latest version.
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
# If this assert fails, we need to find new targets, this test needs to
# be able to test that the state fails when you try to run the state
@ -437,12 +405,13 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
)
@skipIf(not _WILDCARDS_SUPPORTED, 'Wildcards in pkg.install are not supported')
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_012_installed_with_wildcard_version(self):
'''
This is a destructive test as it installs and then removes a package
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.run_function('pkg.version', [target])
# If this assert fails, we need to find new targets, this test needs to
@ -486,13 +455,14 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.version', 'pkg.latest_version', 'pkg.installed', 'pkg.removed')
@runs_on(platforms=['linux'], os_like=['debian', 'redhat'], reason='Comparison operator not specially implemented')
@requires_salt_modules('pkg.version', 'pkg.latest_version')
@requires_salt_states('pkg.installed', 'pkg.removed')
@runs_on(kernel='linux', os_family=['Debian', 'RedHat'])
def test_pkg_013_installed_with_comparison_operator(self):
'''
This is a destructive test as it installs and then removes a package
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.run_function('pkg.version', [target])
# If this assert fails, we need to find new targets, this test needs to
@ -522,14 +492,15 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed')
@runs_on(platforms=['linux'], os_like=['redhat'], reason='Comparison operator not specially implemented')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed')
@runs_on(kernel='linux', os_family='RedHat')
def test_pkg_014_installed_missing_release(self):
'''
Tests that a version number missing the release portion still resolves
as correctly installed. For example, version 2.0.2 instead of 2.0.2-1.el7
'''
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
version = self.run_function('pkg.version', [target])
# If this assert fails, we need to find new targets, this test needs to
@ -549,23 +520,31 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
@requires_salt_modules('pkg.hold', 'pkg.unhold', 'pkg.installed', 'pkg.removed')
def test_pkg_015_installed_held(self):
@requires_salt_modules('pkg.hold', 'pkg.unhold', 'pkg.version', 'pkg.list_pkgs')
@requires_salt_states('pkg.installed', 'pkg.removed')
@requires_system_grains
def test_pkg_015_installed_held(self, grains=None):
'''
Tests that a package can be held even when the package is already installed.
'''
versionlock_pkg = None
if grains['os_family'] == 'RedHat':
pkgs = {p for p in self.run_function('pkg.list_pkgs') if '-versionlock' in p}
if not pkgs:
self.skipTest('No versionlock package found in repositories')
for versionlock_pkg in pkgs:
ret = self.run_state('pkg.installed', name=versionlock_pkg, refresh=False)
# Exit loop if a versionlock package installed correctly
try:
self.assertSaltTrueReturn(ret)
log.debug('Installed versionlock package: {}'.format(versionlock_pkg))
break
except AssertionError as e:
log.debug('Versionlock package not found:\n{}'.format(e))
else:
self.fail('Could not install versionlock package from {}'.format(pkgs))
if pre_grains and 'redhat' in pre_grains.like():
# If we're in the Red Hat family first we ensure that
# the yum-plugin-versionlock package is installed
ret = self.run_state(
'pkg.installed',
name='yum-plugin-versionlock',
refresh=False,
)
self.assertSaltTrueReturn(ret)
target = _PKG_TARGETS[0]
target = self._PKG_TARGETS[0]
# First we ensure that the package is installed
ret = self.run_state(
@ -583,12 +562,15 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
refresh=False,
)
if versionlock_pkg and '-versionlock is not installed' in str(ret):
self.skipTest('versionlock reported as not installed even though `{}` was installed: {}'.format(versionlock_pkg, ret))
# changes from pkg.hold for Red Hat family are different
if pre_grains:
if 'redhat' in pre_grains.like():
target_changes = {'new': 'hold', 'old': ''}
elif 'debian' in pre_grains.like():
target_changes = {'new': 'hold', 'old': 'install'}
target_changes = {}
if grains['os_family'] == 'RedHat':
target_changes = {'new': 'hold', 'old': ''}
elif grains['os_family'] == 'Debian':
target_changes = {'new': 'hold', 'old': 'install'}
try:
tag = 'pkg_|-{0}_|-{0}_|-installed'.format(target)
@ -596,33 +578,36 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertIn(tag, ret)
self.assertIn('changes', ret[tag])
self.assertIn(target, ret[tag]['changes'])
if not target_changes:
self.skipTest(
'Test needs to be configured for {}: {}'.format(grains['os'], ret[tag]['changes'][target]))
self.assertEqual(ret[tag]['changes'][target], target_changes)
finally:
# Clean up, unhold package and remove
self.run_function('pkg.unhold', name=target)
ret = self.run_state('pkg.removed', name=target)
self.assertSaltTrueReturn(ret)
if pre_grains and 'redhat' in pre_grains.like():
ret = self.run_state('pkg.removed',
name='yum-plugin-versionlock')
if versionlock_pkg:
ret = self.run_state('pkg.removed', name=versionlock_pkg)
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_CAP_TARGETS, 'Capability not provided')
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_cap_001_installed(self):
'''
This is a destructive test as it installs and then removes a package
'''
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.run_function('pkg.version', [target])
realver = self.run_function('pkg.version', [realpkg])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to not be installed before we run the states below
self.assertFalse(version)
self.assertFalse(realver)
# If either package is already installed we need to find new targets:
# this needs to be able to test successful installation of packages,
# so these packages must not be installed before we run the states below
if version or realver:
self.skipTest('TODO: New pkg cap targets required')
try:
ret = self.run_state('pkg.installed', name=target, refresh=False, resolve_capabilities=True, test=True)
@ -634,20 +619,20 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_CAP_TARGETS, 'Capability not available')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_cap_002_already_installed(self):
'''
This is a destructive test as it installs and then removes a package
'''
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.run_function('pkg.version', [target])
realver = self.run_function('pkg.version', [realpkg])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to not be installed before we run the states below
self.assertFalse(version)
self.assertFalse(realver)
# If either package is already installed we need to find new targets:
# this needs to be able to test successful installation of packages,
# so these packages must not be installed before we run the states below
if version or realver:
self.skipTest('TODO: New pkg cap targets required')
try:
# install the package
@ -668,32 +653,24 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
@skipIf(not _PKG_CAP_TARGETS, 'Capability not available')
@skipIf(not _VERSION_SPEC_SUPPORTED, 'Version specification not supported')
@requires_salt_modules('pkg.installed', 'pkg.removed')
@requires_salt_states('pkg.installed', 'pkg.removed')
def test_pkg_cap_003_installed_multipkg_with_version(self):
'''
This is a destructive test as it installs and then removes two packages
'''
if pre_grains and ('arch' in pre_grains.like() or 'archlinux' in pre_grains.like()):
for idx in range(13):
if idx == 12:
raise Exception('Package database locked after 60 seconds, '
'bailing out')
if not os.path.isfile('/var/lib/pacman/db.lck'):
break
time.sleep(5)
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.latest_version(target)
realver = self.latest_version(realpkg)
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so these
# packages need to not be installed before we run the states below
self.assertTrue(version, 'new pkg cap targets required')
self.assertTrue(realver, 'new pkg cap targets required')
# If this condition is False, we need to find new targets.
# This needs to be able to test successful installation of packages.
# These packages need to not be installed before we run the states below
if not (version and realver):
self.skipTest('TODO: New pkg cap targets required')
cleanup_pkgs = list(self._PKG_TARGETS)  # copy so appending realpkg below does not mutate the class-level list
try:
pkgs = [{_PKG_TARGETS[0]: version}, _PKG_TARGETS[1], {target: realver}]
pkgs = [{self._PKG_TARGETS[0]: version}, self._PKG_TARGETS[1], {target: realver}]
ret = self.run_state('pkg.installed',
name='test_pkg_cap_003_installed_multipkg_with_version-install',
pkgs=pkgs,
@ -712,7 +689,6 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
pkgs=pkgs,
refresh=False, resolve_capabilities=True)
self.assertSaltTrueReturn(ret)
cleanup_pkgs = _PKG_TARGETS
cleanup_pkgs.append(realpkg)
finally:
ret = self.run_state('pkg.removed',
@ -721,21 +697,22 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_CAP_TARGETS, 'Capability not available')
@requires_salt_modules('pkg.version', 'pkg.latest', 'pkg.removed')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.latest', 'pkg.removed')
def test_pkg_cap_004_latest(self):
'''
This tests pkg.latest with a package that has no epoch (or a zero
epoch).
'''
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.run_function('pkg.version', [target])
realver = self.run_function('pkg.version', [realpkg])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to not be installed before we run the states below
self.assertFalse(version)
self.assertFalse(realver)
# If either package is already installed we need to find new targets:
# this needs to be able to test successful installation of packages,
# so these packages must not be installed before we run the states below
if version or realver:
self.skipTest('TODO: New pkg cap targets required')
try:
ret = self.run_state('pkg.latest', name=target, refresh=False, resolve_capabilities=True, test=True)
@ -751,20 +728,21 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_CAP_TARGETS, 'Capability not available')
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed', 'pkg.downloaded')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed', 'pkg.downloaded')
def test_pkg_cap_005_downloaded(self):
'''
This is a destructive test as it installs and then removes a package
'''
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.run_function('pkg.version', [target])
realver = self.run_function('pkg.version', [realpkg])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to not be installed before we run the states below
self.assertFalse(version)
self.assertFalse(realver)
# If either package is already installed we need to find new targets:
# this needs to be able to test successful installation of packages,
# so these packages must not be installed before we run the states below
if version or realver:
self.skipTest('TODO: New pkg cap targets required')
ret = self.run_state('pkg.downloaded', name=target, refresh=False)
self.assertSaltFalseReturn(ret)
@ -776,20 +754,21 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
self.assertSaltTrueReturn(ret)
@skipIf(not _PKG_CAP_TARGETS, 'Capability not available')
@requires_salt_modules('pkg.version', 'pkg.installed', 'pkg.removed', 'pkg.uptodate')
@requires_salt_modules('pkg.version')
@requires_salt_states('pkg.installed', 'pkg.removed', 'pkg.uptodate')
def test_pkg_cap_006_uptodate(self):
'''
This is a destructive test as it installs and then removes a package
'''
target, realpkg = _PKG_CAP_TARGETS[0]
target, realpkg = self._PKG_CAP_TARGETS[0]
version = self.run_function('pkg.version', [target])
realver = self.run_function('pkg.version', [realpkg])
# If this assert fails, we need to find new targets, this test needs to
# be able to test successful installation of packages, so this package
# needs to not be installed before we run the states below
self.assertFalse(version)
self.assertFalse(realver)
# If either package is already installed we need to find new targets:
# this needs to be able to test successful installation of packages,
# so these packages must not be installed before we run the states below
if version or realver:
self.skipTest('TODO: New pkg cap targets required')
try:
ret = self.run_state('pkg.installed', name=target,

View file

@ -14,11 +14,13 @@ from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_salt_modules,
requires_salt_states,
requires_system_grains,
)
# Import Salt libs
import salt.utils.platform
import salt.utils.files
# Import 3rd-party libs
from salt.ext import six
@ -77,21 +79,22 @@ class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@requires_salt_modules('pkgrepo.absent', 'pkgrepo.managed')
@requires_salt_states('pkgrepo.absent', 'pkgrepo.managed')
@requires_system_grains
def test_pkgrepo_03_with_comments(self, grains):
'''
Test adding a repo with comments
'''
if grains['os_family'] in ('redhat',):
kwargs = {}
if grains['os_family'] == 'RedHat':
kwargs = {
'name': 'examplerepo',
'baseurl': 'http://example.com/repo',
'enabled': False,
'comments': ['This is a comment']
}
elif grains['os_family'] in ('debian',):
self.skipTest('Debian/Ubuntu test case needed')
else:
self.skipTest('{}/{} test case needed'.format(grains['os_family'], grains['os']))
try:
# Run the state to add the repo
@ -127,7 +130,7 @@ class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
# Clean up
self.run_state('pkgrepo.absent', name=kwargs['name'])
@requires_salt_modules('pkgrepo.managed')
@requires_salt_states('pkgrepo.managed')
@requires_system_grains
def test_pkgrepo_04_apt_with_architectures(self, grains):
'''

View file

@ -72,7 +72,7 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, timeout=25,
catch_stderr=False, wipe=False, raw=False):
catch_stderr=False, wipe=False, raw=False, **kwargs):
'''
Execute salt-ssh
'''
@ -84,7 +84,12 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
arg_str
)
return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, raw=True, timeout=timeout)
return self.run_script('salt-ssh',
arg_str, with_retcode=with_retcode,
catch_stderr=catch_stderr,
raw=True,
timeout=timeout,
**kwargs)
def run_run(self,
arg_str,
@ -197,7 +202,8 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin
timeout=15,
raw=False,
popen_kwargs=None,
log_output=None):
log_output=None,
**kwargs):
'''
Execute a script with the given argument string
@ -237,6 +243,11 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin
cmd += 'python{0}.{1} '.format(*sys.version_info)
cmd += '{0} '.format(script_path)
cmd += '{0} '.format(arg_str)
if kwargs:
# late import
import salt.utils.json
for key, value in kwargs.items():
cmd += "'{0}={1} '".format(key, salt.utils.json.dumps(value))
tmp_file = tempfile.SpooledTemporaryFile()
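As a rough illustration of the kwargs forwarding added above (the target, function and test payload below are made up for the example), each keyword ends up on the generated salt-ssh command line as a quoted key=<json> token:

import salt.utils.json

cmd = "salt-ssh localhost saltcheck.run_test "
kwargs = {'test': {'module_and_function': 'test.echo',
                   'assertion': 'assertEqual',
                   'expected_return': 'hello',
                   'args': ['hello']}}
for key, value in kwargs.items():
    # Same serialization as run_script above: 'key=<json-encoded value> '
    cmd += "'{0}={1} '".format(key, salt.utils.json.dumps(value))
print(cmd)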
@ -453,7 +464,7 @@ class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixi
return ret
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, # pylint: disable=W0221
timeout=RUN_TIMEOUT, wipe=True, raw=False):
timeout=RUN_TIMEOUT, wipe=True, raw=False, **kwargs):
'''
Execute salt-ssh
'''
@ -469,8 +480,9 @@ class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixi
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
raw=True)
log.debug('Result of run_ssh for command \'%s\': %s', arg_str, ret)
raw=True,
**kwargs)
log.debug('Result of run_ssh for command \'%s %s\': %s', arg_str, kwargs, ret)
return ret
def run_run(self, arg_str, with_retcode=False, catch_stderr=False,
@ -945,8 +957,8 @@ class SSHCase(ShellCase):
We use a 180s timeout here, which some slower systems do end up needing
'''
ret = self.run_ssh(self._arg_str(function, arg), timeout=timeout,
wipe=wipe, raw=raw)
log.debug('SSHCase run_function executed %s with arg %s', function, arg)
wipe=wipe, raw=raw, **kwargs)
log.debug('SSHCase run_function executed %s with arg %s and kwargs %s', function, arg, kwargs)
log.debug('SSHCase JSON return: %s', ret)
# Late import

View file

@ -1081,42 +1081,91 @@ def requires_system_grains(func):
return decorator
def requires_salt_modules(*names):
@requires_system_grains
def runs_on(grains=None, **kwargs):
'''
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
Skip the test if grains don't match the values passed into **kwargs.
If a kwarg value is a list, skip unless the grains match at least one item in the list.
'''
def _check_required_salt_modules(*required_salt_modules):
# Late import
from tests.support.sminion import create_sminion
required_salt_modules = set(required_salt_modules)
sminion = create_sminion(minion_id='runtests-internal-sminion')
available_modules = list(sminion.functions)
not_available_modules = set()
try:
cached_not_available_modules = sminion.__not_available_modules__
except AttributeError:
cached_not_available_modules = sminion.__not_available_modules__ = set()
def decorator(caller):
@functools.wraps(caller)
def wrapper(cls):
for kw, value in kwargs.items():
if isinstance(value, list):
if not any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
cls.skipTest('This test does not run on {}={}'.format(kw, grains.get(kw)))
else:
if str(grains.get(kw)).lower() != str(value).lower():
cls.skipTest('This test runs on {}={}, not {}'.format(kw, value, grains.get(kw)))
return caller(cls)
return wrapper
return decorator
if cached_not_available_modules:
for not_available_module in cached_not_available_modules:
if not_available_module in required_salt_modules:
not_available_modules.add(not_available_module)
required_salt_modules.remove(not_available_module)
for required_module_name in required_salt_modules:
search_name = required_module_name
if '.' not in search_name:
search_name += '.*'
if not fnmatch.filter(available_modules, search_name):
not_available_modules.add(required_module_name)
cached_not_available_modules.add(required_module_name)
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
'''
Reverse of `runs_on`.
Skip the test if any grains match the values passed into **kwargs.
If a kwarg value is a list, skip if the grains match any item in the list.
'''
def decorator(caller):
@functools.wraps(caller)
def wrapper(cls):
for kw, value in kwargs.items():
if isinstance(value, list):
if any(str(grains.get(kw)).lower() == str(v).lower() for v in value):
cls.skipTest('This test does not run on {}={}'.format(kw, grains.get(kw)))
else:
if str(grains.get(kw)).lower() == str(value).lower():
cls.skipTest('This test does not run on {}={}, got {}'.format(kw, value, grains.get(kw)))
return caller(cls)
return wrapper
return decorator
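A usage sketch for the two decorators above; the test class and grain values are illustrative only, not taken from the suite.

from tests.support.case import ModuleCase
from tests.support.helpers import runs_on, not_runs_on


class ExampleGrainGatedTest(ModuleCase):

    @runs_on(kernel='linux', os_family=['Debian', 'RedHat'])
    @not_runs_on(os='Amazon')
    def test_linux_only_behaviour(self):
        # Runs only when the kernel grain is 'linux' and os_family is Debian
        # or RedHat; skipped entirely on Amazon Linux.
        self.assertTrue(self.run_function('test.ping'))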
if not_available_modules:
if len(not_available_modules) == 1:
raise SkipTest('Salt module \'{}\' is not available'.format(*not_available_modules))
raise SkipTest('Salt modules not available: {}'.format(', '.join(not_available_modules)))
def _check_required_sminion_attributes(sminion_attr, *required_items):
'''
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
:return The packages that are not available
'''
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id='runtests-internal-sminion')
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = '__not_available_{items}s__'.format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if '.' not in search_name:
search_name += '.*'
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
'''
Makes sure the passed salt state is available. Skips the test if not
.. versionadded:: 3000
'''
not_available = _check_required_sminion_attributes('states', *names)
def decorator(caller):
if inspect.isclass(caller):
@ -1124,7 +1173,9 @@ def requires_salt_modules(*names):
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
_check_required_salt_modules(*names)
if not_available:
raise SkipTest('Unavailable salt states: {}'.format(', '.join(not_available)))
if old_setup is not None:
old_setup(self, *args, **kwargs)
@ -1134,7 +1185,42 @@ def requires_salt_modules(*names):
# We're simply decorating functions
@functools.wraps(caller)
def wrapper(cls):
_check_required_salt_modules(*names)
if not_available:
raise SkipTest('Unavailable salt states: {}'.format(', '.join(not_available)))
return caller(cls)
return wrapper
return decorator
def requires_salt_modules(*names):
'''
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
'''
not_available = _check_required_sminion_attributes('functions', *names)
def decorator(caller):
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if not_available:
raise SkipTest('Unavailable salt modules: {}'.format(', '.join(not_available)))
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrapper(cls):
if not_available:
raise SkipTest('Unavailable salt modules: {}'.format(', '.join(not_available)))
return caller(cls)
return wrapper
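A usage sketch for the refactored decorators; the class and the module/state names are examples, not taken from the suite.

from tests.support.case import ModuleCase
from tests.support.helpers import requires_salt_modules, requires_salt_states


class ExampleRequiresTest(ModuleCase):

    @requires_salt_modules('pkg.version', 'pkg.list_pkgs')
    @requires_salt_states('pkg.installed', 'pkg.removed')
    def test_needs_pkg_support(self):
        # Raises SkipTest before the body runs if any listed execution module
        # or state module is unavailable on the internal sminion.
        self.assertIsInstance(self.run_function('pkg.list_pkgs'), dict)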
@ -1233,7 +1319,7 @@ def repeat(caller=None, condition=True, times=5):
@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times+1):
for attempt in range(1, times + 1):
log.info('%s test run %d of %s times', cls, attempt, times)
caller(cls)
return cls

View file

@ -0,0 +1,107 @@
# coding: utf-8
# Python libs
from __future__ import absolute_import
import logging
# Salt testing libs
from tests.support.unit import TestCase
from tests.support.mock import patch, mock_open
from tests.support.mixins import LoaderModuleMockMixin
# Salt libs
import salt.beacons.cert_info as cert_info
log = logging.getLogger(__name__)
_TEST_CERT = '''
-----BEGIN CERTIFICATE-----
MIIC/jCCAeagAwIBAgIJAIQMfu6ShHvfMA0GCSqGSIb3DQEBCwUAMCQxIjAgBgNV
BAMMGXNhbHR0ZXN0LTAxLmV4YW1wbGUubG9jYWwwHhcNMTkwNjAzMjA1OTIyWhcN
MjkwNTMxMjA1OTIyWjAkMSIwIAYDVQQDDBlzYWx0dGVzdC0wMS5leGFtcGxlLmxv
Y2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv5UxxKGsOO8n2hUk
KjL8r2Rjt0om4wwdXUu0R1fQUlaSO0g+vk0wHHaovoVcEU6uZlhDPw1qZ4C+cp9Z
rDzSfwI2Njg813I5jzTBgox+3pJ+82vgXZ14xpqZ+f0ACMo4uRPjBkyQpHqYiDJ3
VockZSxm5s7RT05xDnedDfPgu1WAvzQovWO6slCs+Hlp8sh6QAy/hIwOZ0hT8y3J
NV6PSPqK7BEypOPak36+ogtiuPxxat4da74SUVS8Ffupnr40BjqVqEXBvfIIHiQt
3r5gpjoBjrWX2ccgQlHQP8gFaToFxWLSSYVT6E8Oj5UEywpmvPDRjJsJ5epscblT
oFyVXQIDAQABozMwMTAJBgNVHRMEAjAAMCQGA1UdEQQdMBuCGXNhbHR0ZXN0LTAx
LmV4YW1wbGUubG9jYWwwDQYJKoZIhvcNAQELBQADggEBABPqQlkaZDV5dPwNO/s2
PBT/19LroOwQ+fBJgZpbGha5/ZaSr+jcYZf2jAicPajWGlY/rXAdBSuxpmUYCC12
23tI4stwGyB8Quuoyg2Z+5LQJSDA1LxNJ1kxQfDUnS3tVQa0wJVtq8W9wNryNONL
noaQaDcdbGx3V15W+Bx0as5NfIWqz1uVi4MGGxI6hMBuDD7E7M+k1db8EaS+tI4u
seZBENjwjJA6zZmTXvYyzV5OBP4JyOhYuG9aqr7e6/yjPBEtZv0TJ9KMMbcywvE9
9FF+l4Y+wgKR/icrpDEpPlC4wYn64sy5vk7EGVagnVyhkjLJ52rn4trzyPox8FmO
2Zw=
-----END CERTIFICATE-----
'''
class CertInfoBeaconTestCase(TestCase, LoaderModuleMockMixin):
'''
Test case for salt.beacons.cert_info
'''
def setup_loader_modules(self):
return {
cert_info: {
'__context__': {},
'__salt__': {},
}
}
def test_non_list_config(self):
config = {}
ret = cert_info.validate(config)
self.assertEqual(ret, (False, 'Configuration for cert_info beacon must'
' be a list.'))
def test_empty_config(self):
config = [{}]
ret = cert_info.validate(config)
self.assertEqual(ret, (False, 'Configuration for cert_info beacon '
'must contain files option.'))
def test_cert_information(self):
with patch('salt.utils.files.fopen',
mock_open(read_data=_TEST_CERT)):
config = [{'files': ['/etc/pki/tls/certs/mycert.pem'],
'notify_days': -1
}]
ret = cert_info.validate(config)
self.assertEqual(ret, (True, 'Valid beacon configuration'))
_expected_return = [
{
'certificates': [
{
'cert_path': '/etc/pki/tls/certs/mycert.pem',
'extensions': [{'ext_data': 'CA:FALSE',
'ext_name': 'basicConstraints'},
{'ext_data': 'DNS:salttest-01.example.local',
'ext_name': 'subjectAltName'}],
'has_expired': False,
'issuer': 'CN="salttest-01.example.local"',
'issuer_dict': {'CN': 'salttest-01.example.local'},
'notAfter': '2029-05-31 20:59:22Z',
'notAfter_raw': '20290531205922Z',
'notBefore': '2019-06-03 20:59:22Z',
'notBefore_raw': '20190603205922Z',
'serial_number': 9515119675852487647,
'signature_algorithm': 'sha256WithRSAEncryption',
'subject': 'CN="salttest-01.example.local"',
'subject_dict': {'CN': 'salttest-01.example.local'},
'version': 2
}
]
}
]
ret = cert_info.beacon(config)
self.assertEqual(ret, _expected_return)

View file

@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Tyler Johnson <tjohnson@saltstack.com>
'''
# Import Salt Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch,
)
# Import Salt Libs
from salt.cloud.clouds import proxmox
class ProxmoxTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {
proxmox: {
'__utils__': {
'cloud.fire_event': MagicMock(),
'cloud.bootstrap': MagicMock()
},
'__opts__': {
'sock_dir': True,
'transport': True,
'providers': {'my_proxmox': {}},
'profiles': {'my_proxmox': {}}
},
'__active_provider_name__': 'my_proxmox:proxmox'
}
}
def setUp(self):
self.vm_ = {
'profile': 'my_proxmox',
'name': 'vm4',
'driver': 'proxmox',
'technology': 'qemu',
'host': '127.0.0.1',
'clone': True,
'ide0': 'data',
'sata0': 'data',
'scsi0': 'data',
'net0': 'a=b,c=d',
}
def tearDown(self):
del self.vm_
def test__stringlist_to_dictionary(self):
result = proxmox._stringlist_to_dictionary('')
self.assertEqual(result, {})
result = proxmox._stringlist_to_dictionary('foo=bar, ignored_space=bar,internal space=bar')
self.assertEqual(result, {'foo': 'bar', 'ignored_space': 'bar', 'internal space': 'bar'})
# Negative cases
self.assertRaises(ValueError, proxmox._stringlist_to_dictionary, 'foo=bar,foo')
self.assertRaises(ValueError, proxmox._stringlist_to_dictionary, 'foo=bar,totally=invalid=assignment')
def test__dictionary_to_stringlist(self):
result = proxmox._dictionary_to_stringlist({})
self.assertEqual(result, '')
result = proxmox._dictionary_to_stringlist({'a': 'a'})
self.assertEqual(result, 'a=a')
result = proxmox._dictionary_to_stringlist({'a': 'a', 'b': 'b'})
self.assertEqual(result, 'a=a,b=b')
def test__reconfigure_clone(self):
# The return_value is for the net reconfigure assertions, it is irrelevant for the rest
with patch.object(proxmox, 'query', return_value={'net0': 'c=overwritten,g=h'}) as query:
# Test a vm that lacks the required attributes
proxmox._reconfigure_clone({}, 0)
query.assert_not_called()
# Test a fully mocked vm
proxmox._reconfigure_clone(self.vm_, 0)
# net reconfigure
query.assert_any_call('get', 'nodes/127.0.0.1/qemu/0/config')
query.assert_any_call('post', 'nodes/127.0.0.1/qemu/0/config', {'net0': 'a=b,c=d,g=h'})
# hdd reconfigure
query.assert_any_call('post', 'nodes/127.0.0.1/qemu/0/config', {'ide0': 'data'})
query.assert_any_call('post', 'nodes/127.0.0.1/qemu/0/config', {'sata0': 'data'})
query.assert_any_call('post', 'nodes/127.0.0.1/qemu/0/config', {'scsi0': 'data'})
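For context, a minimal reimplementation of the two helpers these tests exercise, written only to match the asserted behaviour; it is a sketch, not the actual salt.cloud.clouds.proxmox code.

def _stringlist_to_dictionary(input_string):
    # Split on commas, strip surrounding whitespace from each item, then split
    # each item on exactly one '='; anything else raises ValueError.
    result = {}
    for item in input_string.split(','):
        item = item.strip()
        if not item:
            continue
        key, value = item.split('=')  # ValueError unless exactly one '='
        result[key] = value
    return result


def _dictionary_to_stringlist(input_dict):
    # Join 'key=value' pairs with commas, preserving insertion order.
    return ','.join('{0}={1}'.format(k, v) for k, v in input_dict.items())


print(_stringlist_to_dictionary('foo=bar, ignored_space=bar,internal space=bar'))
print(_dictionary_to_stringlist({'a': 'a', 'b': 'b'}))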

View file

@ -602,3 +602,134 @@ class RemoteFuncsTestCase(TestCase):
This is what minions before Nitrogen would issue.
'''
self.test_mine_get(tgt_type_key='expr_form')
def test_mine_get_dict_str(self, tgt_type_key='tgt_type'):
'''
Asserts that ``mine_get`` gives the expected results when request
is a comma-separated list.
Actually this only tests that:
- the correct check minions method is called
- the correct cache key is subsequently used
'''
self.funcs.cache.store('minions/webserver', 'mine',
dict(ip_addr='2001:db8::1:3', ip4_addr='127.0.0.1'))
with patch('salt.utils.minions.CkMinions._check_compound_minions',
MagicMock(return_value=(dict(
minions=['webserver'],
missing=[])))):
ret = self.funcs._mine_get(
{
'id': 'requester_minion',
'tgt': 'G@roles:web',
'fun': 'ip_addr,ip4_addr',
tgt_type_key: 'compound',
}
)
self.assertDictEqual(ret, dict(ip_addr=dict(webserver='2001:db8::1:3'), ip4_addr=dict(webserver='127.0.0.1')))
def test_mine_get_dict_list(self, tgt_type_key='tgt_type'):
'''
Asserts that ``mine_get`` gives the expected results when request
is a list.
Actually this only tests that:
- the correct check minions method is called
- the correct cache key is subsequently used
'''
self.funcs.cache.store('minions/webserver', 'mine',
dict(ip_addr='2001:db8::1:3', ip4_addr='127.0.0.1'))
with patch('salt.utils.minions.CkMinions._check_compound_minions',
MagicMock(return_value=(dict(
minions=['webserver'],
missing=[])))):
ret = self.funcs._mine_get(
{
'id': 'requester_minion',
'tgt': 'G@roles:web',
'fun': ['ip_addr', 'ip4_addr'],
tgt_type_key: 'compound',
}
)
self.assertDictEqual(ret, dict(ip_addr=dict(webserver='2001:db8::1:3'), ip4_addr=dict(webserver='127.0.0.1')))
def test_mine_get_acl_allowed(self):
'''
Asserts that ``mine_get`` gives the expected results when access is allowed
in the client-side ACL that was stored in the mine data.
'''
self.funcs.cache.store(
'minions/webserver',
'mine',
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:4',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'requester_minion',
'allow_tgt_type': 'glob',
},
}
)
# The glob check is for the resolution of the allow_tgt
# The compound check is for the resolution of the tgt in the mine_get request.
with \
patch('salt.utils.minions.CkMinions._check_glob_minions',
MagicMock(return_value={'minions': ['requester_minion'], 'missing': []})
), \
patch('salt.utils.minions.CkMinions._check_compound_minions',
MagicMock(return_value={'minions': ['webserver'], 'missing': []})
):
ret = self.funcs._mine_get(
{
'id': 'requester_minion',
'tgt': 'anything',
'tgt_type': 'compound',
'fun': ['ip_addr'],
}
)
self.assertDictEqual(
ret,
{'ip_addr': {'webserver': '2001:db8::1:4'}}
)
def test_mine_get_acl_rejected(self):
'''
Asserts that ``mine_get`` gives the expected results when access is rejected
in the client-side ACL that was stored in the mine data. This results in
no data being sent back (just as if the entry did not exist).
'''
self.funcs.cache.store(
'minions/webserver',
'mine',
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:4',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'not_requester_minion',
'allow_tgt_type': 'glob',
}
}
)
# The glob check is for the resolution of the allow_tgt
# The compound check is for the resolution of the tgt in the mine_get request.
with \
patch('salt.utils.minions.CkMinions._check_glob_minions',
MagicMock(return_value={'minions': ['not_requester_minion'], 'missing': []})
), \
patch('salt.utils.minions.CkMinions._check_compound_minions',
MagicMock(return_value={'minions': ['webserver'], 'missing': []})
):
ret = self.funcs._mine_get(
{
'id': 'requester_minion',
'tgt': 'anything',
'tgt_type': 'compound',
'fun': ['ip_addr'],
}
)
self.assertDictEqual(
ret,
{}
)
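Both ACL tests above hinge on the same rule: an ACL-wrapped mine entry is only handed back when its ``allow_tgt`` resolves to a set of minions that includes the requester. A hedged sketch of that check, reusing only the ``salt.utils.mine`` constants already referenced here; the helper name and the ``check_minions`` callable are illustrative assumptions, not Salt internals:

# Hedged sketch of the ACL gate exercised by the two tests above; the function
# name and the check_minions callable are assumptions for illustration.
import salt.utils.mine


def visible_mine_value(entry, requester_id, check_minions):
    '''Return the stored value if requester_id may see it, else None.
    check_minions(tgt, tgt_type) is assumed to resolve a target expression to
    a list of minion ids (the role CkMinions plays in the tests above).'''
    is_acl_entry = (
        isinstance(entry, dict)
        and entry.get(salt.utils.mine.MINE_ITEM_ACL_ID) == salt.utils.mine.MINE_ITEM_ACL_VERSION
    )
    if not is_acl_entry:
        # Classic (pre-ACL) entry: stored bare, always visible.
        return entry
    if 'allow_tgt' in entry:
        allowed = check_minions(entry['allow_tgt'], entry.get('allow_tgt_type', 'glob'))
        if requester_id not in allowed:
            return None
    return entry[salt.utils.mine.MINE_ITEM_ACL_DATA]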

View file

@@ -1430,3 +1430,58 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
self.assertIn('virtual', osdata_grains)
self.assertNotEqual(osdata_grains['virtual'], 'physical')
@skipIf(salt.utils.platform.is_windows(), 'System is Windows')
def test_bsd_osfullname(self):
'''
Test to ensure osfullname exists on *BSD systems
'''
_path_exists_map = {}
_path_isfile_map = {}
_cmd_run_map = {
'freebsd-version -u': '10.3-RELEASE',
'/sbin/sysctl -n hw.physmem': '2121781248',
'/sbin/sysctl -n vm.swap_total': '419430400'
}
path_exists_mock = MagicMock(side_effect=lambda x: _path_exists_map[x])
path_isfile_mock = MagicMock(
side_effect=lambda x: _path_isfile_map.get(x, False)
)
cmd_run_mock = MagicMock(
side_effect=lambda x: _cmd_run_map[x]
)
empty_mock = MagicMock(return_value={})
mock_freebsd_uname = ('FreeBSD',
'freebsd10.3-hostname-8148',
'10.3-RELEASE',
'FreeBSD 10.3-RELEASE #0 r297264: Fri Mar 25 02:10:02 UTC 2016 root@releng1.nyi.freebsd.org:/usr/obj/usr/src/sys/GENERIC',
'amd64',
'amd64')
with patch('platform.uname',
MagicMock(return_value=mock_freebsd_uname)):
with patch.object(salt.utils.platform, 'is_linux',
MagicMock(return_value=False)):
with patch.object(salt.utils.platform, 'is_freebsd',
MagicMock(return_value=True)):
# Skip the first if statement
with patch.object(salt.utils.platform, 'is_proxy',
MagicMock(return_value=False)):
# Skip the init grain compilation (not pertinent)
with patch.object(os.path, 'exists', path_exists_mock):
with patch('salt.utils.path.which') as mock:
mock.return_value = '/sbin/sysctl'
# Make a bunch of functions return empty dicts,
# we don't care about these grains for the
# purposes of this test.
with patch.object(core, '_bsd_cpudata', empty_mock), \
patch.object(core, '_hw_data', empty_mock), \
patch.object(core, '_virtual', empty_mock), \
patch.object(core, '_ps', empty_mock), \
patch.dict(core.__salt__, {'cmd.run': cmd_run_mock}):
os_grains = core.os_data()
self.assertIn('osfullname', os_grains)
self.assertEqual(os_grains.get('osfullname'), 'FreeBSD')
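The closing assertion only requires that ``osfullname`` is present and equals the kernel name; a minimal sketch of that expectation (an assumption about the behaviour under test, not the actual ``core.os_data`` code) looks like:

# Hedged sketch of the expectation asserted above, not core.os_data itself:
# on *BSD, osfullname should at least fall back to the uname kernel name.
import platform


def bsd_osfullname_fallback(grains):
    if 'osfullname' not in grains:
        grains['osfullname'] = platform.uname()[0]  # e.g. 'FreeBSD'
    return grains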

View file

@@ -13,6 +13,8 @@ from salt.ext import six
import salt.modules.jboss7_cli as jboss7_cli
from salt.exceptions import CommandExecutionError
from tests.support.mock import patch
class CmdMock(object):
commands = []
@@ -430,3 +432,17 @@ class JBoss7CliTestCase(TestCase, LoaderModuleMockMixin):
jboss7_cli.run_operation(self.jboss_config, operation)
self.assertEqual(self.cmd.get_last_command(), r'/opt/jboss/jboss-eap-6.0.1/bin/jboss-cli.sh --connect --controller="123.234.345.456:9999" --user="jbossadm" --password="jbossadm" --command="/subsystem=naming/binding=\"java:/sampleapp/web-module/ldap/username\":add(binding-type=simple, value=\"DOMAIN\\\\\\\\user\")"')
def test_run_operation_wflyctl_error(self):
call_cli_ret = {'retcode': 1,
'stdout': '{"failure-description" => "WFLYCTL0234523: ops"}'}
with patch('salt.modules.jboss7_cli._call_cli', return_value=call_cli_ret) as _call_cli:
ret = jboss7_cli.run_operation(None, "ls", False)
self.assertEqual(ret['err_code'], "WFLYCTL0234523")
def test_run_operation_no_code_error(self):
call_cli_ret = {'retcode': 1,
'stdout': '{"failure-description" => "ERROR234523: ops"}'}
with patch('salt.modules.jboss7_cli._call_cli', return_value=call_cli_ret) as _call_cli:
ret = jboss7_cli.run_operation(None, "ls", False)
self.assertEqual(ret['err_code'], "-1")
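The two tests above describe the error-code handling: a ``WFLYCTL``-style code is lifted out of the ``failure-description``, and ``-1`` is returned when no such code is present. A minimal sketch of that extraction (illustration only, not the ``salt.modules.jboss7_cli`` parser):

# Illustration of the error-code extraction described by the tests above,
# not the actual salt.modules.jboss7_cli implementation.
import re


def extract_err_code(failure_description):
    match = re.search(r'(WFLYCTL\d+)', failure_description)
    return match.group(1) if match else '-1'

# extract_err_code('WFLYCTL0234523: ops')  -> 'WFLYCTL0234523'
# extract_err_code('ERROR234523: ops')     -> '-1'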

View file

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
:codeauthor: Herbert Buurman <herbert.buurman@ogd.nl>
'''
# Import Python Libs
@@ -10,19 +11,517 @@ from __future__ import absolute_import, print_function, unicode_literals
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch,
)
# Import Salt Libs
import salt.modules.mine as mine
import salt.utils.mine
from salt.utils.odict import OrderedDict
class FakeCache(object):
def __init__(self):
self.data = {}
def store(self, bank, key, value):
self.data[bank, key] = value
return 'FakeCache:StoreSuccess!'
def fetch(self, bank, key):
return self.data.get((bank, key), {})
def debug(self):
print(__name__ + ':FakeCache dump:\n'
'{}'.format(self.data))
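``FakeCache`` stands in for the minion data cache throughout the tests below; a quick round-trip sketch of how it behaves (relying only on the class defined above):

# Quick round-trip illustration of the FakeCache defined above.
cache = FakeCache()
assert cache.fetch('minions/webserver', 'mine_cache') == {}  # unknown key -> empty dict
assert cache.store('minions/webserver', 'mine_cache', {'k': 'v'}) == 'FakeCache:StoreSuccess!'
assert cache.fetch('minions/webserver', 'mine_cache') == {'k': 'v'}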
class MineTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mine
'''
def setUp(self):
self.cache = FakeCache()
def setup_loader_modules(self):
return {mine: {}}
mock_match = MagicMock(return_value='webserver')
return {mine: {'__salt__': {
'match.glob': mock_match,
'match.pcre': mock_match,
'match.list': mock_match,
'match.grain': mock_match,
'match.grain_pcre': mock_match,
'match.ipcidr': mock_match,
'match.compound': mock_match,
'match.pillar': mock_match,
'match.pillar_pcre': mock_match,
'data.get': lambda key: self.cache.fetch('minions/webserver', key),
'data.update': lambda key, value: self.cache.store('minions/webserver', key, value),
}}}
def test_get_local_empty(self):
'''
Tests getting function data that does not exist from the local mine.
'''
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret_classic = mine.get('*', 'funky.doodle')
ret_dict = mine.get('*', ['funky.doodle'])
self.assertEqual(ret_classic, {})
self.assertEqual(ret_dict, {})
def test_get_local_classic(self):
'''
Tests getting function data that was stored in the local mine without a minion-side ACL.
This verifies backwards-compatible reads from a salt mine.
'''
# Prefill minion cache with a non-ACL value
self.cache.store('minions/webserver', 'mine_cache', {'foobard': 'barfood'})
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret_classic = mine.get('*', 'foobard')
ret_dict = mine.get('*', ['foobard'])
self.assertEqual(ret_classic, {'webserver': 'barfood'})
self.assertEqual(ret_dict, {'foobard': {'webserver': 'barfood'}})
def test_send_get_local(self):
'''
Tests sending an item to the mine in the minion's local cache,
and then immediately fetching it again (since tests are executed unordered).
Also verifies that the stored mine cache has the correct structure (with ACL).
'''
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}), \
patch.dict(mine.__salt__, {
'network.ip_addrs': MagicMock(return_value='2001:db8::1:3'),
'foo.bar': MagicMock(return_value='baz'),
}):
ret = mine.send('ip_addr', mine_function='network.ip_addrs')
mine.send('foo.bar')
self.assertEqual(ret, 'FakeCache:StoreSuccess!')
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
}
)
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret_single = mine.get('*', 'ip_addr')
ret_single_dict = mine.get('*', ['ip_addr'])
ret_multi = mine.get('*', 'ip_addr,foo.bar')
ret_multi2 = mine.get('*', ['ip_addr', 'foo.bar'])
self.assertEqual(ret_single, {'webserver': '2001:db8::1:3'})
self.assertEqual(ret_single_dict, {'ip_addr': {'webserver': '2001:db8::1:3'}})
self.assertEqual(ret_multi, {'ip_addr': {'webserver': '2001:db8::1:3'}, 'foo.bar': {'webserver': 'baz'}})
self.assertEqual(ret_multi, ret_multi2)
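The four assertions above pin down two return shapes: a single string request yields ``{minion: value}``, while a list or comma-separated request yields ``{function: {minion: value}}``. A hedged sketch of that shaping, assuming Python 3 and reusing only the ``salt.utils.mine`` constants already used in this file (not the ``salt.modules.mine`` implementation):

# Hedged sketch of the two result shapes asserted above; illustration only.
import salt.utils.mine


def unwrap(entry):
    '''Strip the ACL wrapper from a stored mine item, if present.'''
    if isinstance(entry, dict) and salt.utils.mine.MINE_ITEM_ACL_ID in entry:
        return entry[salt.utils.mine.MINE_ITEM_ACL_DATA]
    return entry


def shape_result(requested_fun, per_minion_data):
    '''per_minion_data maps minion id -> {function: stored entry}.
    A plain single-function string keeps the classic {minion: value} shape;
    anything else is keyed by function name first.'''
    funs = requested_fun.split(',') if isinstance(requested_fun, str) else list(requested_fun)
    nested = {}
    for minion, data in per_minion_data.items():
        for fun in funs:
            if fun in data:
                nested.setdefault(fun, {})[minion] = unwrap(data[fun])
    if isinstance(requested_fun, str) and ',' not in requested_fun:
        return nested.get(requested_fun, {})
    return nested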
def test_send_get_acl_local(self):
'''
Tests sending an item to the mine in the minion's local cache,
including ACL information (not useful when working purely locally, but harmless),
and then immediately fetching it again (since tests are executed unordered).
Also verifies that the stored mine cache has the correct structure (with ACL).
'''
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}), \
patch.dict(mine.__salt__, {
'network.ip_addrs': MagicMock(return_value='2001:db8::1:3'),
'foo.bar': MagicMock(return_value='baz'),
}):
ret = mine.send('ip_addr', mine_function='network.ip_addrs', allow_tgt='web*', allow_tgt_type='glob')
mine.send('foo.bar')
self.assertEqual(ret, 'FakeCache:StoreSuccess!')
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'web*',
'allow_tgt_type': 'glob',
},
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
}
)
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret_single = mine.get('*', 'ip_addr')
self.assertEqual(ret_single, {'webserver': '2001:db8::1:3'})
def test_send_master(self):
'''
Tests sending an item to the mine stored on the master.
This is done by capturing the load that is sent to the master.
'''
with patch.object(mine, '_mine_send', MagicMock(side_effect=lambda x, y: x)),\
patch.dict(mine.__salt__, {
'foo.bar': MagicMock(return_value='baz'),
}), \
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'foo',
}):
ret = mine.send('foo.bar')
self.assertEqual(
ret,
{
'id': 'foo',
'cmd': '_mine',
'data': {
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
},
'clear': False,
}
)
def test_send_master_acl(self):
'''
Tests sending an item to the mine stored on the master. Now with ACL.
This is done by capturing the load that is sent to the master.
'''
with patch.object(mine, '_mine_send', MagicMock(side_effect=lambda x, y: x)),\
patch.dict(mine.__salt__, {
'foo.bar': MagicMock(return_value='baz'),
}), \
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'foo',
}):
ret = mine.send('foo.bar', allow_tgt='roles:web', allow_tgt_type='grains')
self.assertEqual(
ret,
{
'id': 'foo',
'cmd': '_mine',
'data': {
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'roles:web',
'allow_tgt_type': 'grains',
},
},
'clear': False,
}
)
def test_get_master(self):
'''
Tests loading a mine item from the mine stored on the master.
'''
mock_load = {
'tgt_type': 'qux',
'tgt': 'baz',
'cmd': '_mine_get',
'fun': 'foo.bar',
'id': 'foo'
}
with patch.object(mine, '_mine_get', MagicMock(return_value=mock_load)),\
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'foo',
}):
# Verify the correct load
self.assertEqual(
mine.get('*', 'foo.bar'),
mock_load
)
def test_get_master_exclude_minion(self):
'''
Tests the ``exclude_minion`` parameter of ``mine.get``.
'''
_mine_get_ret = OrderedDict([('webserver', 'value')])
with patch.object(mine, '_mine_get', MagicMock(return_value=_mine_get_ret)),\
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'webserver',
}):
self.assertEqual(
mine.get('*', 'foo.bar', exclude_minion=False),
{'webserver': 'value'}
)
self.assertEqual(
mine.get('*', 'foo.bar', exclude_minion=True),
{}
)
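The behaviour asserted here is small but easy to miss: with ``exclude_minion=True`` the requesting minion's own id is dropped from the result. A one-function sketch of that filtering (an assumption about the behaviour, not the module code):

# Sketch of the exclude_minion filtering asserted above; illustration only.
def apply_exclude_minion(result, own_id, exclude_minion):
    result = dict(result)
    if exclude_minion:
        result.pop(own_id, None)
    return result

# apply_exclude_minion({'webserver': 'value'}, 'webserver', True)   -> {}
# apply_exclude_minion({'webserver': 'value'}, 'webserver', False)  -> {'webserver': 'value'}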
def test_update_local(self):
'''
Tests the ``update`` function on the minion's local cache.
Updates mine functions from pillar+config only.
'''
config_mine_functions = {
'ip_addr': {'mine_function': 'network.ip_addrs'},
'network.ip_addrs': [],
'kernel': [{'mine_function': 'grains.get'}, 'kernel', {'allow_tgt': 'web*'}],
'foo.bar': {'allow_tgt': 'G@roles:webserver', 'allow_tgt_type': 'compound'},
}
with \
patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}), \
patch.dict(mine.__salt__, {
'config.merge': MagicMock(return_value=config_mine_functions),
'grains.get': lambda x: 'Linux!',
'network.ip_addrs': MagicMock(return_value='2001:db8::1:3'),
'foo.bar': MagicMock(return_value='baz'),
}):
ret = mine.update()
self.assertEqual(ret, 'FakeCache:StoreSuccess!')
# Check if the mine entries have been stored properly in the FakeCache.
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'network.ip_addrs': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'G@roles:webserver',
'allow_tgt_type': 'compound',
},
'kernel': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'Linux!',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'web*',
},
}
)
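``test_update_local`` feeds three different shapes of ``mine_functions`` entries (plain dict, empty list, and a mixed list of dicts and positional arguments). A hedged sketch of how a single entry could be normalised into a callable name, args and kwargs, matching those three shapes (illustration only, not the ``salt.modules.mine`` parser):

# Hedged sketch of normalising one mine_functions entry; illustration only.
def normalise_mine_entry(name, definition):
    args, kwargs = [], {}
    if isinstance(definition, dict):
        kwargs = dict(definition)
    elif isinstance(definition, list):
        for item in definition:
            if isinstance(item, dict):
                kwargs.update(item)
            else:
                args.append(item)
    fun = kwargs.pop('mine_function', name)  # explicit override, or the key itself
    return fun, args, kwargs

# normalise_mine_entry('ip_addr', {'mine_function': 'network.ip_addrs'})
#   -> ('network.ip_addrs', [], {})
# normalise_mine_entry('kernel', [{'mine_function': 'grains.get'}, 'kernel', {'allow_tgt': 'web*'}])
#   -> ('grains.get', ['kernel'], {'allow_tgt': 'web*'})
# normalise_mine_entry('network.ip_addrs', [])
#   -> ('network.ip_addrs', [], {})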
def test_update_local_specific(self):
'''
Tests the ``update`` function on the minion's local cache.
Updates mine functions from kwargs only.
'''
manual_mine_functions = {
'ip_addr': {'mine_function': 'network.ip_addrs'},
'network.ip_addrs': [],
'kernel': [{'mine_function': 'grains.get'}, 'kernel', {'allow_tgt': 'web*'}],
'foo.bar': {'allow_tgt': 'G@roles:webserver', 'allow_tgt_type': 'compound'},
}
with \
patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}), \
patch.dict(mine.__salt__, {
'config.merge': MagicMock(return_value={}),
'grains.get': lambda x: 'Linux!!',
'network.ip_addrs': MagicMock(return_value='2001:db8::1:4'),
'foo.bar': MagicMock(return_value='baz'),
}):
ret = mine.update(mine_functions=manual_mine_functions)
self.assertEqual(ret, 'FakeCache:StoreSuccess!')
# Check if the mine entries have been stored properly in the FakeCache.
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:4',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'network.ip_addrs': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:4',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'G@roles:webserver',
'allow_tgt_type': 'compound',
},
'kernel': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'Linux!!',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
'allow_tgt': 'web*',
},
}
)
def test_update_master(self):
'''
Tests whether the ``update`` function sends the correct data to the master.
'''
config_mine_functions = {
'ip_addr': {'mine_function': 'network.ip_addrs'},
'network.ip_addrs': [],
'kernel': [{'mine_function': 'grains.get'}, 'kernel'],
'foo.bar': {},
}
mock_load = {
'id': 'webserver',
'cmd': '_mine',
'data': {
'ip_addr': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'network.ip_addrs': {
salt.utils.mine.MINE_ITEM_ACL_DATA: '2001:db8::1:3',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'foo.bar': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'baz',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
'kernel': {
salt.utils.mine.MINE_ITEM_ACL_DATA: 'Linux!',
salt.utils.mine.MINE_ITEM_ACL_ID: salt.utils.mine.MINE_ITEM_ACL_VERSION,
},
},
'clear': False,
}
with \
patch.object(mine, '_mine_send', MagicMock(side_effect=lambda x, y: x)),\
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'webserver',
}), \
patch.dict(mine.__salt__, {
'config.merge': MagicMock(return_value=config_mine_functions),
'grains.get': lambda x: 'Linux!',
'network.ip_addrs': MagicMock(return_value='2001:db8::1:3'),
'foo.bar': MagicMock(return_value='baz'),
}):
# Verify the correct load
self.assertEqual(
mine.update(),
mock_load
)
def test_delete_local(self):
'''
Tests the ``delete`` function on the minion's local cache.
'''
# Prefill minion cache with a non-ACL value
self.cache.store('minions/webserver', 'mine_cache', {'foobard': 'barfood'})
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret = mine.delete('foobard')
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{}
)
def test_delete_master(self):
'''
Tests whether the ``delete`` function sends the correct data to the master.
'''
# Prefill minion cache with a non-ACL value
self.cache.store('minions/webserver', 'mine_cache', {'foobard': 'barfood'})
mock_load = {
'cmd': '_mine_delete',
'fun': 'foobard',
'id': 'foo',
}
with patch.object(mine, '_mine_send', MagicMock(side_effect=lambda x, y: x)),\
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'foo',
}):
# Verify the correct load
self.assertEqual(
mine.delete('foobard'),
mock_load
)
def test_flush_local(self):
'''
Tests the ``flush`` function on the minion's local cache.
'''
# Prefill minion cache with a non-ACL value
self.cache.store('minions/webserver', 'mine_cache', {'foobard': 'barfood'})
with patch.dict(mine.__opts__, {
'file_client': 'local',
'id': 'webserver',
}):
ret = mine.flush()
self.assertEqual(
self.cache.fetch('minions/webserver', 'mine_cache'),
{}
)
def test_flush_master(self):
'''
Tests whether the ``flush`` function sends the correct data to the master.
'''
mock_load = {
'cmd': '_mine_flush',
'id': 'foo'
}
with patch.object(mine, '_mine_send', MagicMock(side_effect=lambda x, y: x)),\
patch.dict(mine.__opts__, {
'file_client': 'remote',
'id': 'foo',
}):
# Verify the correct load
self.assertEqual(
mine.flush(),
mock_load
)
def test_valid(self):
'''
Tests the ``valid`` function.
Note that mine functions defined as a list are returned in dict format.
Mine functions that do not exist in __salt__ are not returned.
'''
config_mine_functions = {
'network.ip_addrs': [],
'kernel': [{'mine_function': 'grains.get'}, 'kernel'],
'fubar': [{'mine_function': 'does.not_exist'}],
}
with \
patch.dict(mine.__salt__, {
'config.merge': MagicMock(return_value=config_mine_functions),
'network.ip_addrs': lambda: True,
'grains.get': lambda: True,
}):
self.assertEqual(
mine.valid(),
{
'network.ip_addrs': [],
'kernel': {'grains.get': ['kernel']},
}
)
def test_get_docker(self):
'''

View file

@@ -7,7 +7,6 @@ from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.stringutils
import salt.modules.nacl as nacl
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
@@ -18,8 +17,9 @@ from tests.support.unit import skipIf
try:
import libnacl.secret # pylint: disable=unused-import
import libnacl.sealed # pylint: disable=unused-import
import salt.modules.nacl as nacl
HAS_LIBNACL = True
except ImportError:
except (ImportError, OSError, AttributeError):
HAS_LIBNACL = False
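Broadening the ``except`` above means a present-but-broken libnacl (for example a missing shared library raising ``OSError``) simply leaves ``HAS_LIBNACL`` as ``False`` instead of erroring at import time. A minimal sketch of how such a flag is typically consumed to skip the tests; the class name here is an illustrative assumption, not the class in this file:

# Minimal sketch (assumption): HAS_LIBNACL gates the test case so a broken or
# absent libnacl skips the tests rather than failing at collection time.
# 'NaclSketchTest' is an illustrative name, not the class defined in this file.
from tests.support.unit import TestCase, skipIf


@skipIf(not HAS_LIBNACL, 'libnacl is unavailable or failed to load')
class NaclSketchTest(TestCase):
    def test_placeholder(self):
        self.assertTrue(HAS_LIBNACL)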

View file

@@ -1466,6 +1466,7 @@ class PostgresTestCase(TestCase, LoaderModuleMockMixin):
locale=None,
password='test',
runas='postgres',
checksums=False,
user='postgres',
)
self.assertTrue(ret)

Some files were not shown because too many files have changed in this diff Show more