Mirror of https://github.com/saltstack/salt.git, synced 2025-04-17 10:10:20 +00:00

Merge branch 'master' into add-global-state-conditions

Commit 01cd8f2d73: 247 changed files with 6239 additions and 3104 deletions
changelog/45823.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fix fun_args missing from syndic returns

changelog/49310.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Issue #49310: Allow users to touch a file with Unix date of birth

changelog/52400.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Swapping out args and kwargs for arg and kwarg respectively in the Slack engine when the command passed is a runner.

changelog/55226.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Cleaned up bytes response data before sending to non-bytes compatible returners (postgres, mysql)

changelog/56093.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed pillar.filter_by with salt-ssh

changelog/57500.added (new file, 1 line)
@@ -0,0 +1 @@
+Added resource tagging functions to boto_dynamodb execution module

changelog/58953.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed formatting for terse output mode

changelog/59183.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed saltnado websockets disconnecting immediately

changelog/59766.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed influxdb_continuous_query.present state to provide the client args to the underlying module on create.

changelog/59786.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Warn when using insecure (http:// based) key_urls for apt-based systems in pkgrepo.managed, and add a kwarg that determines the validity of such a URL.

changelog/60365.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed stdout and stderr being empty sometimes when use_vt=True for the cmd.run[*] functions

changelog/61083.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Made salt-ssh respect --wipe again

changelog/61122.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fix ipset state when the comment kwarg is set.

changelog/61153.added (new file, 1 line)
@@ -0,0 +1 @@
+Initial work to allow parallel startup of proxy minions when used as sub proxies with Deltaproxy.

changelog/61805.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Move the creation of the proxy object for the ProxyMinion further down in the initialization for sub proxies to ensure that all modules, especially any custom proxy modules, are available before attempting to run the init function.

changelog/62019.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Make Salt return an error on "pkg" modules and states when targeting duplicated package names

changelog/62131.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Ignore some command return codes in openbsdrcctl_service to prevent spurious errors

changelog/62139.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed extra period in filename output in tls module. Instead of "server.crt." it will now be "server.crt".

changelog/62152.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Make sure lingering PAexec-*.exe files in the Windows directory are cleaned up

changelog/62281.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fix Salt Package Manager (SPM) exception when calling spm create_repo .

changelog/62754.deprecated (new file, 1 line)
@@ -0,0 +1 @@
+Deprecate core ESXi and associated states and modules, vcenter and vsphere support in favor of Salt VMware Extensions

changelog/62937.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Updated pyzmq to version 22.0.3 on Windows builds because the old version was causing salt-minion/salt-call to hang

changelog/62968.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Issue #62968: Fix issue where cloud deployments were putting the keys in the wrong location on Windows hosts

changelog/62977.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed gpg_passphrase issue with gpg decrypt/encrypt functions

changelog/62978.added (new file, 1 line)
@@ -0,0 +1 @@
+Added output and bare functionality to export_key gpg module function

changelog/62983.added (new file, 1 line)
@@ -0,0 +1 @@
+Add keyvalue serializer for environment files

changelog/62988.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fixed bug where module.wait states were detected as running legacy module.run syntax

changelog/63012.changed (new file, 1 line)
@@ -0,0 +1 @@
+Requisite state chunks now all consistently contain `__id__`, `__sls__` and `name`.

changelog/63013.fixed (new file, 3 lines)
@@ -0,0 +1,3 @@
+The `__opts__` dunder dictionary is now added to the loader's `pack` if not
+already present, which makes it accessible via the
+`salt.loader.context.NamedLoaderContext` class.

changelog/63024.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Issue #63024: Fix issue where grains and config data were being placed in the wrong location on Windows hosts

changelog/63025.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fix btrfs.subvolume_snapshot command failing

changelog/63033.fixed (new file, 1 line)
@@ -0,0 +1 @@
+Fix file.retention_schedule always reporting changes

changelog/63042.added (new file, 1 line)
@@ -0,0 +1 @@
+Add ability to ignore symlinks in file.tidied

changelog/63058.fixed (new file, 3 lines)
@@ -0,0 +1,3 @@
+Fix mongo authentication for mongo ext_pillar and mongo returner
+
+This fix also includes the ability to use the mongo connection string for mongo ext_pillar
@@ -12,6 +12,7 @@ serializer modules

     configparser
     json
+    keyvalue
    msgpack
    plist
    python
doc/ref/serializers/all/salt.serializers.keyvalue.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
+salt.serializers.keyvalue
+=========================
+
+.. automodule:: salt.serializers.keyvalue
+    :members:
+
@@ -54,6 +54,15 @@ For a security fix your filename would look like this: changelog/cve-2021-25283.
 If your PR does not align with any of the types, then you do not need to add a
 changelog entry.
+
+.. note::
+
+    Requirement Files:
+    Updates to package requirements files also require a changelog file. This will usually
+    be associated with `.fixed` if it's resolving an issue or `.security` if it's resolving
+    a CVE issue in an upstream project. If updates are made to testing requirement files,
+    no changelog entry is required.
+

 .. _generate-changelog:

 How to generate the changelog
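As a concrete illustration of the convention described in the hunk above (the PR number 12345 and the entry text are hypothetical), such an entry is just a one-line file under ``changelog/``:

.. code-block:: python

    # Hypothetical example: record a requirements bump that fixes an issue.
    # The filename is <issue or PR number>.<type>; the body is one line of prose.
    from pathlib import Path

    entry = Path("changelog") / "12345.fixed"
    entry.write_text("Upgrade pyzmq on Windows to fix salt-minion hangs\n")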
|
@@ -155,6 +155,11 @@ The following dunder dictionaries are always defined, but may be empty

 __opts__
 --------
+
+.. versionchanged:: 3006.0
+
+    The ``__opts__`` dictionary can now be accessed via
+    :py:mod:`~salt.loader.context`.

 Defined in: All modules

 The ``__opts__`` dictionary contains all of the options passed in the
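A minimal sketch of that access path (assuming the ``LoaderContext.named_context`` helper in ``salt.loader.context``; the fallback default is hypothetical):

.. code-block:: python

    # Sketch: resolve the packed __opts__ through a named loader context
    # instead of relying on the dunder being injected at module load time.
    import salt.loader.context

    loader_context = salt.loader.context.LoaderContext()
    __opts__ = loader_context.named_context("__opts__", default={})

    # Under a loader run, .value() yields the packed dictionary; outside
    # one it falls back to the default given above.
    print(type(__opts__.value()))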
|
@@ -309,11 +309,7 @@ def _run_with_coverage(session, *test_cmd, env=None):
     if SKIP_REQUIREMENTS_INSTALL is False:
         coverage_requirement = COVERAGE_REQUIREMENT
         if coverage_requirement is None:
-            version_info = _get_session_python_version_info(session)
-            if version_info < (3, 7):
-                coverage_requirement = "coverage==6.2"
-            else:
-                coverage_requirement = "coverage==6.5.0"
+            coverage_requirement = "coverage==5.2"
         session.install(
             "--progress-bar=off", coverage_requirement, silent=PIP_INSTALL_SILENT
         )
@@ -59,7 +59,7 @@ export MACOSX_DEPLOYMENT_TARGET

 # Versions we're going to install
 PY_VERSION=3.9
-PY_DOT_VERSION=3.9.12
+PY_DOT_VERSION=3.9.15
 ZMQ_VERSION=4.3.4
 LIBSODIUM_VERSION=1.0.18

@@ -94,7 +94,7 @@ echo "**** Setting Variables"
 SRCDIR=`git rev-parse --show-toplevel`
 PKGRESOURCES=$SRCDIR/pkg/osx
 PY_VERSION=3.9
-PY_DOT_VERSION=3.9.12
+PY_DOT_VERSION=3.9.15

 ################################################################################
 # Make sure this is the Salt Repository
@@ -19,7 +19,7 @@
 ###############################################################################
 # Define Variables
 ###############################################################################
-PY_DOT_VERSION=3.9.12
+PY_DOT_VERSION=3.9.15
 INSTALL_DIR="/opt/salt"
 BIN_DIR="$INSTALL_DIR/bin"
 CONFIG_DIR="/etc/salt"
@@ -68,7 +68,7 @@ quit_on_error() {
 echo "**** Setting Variables"
 INSTALL_DIR=/opt/salt
 PY_VERSION=3.9
-PY_DOT_VERSION=3.9.12
+PY_DOT_VERSION=3.9.15
 CMD_OUTPUT=$(mktemp -t cmd.log)
 SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

@@ -1,14 +1,17 @@
 <#
 .SYNOPSIS
-Script that builds Python from source
+Script that builds Python from source using the Relative Environment for Python
+project (relenv):
+
+https://github.com/saltstack/relative-environment-for-python

 .DESCRIPTION
-This script builds python from Source. It then creates the directory
-structure as created by the Python installer in C:\Python##. This includes
-all header files, scripts, dlls, library files, and pip.
+This script builds python from Source. It then creates the directory structure
+as created by the Python installer. This includes all header files, scripts,
+dlls, library files, and pip.

 .EXAMPLE
-build_python.ps1 -Version 3.8.13
+build_python.ps1 -Version 3.8.14 -Architecture x86

 #>
 param(
@@ -21,6 +24,7 @@ param(
         #"3.9.13",
         #"3.9.12",
         #"3.9.11",
+        "3.8.15",
         "3.8.14",
         "3.8.13",
         "3.8.12",
@@ -31,12 +35,12 @@ param(
     # The version of Python to be built. Pythonnet only supports up to Python
     # 3.8 for now. Pycurl stopped building wheel files after 7.43.0.5 which
     # supported up to 3.8. So we're pinned to the latest version of Python 3.8.
-    # We may have to drop support for pycurl.
-    # Default is: 3.8.14
-    [String] $Version = "3.8.14",
+    # We may have to drop support for pycurl or build it ourselves.
+    # Default is: 3.8.15
+    [String] $Version = "3.8.15",

     [Parameter(Mandatory=$false)]
-    [ValidateSet("x86", "x64")]
+    [ValidateSet("x64", "x86")]
     [Alias("a")]
     # The System Architecture to build. "x86" will build a 32-bit installer.
     # "x64" will build a 64-bit installer. Default is: x64
@@ -87,7 +91,7 @@ If (!(Get-IsAdministrator)) {
 #-------------------------------------------------------------------------------

 Write-Host $("=" * 80)
-Write-Host "Build Python with Mayflower" -ForegroundColor Cyan
+Write-Host "Build Python with Relenv" -ForegroundColor Cyan
 Write-Host "- Python Version: $Version"
 Write-Host "- Architecture:   $Architecture"
 Write-Host $("-" * 80)
@@ -136,34 +140,30 @@ Write-Host "Success" -ForegroundColor Green
 # Script Variables
 #-------------------------------------------------------------------------------

 # Script Variables
 $PROJ_DIR = $(git rev-parse --show-toplevel)
-$MAYFLOWER_DIR = "$SCRIPT_DIR\Mayflower"
-$MAYFLOWER_URL = "https://github.com/saltstack/mayflower"
-
-# Python Variables
-
-$PY_DOT_VERSION = $Version
-$PY_VERSION = [String]::Join(".", $Version.Split(".")[0..1])
-$BIN_DIR = "$SCRIPT_DIR\buildenv\bin"
-$SCRIPTS_DIR = "$BIN_DIR\Scripts"
+$RELENV_DIR = "$SCRIPT_DIR\relative-environment-for-python"
+$RELENV_URL = "https://github.com/saltstack/relative-environment-for-python"
+$BIN_DIR = "$SCRIPT_DIR\buildenv\bin"
+$SCRIPTS_DIR = "$BIN_DIR\Scripts"
+$BUILD_DIR = "${env:LOCALAPPDATA}\relenv\build"
+$SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies"

 if ( $Architecture -eq "x64" ) {
-    $SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies/64"
-    $BUILD_DIR = "$MAYFLOWER_DIR\mayflower\_build\x86_64-win"
+    $SALT_DEP_URL = "$SALT_DEP_URL/64"
+    $BUILD_DIR = "$BUILD_DIR\amd64-win"
+    $ARCH = "amd64"
 } else {
-    $SALT_DEP_URL = "https://repo.saltproject.io/windows/dependencies/32"
-    # Not sure of the exact name here
-    $BUILD_DIR = "$MAYFLOWER_DIR\mayflower\_build\x86_32-win"
+    $SALT_DEP_URL = "$SALT_DEP_URL/32"
+    $BUILD_DIR = "$BUILD_DIR\x86-win"
+    $ARCH = "x86"
 }

 #-------------------------------------------------------------------------------
 # Prepping Environment
 #-------------------------------------------------------------------------------
-if ( Test-Path -Path "$MAYFLOWER_DIR" ) {
-    Write-Host "Removing existing mayflower directory: " -NoNewline
-    Remove-Item -Path "$MAYFLOWER_DIR" -Recurse -Force
-    if ( Test-Path -Path "$MAYFLOWER_DIR" ) {
+if ( Test-Path -Path "$RELENV_DIR" ) {
+    Write-Host "Removing existing relenv directory: " -NoNewline
+    Remove-Item -Path "$RELENV_DIR" -Recurse -Force
+    if ( Test-Path -Path "$RELENV_DIR" ) {
         Write-Host "Failed" -ForegroundColor Red
         exit 1
     } else {
@@ -182,18 +182,29 @@ if ( Test-Path -Path "$BIN_DIR" ) {
     }
 }

+if ( Test-Path -Path "$BUILD_DIR" ) {
+    Write-Host "Removing existing build directory: " -NoNewline
+    Remove-Item -Path "$BUILD_DIR" -Recurse -Force
+    if ( Test-Path -Path "$BUILD_DIR" ) {
+        Write-Host "Failed" -ForegroundColor Red
+        exit 1
+    } else {
+        Write-Host "Success" -ForegroundColor Green
+    }
+}
+
 #-------------------------------------------------------------------------------
-# Downloading Mayflower
+# Downloading Relenv
 #-------------------------------------------------------------------------------
 # TODO: Eventually we should just download the tarball from a release, but since
 # TODO: there is no release yet, we'll just clone the directory

-Write-Host "Cloning Mayflower: " -NoNewline
-$args = "clone", "--depth", "1", "$MAYFLOWER_URL", "$MAYFLOWER_DIR"
+Write-Host "Cloning Relenv: " -NoNewline
+$args = "clone", "--depth", "1", "$RELENV_URL", "$RELENV_DIR"
 Start-Process -FilePath git `
     -ArgumentList $args `
     -Wait -WindowStyle Hidden
-if ( Test-Path -Path "$MAYFLOWER_DIR\mayflower") {
+if ( Test-Path -Path "$RELENV_DIR\relenv") {
     Write-Host "Success" -ForegroundColor Green
 } else {
     Write-Host "Failed" -ForegroundColor Red
@@ -201,12 +212,12 @@ if ( Test-Path -Path "$MAYFLOWER_DIR\mayflower") {
 }

 #-------------------------------------------------------------------------------
-# Installing Mayflower
+# Installing Relenv
 #-------------------------------------------------------------------------------
-Write-Host "Installing Mayflower: " -NoNewLine
-$output = pip install -e "$MAYFLOWER_DIR\." --disable-pip-version-check
+Write-Host "Installing Relenv: " -NoNewLine
+$output = pip install -e "$RELENV_DIR\." --disable-pip-version-check
 $output = pip list --disable-pip-version-check
-if ("mayflower" -in $output.split()) {
+if ("relenv" -in $output.split()) {
     Write-Host "Success" -ForegroundColor Green
 } else {
     Write-Host "Failed" -ForegroundColor Red
@@ -214,10 +225,10 @@ if ("mayflower" -in $output.split()) {
 }

 #-------------------------------------------------------------------------------
-# Building Python with Mayflower
+# Building Python with Relenv
 #-------------------------------------------------------------------------------
-Write-Host "Building Python with Mayflower (long-running): " -NoNewLine
-$output = python -m mayflower build --clean
+Write-Host "Building Python with Relenv (long-running): " -NoNewLine
+$output = python -m relenv build --clean --arch $ARCH
 if ( Test-Path -Path "$BUILD_DIR\Scripts\python.exe") {
     Write-Host "Success" -ForegroundColor Green
 } else {
@@ -239,7 +250,7 @@ if ( !( Test-Path -Path $BIN_DIR ) ) {
     }
 }

-Write-Host "Moving Python to bin directory: " -NoNewLine
+Write-Host "Moving Python to bin: " -NoNewLine
 Move-Item -Path "$BUILD_DIR\*" -Destination "$BIN_DIR"
 if ( Test-Path -Path "$SCRIPTS_DIR\python.exe") {
     Write-Host "Success" -ForegroundColor Green
@@ -325,7 +336,7 @@ if ( Test-Path -Path "$profile.salt_bak" ) {
 # Finished
 #-------------------------------------------------------------------------------
 Write-Host $("-" * 80)
-Write-Host "Build Python $Architecture with Mayflower Completed" `
+Write-Host "Build Python $Architecture with Relenv Completed" `
     -ForegroundColor Cyan
 Write-Host "Environment Location: $BIN_DIR"
 Write-Host $("=" * 80)
@@ -5,5 +5,7 @@ PyYAML
 MarkupSafe
 requests>=1.0.0
 distro>=1.0.1
-contextvars
 psutil>=5.0.0
+
+# We need contextvars for salt-ssh
+contextvars
@@ -2,8 +2,8 @@ mock >= 3.0.0
 # PyTest
 pytest >= 6.1.0; python_version < "3.6"
 pytest >= 7.0.1; python_version >= "3.6"
-pytest-salt-factories >= 1.0.0rc20; sys_platform == 'win32'
-pytest-salt-factories[docker] >= 1.0.0rc20; sys_platform != 'win32'
+pytest-salt-factories >= 1.0.0rc21; sys_platform == 'win32'
+pytest-salt-factories[docker] >= 1.0.0rc21; sys_platform != 'win32'
 pytest-tempdir >= 2019.10.12
 pytest-helpers-namespace >= 2019.1.8
 pytest-subtests
@@ -688,7 +688,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -686,7 +686,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -683,7 +683,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -705,7 +705,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -271,7 +271,7 @@ pyrsistent==0.17.3
     # via jsonschema
 pytest-helpers-namespace==2019.1.8
     # via -r requirements/pytest.txt
-pytest-salt-factories==1.0.0rc17
+pytest-salt-factories==1.0.0rc21
     # via -r requirements/pytest.txt
 pytest-subtests==0.4.0
     # via -r requirements/pytest.txt

@@ -710,7 +710,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -727,7 +727,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -732,7 +732,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -723,7 +723,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -743,7 +743,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -266,7 +266,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories==1.0.0rc20 ; sys_platform == "win32"
+pytest-salt-factories==1.0.0rc21 ; sys_platform == "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories
@@ -329,7 +329,7 @@ pyyaml==5.4.1
     # clustershell
     # kubernetes
     # yamllint
-pyzmq==18.0.1 ; python_version < "3.9"
+pyzmq==22.0.3 ; python_version < "3.9" and sys_platform == "win32"
     # via
     #   -r requirements/zeromq.txt
     #   pytest-salt-factories

@@ -721,7 +721,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -713,7 +713,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -733,7 +733,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -254,7 +254,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories==1.0.0rc20 ; sys_platform == "win32"
+pytest-salt-factories==1.0.0rc21 ; sys_platform == "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories
@@ -317,7 +317,7 @@ pyyaml==5.4.1
     # clustershell
     # kubernetes
     # yamllint
-pyzmq==19.0.0 ; python_version < "3.9"
+pyzmq==22.0.3 ; python_version < "3.9" and sys_platform == "win32"
     # via
     #   -r requirements/zeromq.txt
     #   pytest-salt-factories

@@ -724,7 +724,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -719,7 +719,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -716,7 +716,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -738,7 +738,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories[docker]==1.0.0rc20 ; sys_platform != "win32"
+pytest-salt-factories[docker]==1.0.0rc21 ; sys_platform != "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -255,7 +255,7 @@ pytest-helpers-namespace==2021.4.29
     # pytest-shell-utilities
 pytest-httpserver==1.0.4
     # via -r requirements/pytest.txt
-pytest-salt-factories==1.0.0rc20 ; sys_platform == "win32"
+pytest-salt-factories==1.0.0rc21 ; sys_platform == "win32"
     # via -r requirements/pytest.txt
 pytest-shell-utilities==1.6.0
     # via pytest-salt-factories

@@ -110,7 +110,7 @@ pywin32==303
     # wmi
 pyyaml==5.4.1
     # via -r requirements/base.txt
-pyzmq==18.0.1 ; python_version < "3.9"
+pyzmq==22.0.3 ; python_version < "3.9" and sys_platform == "win32"
     # via -r requirements/zeromq.txt
 requests==2.25.1
     # via

@@ -108,7 +108,7 @@ pywin32==303
     # wmi
 pyyaml==5.4.1
     # via -r requirements/base.txt
-pyzmq==19.0.0 ; python_version < "3.9"
+pyzmq==22.0.3 ; python_version < "3.9" and sys_platform == "win32"
     # via -r requirements/zeromq.txt
 requests==2.25.1
     # via
@@ -4,3 +4,7 @@
 pyzmq<=20.0.0 ; python_version < "3.6"
 pyzmq>=17.0.0 ; python_version < "3.9"
 pyzmq>19.0.2 ; python_version >= "3.9"
+
+# We can't use 23+ on Windows until they fix this:
+# https://github.com/zeromq/pyzmq/issues/1472
+pyzmq>=20.0.0, <=22.0.3 ; python_version < "3.9" and sys_platform == 'win32'
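Pip evaluates these environment markers per interpreter; a quick way to check which pin a given machine will select (assuming the ``packaging`` library is available):

.. code-block:: python

    # Sketch: evaluate the zeromq.txt marker on the current interpreter.
    from packaging.markers import Marker

    windows_pin = Marker('python_version < "3.9" and sys_platform == "win32"')
    # True only on Windows running Python < 3.9, where the <=22.0.3 cap applies.
    print(windows_pin.evaluate())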
|
@@ -1205,6 +1205,13 @@ class Single:
                 opts["grains"][grain] = self.target["grains"][grain]

         opts["pillar"] = data.get("pillar")
+
+        # Restore --wipe. Note: Since it is also a CLI option, it should not
+        # be read from cache, hence it is restored here. This is currently only
+        # of semantic distinction since data_cache has been disabled, so refresh
+        # above always evaluates to True. TODO: cleanup?
+        opts["ssh_wipe"] = self.opts.get("ssh_wipe", False)
+
         wrapper = salt.client.ssh.wrapper.FunctionWrapper(
             opts,
             self.id,
@@ -3,15 +3,12 @@ Return/control aspects of the grains data
 """

-
-import copy
 import math
-from collections.abc import Mapping

 import salt.utils.data
 import salt.utils.dictupdate
 import salt.utils.json
 from salt.defaults import DEFAULT_TARGET_DELIM
 from salt.exceptions import SaltException

 # Seed the grains dict so cython will build
 __grains__ = {}

@@ -184,7 +181,7 @@ def filter_by(lookup_dict, grain="os_family", merge=None, default="default", bas
     {% set apache = salt['grains.filter_by']({
         'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
         'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
-    }), default='Debian' %}
+    }, default='Debian') %}

     myapache:
       pkg.installed:

@@ -216,26 +213,47 @@ def filter_by(lookup_dict, grain="os_family", merge=None, default="default", bas
         values relevant to systems matching that grain. For example, a key
         could be the grain for an OS and the value could be the name of a package
         on that particular OS.

+        .. versionchanged:: 2016.11.0
+
+            The dictionary key could be a globbing pattern. The function will
+            return the corresponding ``lookup_dict`` value where grain value
+            matches the pattern. For example:
+
+            .. code-block:: bash
+
+                # this will render 'got some salt' if Minion ID begins from 'salt'
+                salt '*' grains.filter_by '{salt*: got some salt, default: salt is not here}' id
+
     :param grain: The name of a grain to match with the current system's
         grains. For example, the value of the "os_family" grain for the current
         system could be used to pull values from the ``lookup_dict``
         dictionary.
-    :param merge: A dictionary to merge with the ``lookup_dict`` before doing
-        the lookup. This allows Pillar to override the values in the
+
+        .. versionchanged:: 2016.11.0
+
+            The grain value could be a list. The function will return the
+            ``lookup_dict`` value for a first found item in the list matching
+            one of the ``lookup_dict`` keys.
+
+    :param merge: A dictionary to merge with the results of the grain selection
+        from ``lookup_dict``. This allows Pillar to override the values in the
         ``lookup_dict``. This could be useful, for example, to override the
         values for non-standard package names such as when using a different
         Python version from the default Python version provided by the OS
         (e.g., ``python26-mysql`` instead of ``python-mysql``).
-    :param default: default lookup_dict's key used if the grain does not exists
-        or if the grain value has no match on lookup_dict.
-
-        .. versionadded:: 2014.1.0
+
+    :param default: default lookup_dict's key used if the grain does not exist
+        or if the grain value has no match on lookup_dict. If unspecified
+        the value is "default".
+
+        .. versionadded:: 2014.1.0
+
     :param base: A lookup_dict key to use for a base dictionary. The
         grain-selected ``lookup_dict`` is merged over this and then finally
         the ``merge`` dictionary is merged. This allows common values for
         each case to be collected in the base and overridden by the grain
-        selection dictionary and the merge dictionary. Default is None.
+        selection dictionary and the merge dictionary. Default is unset.

         .. versionadded:: 2015.8.11,2016.3.2

@@ -245,31 +263,16 @@ def filter_by(lookup_dict, grain="os_family", merge=None, default="default", bas

         salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
         # this one will render {D: {E: I, G: H}, J: K}
-        salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
+        salt '*' grains.filter_by '{A: B, C: {D: {E: F, G: H}}}' 'xxx' '{D: {E: I}, J: K}' 'C'
         # next one renders {A: {B: G}, D: J}
         salt '*' grains.filter_by '{default: {A: {B: C}, D: E}, F: {A: {B: G}}, H: {D: I}}' 'xxx' '{D: J}' 'F' 'default'
         # next same as above when default='H' instead of 'F' renders {A: {B: C}, D: J}
     """
-    ret = lookup_dict.get(
-        __grains__.get(grain, default), lookup_dict.get(default, None)
-    )
-
-    if base and base in lookup_dict:
-        base_values = lookup_dict[base]
-        if ret is None:
-            ret = base_values
-
-        elif isinstance(base_values, Mapping):
-            if not isinstance(ret, Mapping):
-                raise SaltException(
-                    "filter_by default and look-up values must both be dictionaries."
-                )
-            ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
-
-    if merge:
-        if not isinstance(merge, Mapping):
-            raise SaltException("filter_by merge argument must be a dictionary.")
-        else:
-            if ret is None:
-                ret = merge
-            else:
-                salt.utils.dictupdate.update(ret, merge)
-
-    return ret
+    return salt.utils.data.filter_by(
+        lookup_dict=lookup_dict,
+        lookup=grain,
+        traverse=__grains__.value(),
+        merge=merge,
+        default=default,
+        base=base,
+    )
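A runnable usage sketch of how ``base``, the grain selection, and ``merge`` layer together, exercised through the shared helper the module now delegates to (the data is hypothetical; ``traverse`` stands in for real grains):

.. code-block:: python

    import salt.utils.data

    result = salt.utils.data.filter_by(
        lookup_dict={
            "default": {"pkg": "httpd", "srv": "httpd"},
            "Debian": {"pkg": "apache2"},
        },
        lookup="os_family",
        traverse={"os_family": "Debian"},
        merge={"srv": "apache2"},
        default="default",
        base="default",
    )
    # base applies first, the grain-selected entry merges over it, and the
    # merge dict wins last:
    assert result == {"pkg": "apache2", "srv": "apache2"}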
|
@@ -141,6 +141,64 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM):
     return ret.keys()


+def filter_by(lookup_dict, pillar, merge=None, default="default", base=None):
+    """
+    .. versionadded:: 2017.7.0
+
+    Look up the given pillar in a given dictionary and return the result
+
+    :param lookup_dict: A dictionary, keyed by a pillar, containing a value or
+        values relevant to systems matching that pillar. For example, a key
+        could be a pillar for a role and the value could be the name of a package
+        on that particular OS.
+
+        The dictionary key can be a globbing pattern. The function will return
+        the corresponding ``lookup_dict`` value where the pillar value matches
+        the pattern. For example:
+
+        .. code-block:: bash
+
+            # this will render 'got some salt' if ``role`` begins with 'salt'
+            salt '*' pillar.filter_by '{salt*: got some salt, default: salt is not here}' role
+
+    :param pillar: The name of a pillar to match with the system's pillar. For
+        example, the value of the "role" pillar could be used to pull values
+        from the ``lookup_dict`` dictionary.
+
+        The pillar value can be a list. The function will return the
+        ``lookup_dict`` value for a first found item in the list matching
+        one of the ``lookup_dict`` keys.
+
+    :param merge: A dictionary to merge with the results of the pillar
+        selection from ``lookup_dict``. This allows another dictionary to
+        override the values in the ``lookup_dict``.
+
+    :param default: default lookup_dict's key used if the pillar does not exist
+        or if the pillar value has no match on lookup_dict. If unspecified
+        the value is "default".
+
+    :param base: A lookup_dict key to use for a base dictionary. The
+        pillar-selected ``lookup_dict`` is merged over this and then finally
+        the ``merge`` dictionary is merged. This allows common values for
+        each case to be collected in the base and overridden by the pillar
+        selection dictionary and the merge dictionary. Default is unset.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' pillar.filter_by '{web: Serve it up, db: I query, default: x_x}' role
+    """
+    return salt.utils.data.filter_by(
+        lookup_dict=lookup_dict,
+        lookup=pillar,
+        traverse=__pillar__.value(),
+        merge=merge,
+        default=default,
+        base=base,
+    )
+
+
 # Allow pillar.data to also be used to return pillar data
 items = raw
 data = items
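The same helper traverses pillar data here, including the glob matching described in the docstring. A small sketch with hypothetical data (assuming the helper matches glob keys against the traversed value, as documented above):

.. code-block:: python

    import salt.utils.data

    result = salt.utils.data.filter_by(
        lookup_dict={"salt*": "got some salt", "default": "salt is not here"},
        lookup="role",
        traverse={"role": "saltmaster"},
        default="default",
    )
    # "saltmaster" matches the glob key "salt*":
    assert result == "got some salt"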
|
@@ -948,7 +948,7 @@ class SlackClient:
                 log.debug("Command %s will run via runner_functions", cmd)
                 # pylint is tripping
                 # pylint: disable=missing-whitespace-after-comma
-                job_id_dict = runner.asynchronous(cmd, {"args": args, "kwargs": kwargs})
+                job_id_dict = runner.asynchronous(cmd, {"arg": args, "kwarg": kwargs})
                 job_id = job_id_dict["jid"]

                 # Default to trying to run as a client module.
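For context, a hedged sketch of the key-name convention this fix relies on (the call mirrors the hunk above; the payload contents are hypothetical):

.. code-block:: python

    # Salt client low dicts carry positional args under "arg" and keyword
    # args under "kwarg". The previous "args"/"kwargs" keys were ignored by
    # the runner client, so runner commands issued from Slack lost their
    # arguments.
    low = {"arg": [], "kwarg": {"search_target": "*"}}  # hypothetical payload
    # job_id_dict = runner.asynchronous("jobs.list_jobs", low)  # as above
    # job_id = job_id_dict["jid"]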
|
@@ -590,6 +590,10 @@ class LazyLoader(salt.utils.lazy.LazyDict):
             if key == "logger":
                 continue
             mod_opts[key] = val
+
+        if "__opts__" not in self.pack:
+            self.pack["__opts__"] = mod_opts
+
         return mod_opts

     def _iter_files(self, mod_name):
@@ -1720,7 +1720,7 @@ class AESFuncs(TransportMethods):
             if any(key not in load for key in ("return", "jid", "id")):
                 continue
             # if we have a load, save it
-            if load.get("load"):
+            if load.get("load") and self.opts["master_job_cache"]:
                 fstr = "{}.save_load".format(self.opts["master_job_cache"])
                 self.mminion.returners[fstr](load["jid"], load["load"])

@@ -1743,8 +1743,8 @@ class AESFuncs(TransportMethods):
             ret["master_id"] = load["master_id"]
         if "fun" in load:
             ret["fun"] = load["fun"]
-        if "arg" in load:
-            ret["fun_args"] = load["arg"]
+        if "fun_args" in load:
+            ret["fun_args"] = load["fun_args"]
         if "out" in load:
             ret["out"] = load["out"]
         if "sig" in load:
@@ -2,7 +2,7 @@
 # Proxy minion metaproxy modules
 #

-import copy
+import concurrent.futures
 import logging
 import os
 import signal
@@ -320,170 +320,229 @@ def post_master_init(self, master):
     self.proxy_pillar = {}
     self.proxy_context = {}
     self.add_periodic_callback("cleanup", self.cleanup_subprocesses)
-    for _id in self.opts["proxy"].get("ids", []):
-        control_id = self.opts["id"]
-        proxyopts = self.opts.copy()
-        proxyopts["id"] = _id
-
-        proxyopts = salt.config.proxy_config(
-            self.opts["conf_file"], defaults=proxyopts, minion_id=_id
-        )
-        proxyopts["id"] = proxyopts["proxyid"] = _id
-
-        proxyopts["subproxy"] = True
-
-        self.proxy_context[_id] = {"proxy_id": _id}
-
-        # We need grains first to be able to load pillar, which is where we keep the proxy
-        # configurations
-        self.proxy_grains[_id] = salt.loader.grains(
-            proxyopts, proxy=self.proxy, context=self.proxy_context[_id]
-        )
-        self.proxy_pillar[_id] = yield salt.pillar.get_async_pillar(
-            proxyopts,
-            self.proxy_grains[_id],
-            _id,
-            saltenv=proxyopts["saltenv"],
-            pillarenv=proxyopts.get("pillarenv"),
-        ).compile_pillar()
-
-        proxyopts["proxy"] = self.proxy_pillar[_id].get("proxy", {})
-        if not proxyopts["proxy"]:
-            log.warning(
-                "Pillar data for proxy minion %s could not be loaded, skipping.", _id
-            )
-            continue
-
-        # Remove ids
-        proxyopts["proxy"].pop("ids", None)
-
-        proxyopts["pillar"] = self.proxy_pillar[_id]
-        proxyopts["grains"] = self.proxy_grains[_id]
-
-        proxyopts["hash_id"] = self.opts["id"]
-
-        _proxy_minion = ProxyMinion(proxyopts)
-        _proxy_minion.proc_dir = salt.minion.get_proc_dir(
-            proxyopts["cachedir"], uid=uid
-        )
-
-        _proxy_minion.proxy = salt.loader.proxy(
-            proxyopts, utils=self.utils, context=self.proxy_context[_id]
-        )
-        _proxy_minion.subprocess_list = self.subprocess_list
-
-        # a long-running req channel
-        _proxy_minion.req_channel = salt.transport.client.AsyncReqChannel.factory(
-            proxyopts, io_loop=self.io_loop
-        )
-
-        # And load the modules
-        (
-            _proxy_minion.functions,
-            _proxy_minion.returners,
-            _proxy_minion.function_errors,
-            _proxy_minion.executors,
-        ) = _proxy_minion._load_modules(
-            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
-        )
-
-        # we can then sync any proxymodules down from the master
-        # we do a sync_all here in case proxy code was installed by
-        # SPM or was manually placed in /srv/salt/_modules etc.
-        _proxy_minion.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])
-
-        # And re-load the modules so the __proxy__ variable gets injected
-        (
-            _proxy_minion.functions,
-            _proxy_minion.returners,
-            _proxy_minion.function_errors,
-            _proxy_minion.executors,
-        ) = _proxy_minion._load_modules(
-            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
-        )
-
-        _proxy_minion.functions.pack["__proxy__"] = _proxy_minion.proxy
-        _proxy_minion.proxy.pack["__salt__"] = _proxy_minion.functions
-        _proxy_minion.proxy.pack["__ret__"] = _proxy_minion.returners
-        _proxy_minion.proxy.pack["__pillar__"] = proxyopts["pillar"]
-        _proxy_minion.proxy.pack["__grains__"] = proxyopts["grains"]
-
-        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
-        _proxy_minion.proxy.utils = salt.loader.utils(
-            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
-        )
-
-        _proxy_minion.proxy.pack["__utils__"] = _proxy_minion.proxy.utils
-
-        # Reload all modules so all dunder variables are injected
-        _proxy_minion.proxy.reload_modules()
-
-        _proxy_minion.connected = True
-
-        _fq_proxyname = proxyopts["proxy"]["proxytype"]
-
-        proxy_init_fn = _proxy_minion.proxy[_fq_proxyname + ".init"]
-        try:
-            proxy_init_fn(proxyopts)
-        except Exception as exc:  # pylint: disable=broad-except
-            log.error(
-                "An exception occured during the initialization of minion %s: %s",
-                _id,
-                exc,
-                exc_info=True,
-            )
-            continue
-
-        # Reload the grains
-        self.proxy_grains[_id] = salt.loader.grains(
-            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
-        )
-        proxyopts["grains"] = self.proxy_grains[_id]
-
-        if not hasattr(_proxy_minion, "schedule"):
-            _proxy_minion.schedule = salt.utils.schedule.Schedule(
-                proxyopts,
-                _proxy_minion.functions,
-                _proxy_minion.returners,
-                cleanup=[salt.minion.master_event(type="alive")],
-                proxy=_proxy_minion.proxy,
-                new_instance=True,
-                _subprocess_list=_proxy_minion.subprocess_list,
-            )
-
-        self.deltaproxy_objs[_id] = _proxy_minion
-        self.deltaproxy_opts[_id] = copy.deepcopy(proxyopts)
-
-        # proxy keepalive
-        _proxy_alive_fn = _fq_proxyname + ".alive"
-        if (
-            _proxy_alive_fn in _proxy_minion.proxy
-            and "status.proxy_reconnect" in self.deltaproxy_objs[_id].functions
-            and proxyopts.get("proxy_keep_alive", True)
-        ):
-            # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
-            _proxy_minion.schedule.add_job(
-                {
-                    "__proxy_keepalive": {
-                        "function": "status.proxy_reconnect",
-                        "minutes": proxyopts.get(
-                            "proxy_keep_alive_interval", 1
-                        ),  # by default, check once per minute
-                        "jid_include": True,
-                        "maxrunning": 1,
-                        "return_job": False,
-                        "kwargs": {"proxy_name": _fq_proxyname},
-                    }
-                },
-                persist=True,
-            )
-            _proxy_minion.schedule.enable_schedule()
-        else:
-            _proxy_minion.schedule.delete_job("__proxy_keepalive", persist=True)
+    if self.opts["proxy"].get("parallel_startup"):
+        log.debug("Initiating parallel startup for proxies")
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            futures = [
+                executor.submit(
+                    subproxy_post_master_init,
+                    _id,
+                    uid,
+                    self.opts,
+                    self.proxy,
+                    self.utils,
+                )
+                for _id in self.opts["proxy"].get("ids", [])
+            ]
+
+            for f in concurrent.futures.as_completed(futures):
+                sub_proxy_data = f.result()
+                minion_id = sub_proxy_data["proxy_opts"].get("id")
+
+                if sub_proxy_data["proxy_minion"]:
+                    self.deltaproxy_opts[minion_id] = sub_proxy_data["proxy_opts"]
+                    self.deltaproxy_objs[minion_id] = sub_proxy_data["proxy_minion"]
+
+                    if self.deltaproxy_opts[minion_id] and self.deltaproxy_objs[minion_id]:
+                        self.deltaproxy_objs[
+                            minion_id
+                        ].req_channel = salt.transport.client.AsyncReqChannel.factory(
+                            sub_proxy_data["proxy_opts"], io_loop=self.io_loop
+                        )
+    else:
+        log.debug("Initiating non-parallel startup for proxies")
+        for _id in self.opts["proxy"].get("ids", []):
+            sub_proxy_data = subproxy_post_master_init(
+                _id, uid, self.opts, self.proxy, self.utils
+            )
+
+            minion_id = sub_proxy_data["proxy_opts"].get("id")
+
+            if sub_proxy_data["proxy_minion"]:
+                self.deltaproxy_opts[minion_id] = sub_proxy_data["proxy_opts"]
+                self.deltaproxy_objs[minion_id] = sub_proxy_data["proxy_minion"]
+
+                if self.deltaproxy_opts[minion_id] and self.deltaproxy_objs[minion_id]:
+                    self.deltaproxy_objs[
+                        minion_id
+                    ].req_channel = salt.transport.client.AsyncReqChannel.factory(
+                        sub_proxy_data["proxy_opts"], io_loop=self.io_loop
+                    )

     self.ready = True


+def subproxy_post_master_init(minion_id, uid, opts, main_proxy, main_utils):
+    """
+    Function to finish init after a deltaproxy proxy
+    minion has finished connecting to a master.
+
+    This is primarily loading modules, pillars, etc. (since they need
+    to know which master they connected to) for the sub proxy minions.
+    """
+    proxy_grains = {}
+    proxy_pillar = {}
+
+    proxyopts = opts.copy()
+    proxyopts["id"] = minion_id
+
+    proxyopts = salt.config.proxy_config(
+        opts["conf_file"], defaults=proxyopts, minion_id=minion_id
+    )
+    proxyopts.update({"id": minion_id, "proxyid": minion_id, "subproxy": True})
+
+    proxy_context = {"proxy_id": minion_id}
+
+    # We need grains first to be able to load pillar, which is where we keep the proxy
+    # configurations
+    proxy_grains = salt.loader.grains(
+        proxyopts, proxy=main_proxy, context=proxy_context
+    )
+    proxy_pillar = salt.pillar.get_pillar(
+        proxyopts,
+        proxy_grains,
+        minion_id,
+        saltenv=proxyopts["saltenv"],
+        pillarenv=proxyopts.get("pillarenv"),
+    ).compile_pillar()
+
+    proxyopts["proxy"] = proxy_pillar.get("proxy", {})
+    if not proxyopts["proxy"]:
+        log.warning(
+            "Pillar data for proxy minion %s could not be loaded, skipping.", minion_id
+        )
+        return {"proxy_minion": None, "proxy_opts": {}}
+
+    # Remove ids
+    proxyopts["proxy"].pop("ids", None)
+
+    proxyopts.update(
+        {
+            "pillar": proxy_pillar,
+            "grains": proxy_grains,
+            "hash_id": opts["id"],
+        }
+    )
+
+    _proxy_minion = ProxyMinion(proxyopts)
+    _proxy_minion.proc_dir = salt.minion.get_proc_dir(proxyopts["cachedir"], uid=uid)
+
+    # And load the modules
+    (
+        _proxy_minion.functions,
+        _proxy_minion.returners,
+        _proxy_minion.function_errors,
+        _proxy_minion.executors,
+    ) = _proxy_minion._load_modules(
+        opts=proxyopts,
+        grains=proxyopts["grains"],
+        context=proxy_context,
+    )
+
+    # we can then sync any proxymodules down from the master
+    # we do a sync_all here in case proxy code was installed by
+    # SPM or was manually placed in /srv/salt/_modules etc.
+    _proxy_minion.functions["saltutil.sync_all"](saltenv=opts["saltenv"])
+
+    # And re-load the modules so the __proxy__ variable gets injected
+    (
+        _proxy_minion.functions,
+        _proxy_minion.returners,
+        _proxy_minion.function_errors,
+        _proxy_minion.executors,
+    ) = _proxy_minion._load_modules(
+        opts=proxyopts,
+        grains=proxyopts["grains"],
+        context=proxy_context,
+    )
+
+    # Create this after modules are synced to ensure
+    # any custom modules, eg. custom proxy modules
+    # are available.
+    _proxy_minion.proxy = salt.loader.proxy(
+        proxyopts, utils=main_utils, context=proxy_context
+    )
+
+    _proxy_minion.functions.pack["__proxy__"] = _proxy_minion.proxy
+    _proxy_minion.proxy.pack["__salt__"] = _proxy_minion.functions
+    _proxy_minion.proxy.pack["__ret__"] = _proxy_minion.returners
+    _proxy_minion.proxy.pack["__pillar__"] = proxyopts["pillar"]
+    _proxy_minion.proxy.pack["__grains__"] = proxyopts["grains"]
+
+    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
+    _proxy_minion.proxy.utils = salt.loader.utils(
+        proxyopts, proxy=_proxy_minion.proxy, context=proxy_context
+    )
+
+    _proxy_minion.proxy.pack["__utils__"] = _proxy_minion.proxy.utils
+
+    # Reload all modules so all dunder variables are injected
+    _proxy_minion.proxy.reload_modules()
+
+    _proxy_minion.connected = True
+
+    _fq_proxyname = proxyopts["proxy"]["proxytype"]
+
+    proxy_init_fn = _proxy_minion.proxy[_fq_proxyname + ".init"]
+    try:
+        proxy_init_fn(proxyopts)
+    except Exception as exc:  # pylint: disable=broad-except
+        log.error(
+            "An exception occurred during the initialization of minion %s: %s",
+            minion_id,
+            exc,
+            exc_info=True,
+        )
+        return {"proxy_minion": None, "proxy_opts": {}}
+
+    # Reload the grains
+    proxy_grains = salt.loader.grains(
+        proxyopts, proxy=_proxy_minion.proxy, context=proxy_context
+    )
+    proxyopts["grains"] = proxy_grains
+
+    if not hasattr(_proxy_minion, "schedule"):
+        _proxy_minion.schedule = salt.utils.schedule.Schedule(
+            proxyopts,
+            _proxy_minion.functions,
+            _proxy_minion.returners,
+            cleanup=[salt.minion.master_event(type="alive")],
+            proxy=_proxy_minion.proxy,
+            new_instance=True,
+            _subprocess_list=_proxy_minion.subprocess_list,
+        )
+
+    # proxy keepalive
+    _proxy_alive_fn = _fq_proxyname + ".alive"
+    if (
+        _proxy_alive_fn in _proxy_minion.proxy
+        and "status.proxy_reconnect" in _proxy_minion.functions
+        and proxyopts.get("proxy_keep_alive", True)
+    ):
+        # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
+        _proxy_minion.schedule.add_job(
+            {
+                "__proxy_keepalive": {
+                    "function": "status.proxy_reconnect",
+                    "minutes": proxyopts.get(
+                        "proxy_keep_alive_interval", 1
+                    ),  # by default, check once per minute
+                    "jid_include": True,
+                    "maxrunning": 1,
+                    "return_job": False,
+                    "kwargs": {"proxy_name": _fq_proxyname},
+                }
+            },
+            persist=True,
+        )
+        _proxy_minion.schedule.enable_schedule()
+    else:
+        _proxy_minion.schedule.delete_job("__proxy_keepalive", persist=True)
+
+    return {"proxy_minion": _proxy_minion, "proxy_opts": proxyopts}
+
+
 def target(cls, minion_instance, opts, data, connected):
     """
     Handle targeting of the minion.
@@ -1031,9 +1090,30 @@ def tune_in(self, start=True):
     Lock onto the publisher. This is the main event loop for the minion
     :rtype : None
     """
-    for proxy_id in self.deltaproxy_objs:
-        _proxy_minion = self.deltaproxy_objs[proxy_id]
-        _proxy_minion.setup_scheduler()
-        _proxy_minion.setup_beacons()
-        _proxy_minion._state_run()
+    if self.opts["proxy"].get("parallel_startup"):
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            futures = [
+                executor.submit(subproxy_tune_in, self.deltaproxy_objs[proxy_minion])
+                for proxy_minion in self.deltaproxy_objs
+            ]
+
+            for f in concurrent.futures.as_completed(futures):
+                _proxy_minion = f.result()
+                log.debug("Tune in for sub proxy %r finished", _proxy_minion.opts.get("id"))
+    else:
+        for proxy_minion in self.deltaproxy_objs:
+            _proxy_minion = subproxy_tune_in(self.deltaproxy_objs[proxy_minion])
+            log.debug("Tune in for sub proxy %r finished", _proxy_minion.opts.get("id"))
     super(ProxyMinion, self).tune_in(start=start)


+def subproxy_tune_in(proxy_minion, start=True):
+    """
+    Tunein sub proxy minions
+    """
+    proxy_minion.setup_scheduler()
+    proxy_minion.setup_beacons()
+    proxy_minion.add_periodic_callback("cleanup", proxy_minion.cleanup_subprocesses)
+    proxy_minion._state_run()
+
+    return proxy_minion
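The option driving both the startup and tune-in branches can be sketched as a hypothetical controlling-proxy configuration, shown here as the loaded opts fragment rather than YAML:

.. code-block:: python

    # Sketch: the key consulted by post_master_init and tune_in above.
    opts_fragment = {
        "proxy": {
            "proxytype": "deltaproxy",
            "parallel_startup": True,  # False or absent -> sequential loop
            "ids": ["sub_proxy_1", "sub_proxy_2"],  # hypothetical sub proxy IDs
        }
    }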
|
@@ -3683,7 +3683,7 @@ class SyndicManager(MinionBase):
             # __'s to make sure it doesn't print out on the master cli
             jdict["__master_id__"] = master
         ret = {}
-        for key in "return", "retcode", "success":
+        for key in "return", "retcode", "success", "fun_args":
             if key in data:
                 ret[key] = data[key]
         jdict[data["id"]] = ret
@@ -3799,6 +3799,16 @@ class ProxyMinion(Minion):
         mp_call = _metaproxy_call(self.opts, "post_master_init")
         return mp_call(self, master)

+    @salt.ext.tornado.gen.coroutine
+    def subproxy_post_master_init(self, minion_id, uid):
+        """
+        Function to finish init for the sub proxies
+
+        :rtype : None
+        """
+        mp_call = _metaproxy_call(self.opts, "subproxy_post_master_init")
+        return mp_call(self, minion_id, uid)
+
     def tune_in(self, start=True):
         """
         Lock onto the publisher. This is the main event loop for the minion
@@ -49,10 +49,16 @@ import time
 import salt.utils.versions
+from salt.exceptions import SaltInvocationError

 log = logging.getLogger(__name__)
 logging.getLogger("boto").setLevel(logging.INFO)


 try:
+    # pylint: disable=unused-import
     import boto
+    import boto3  # pylint: disable=unused-import
     import boto.dynamodb2
+    import botocore
+
+    # pylint: enable=unused-import
     from boto.dynamodb2.fields import (
@@ -79,12 +85,138 @@ def __virtual__():
     """
     Only load if boto libraries exist.
     """
-    has_boto_reqs = salt.utils.versions.check_boto_reqs(check_boto3=False)
+    has_boto_reqs = salt.utils.versions.check_boto_reqs()
     if has_boto_reqs is True:
         __utils__["boto.assign_funcs"](__name__, "dynamodb2", pack=__salt__)
     return has_boto_reqs


+def list_tags_of_resource(
+    resource_arn, region=None, key=None, keyid=None, profile=None
+):
+    """
+    Returns a dictionary of all tags currently attached to a given resource.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt myminion boto_dynamodb.list_tags_of_resource \
+                resource_arn=arn:aws:dynamodb:us-east-1:012345678901:table/my-table
+
+    .. versionadded:: 3006
+    """
+    conn3 = __utils__["boto3.get_connection"](
+        "dynamodb", region=region, key=key, keyid=keyid, profile=profile
+    )
+    retries = 10
+    sleep = 6
+    tags = []
+    while retries:
+        try:
+            log.debug("Garnering tags of resource %s", resource_arn)
+            marker = ""
+            while marker is not None:
+                ret = conn3.list_tags_of_resource(
+                    ResourceArn=resource_arn, NextToken=marker
+                )
+                tags += ret.get("Tags", [])
+                marker = ret.get("NextToken")
+            return {tag["Key"]: tag["Value"] for tag in tags}
+        except botocore.exceptions.ParamValidationError as err:
+            raise SaltInvocationError(str(err))
+        except botocore.exceptions.ClientError as err:
+            if retries and err.response.get("Error", {}).get("Code") == "Throttling":
+                retries -= 1
+                log.debug("Throttled by AWS API, retrying in %s seconds...", sleep)
+                time.sleep(sleep)
+                continue
+            log.error(
+                "Failed to list tags for resource %s: %s", resource_arn, err.message
+            )
+            return False
+
+
+def tag_resource(resource_arn, tags, region=None, key=None, keyid=None, profile=None):
+    """
+    Sets given tags (provided as list or dict) on the given resource.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt myminion boto_dynamodb.tag_resource \
+                resource_arn=arn:aws:dynamodb:us-east-1:012345678901:table/my-table \
+                tags='{Name: my-table, Owner: Ops}'
+
+    .. versionadded:: 3006
+    """
+    conn3 = __utils__["boto3.get_connection"](
+        "dynamodb", region=region, key=key, keyid=keyid, profile=profile
+    )
+    retries = 10
+    sleep = 6
+    if isinstance(tags, dict):
+        tags = [{"Key": key, "Value": val} for key, val in tags.items()]
+    while retries:
+        try:
+            log.debug("Setting tags on resource %s", resource_arn)
+            conn3.tag_resource(ResourceArn=resource_arn, Tags=tags)
+            return True
+        except botocore.exceptions.ParamValidationError as err:
+            raise SaltInvocationError(str(err))
+        except botocore.exceptions.ClientError as err:
+            if retries and err.response.get("Error", {}).get("Code") == "Throttling":
+                retries -= 1
+                log.debug("Throttled by AWS API, retrying in %s seconds...", sleep)
+                time.sleep(sleep)
+                continue
+            log.error(
+                "Failed to set tags on resource %s: %s", resource_arn, err.message
+            )
+            return False
+
+
+def untag_resource(
+    resource_arn, tag_keys, region=None, key=None, keyid=None, profile=None
+):
+    """
+    Removes given tags (provided as list) from the given resource.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt myminion boto_dynamodb.untag_resource \
+                resource_arn=arn:aws:dynamodb:us-east-1:012345678901:table/my-table \
+                tag_keys='[Name, Owner]'
+
+    .. versionadded:: 3006
+    """
+    conn3 = __utils__["boto3.get_connection"](
+        "dynamodb", region=region, key=key, keyid=keyid, profile=profile
+    )
+    retries = 10
+    sleep = 6
+    while retries:
+        try:
+            log.debug("Removing tags from resource %s", resource_arn)
+            ret = conn3.untag_resource(ResourceArn=resource_arn, TagKeys=tag_keys)
+            return True
+        except botocore.exceptions.ParamValidationError as err:
+            raise SaltInvocationError(str(err))
+        except botocore.exceptions.ClientError as err:
+            if retries and err.response.get("Error", {}).get("Code") == "Throttling":
+                retries -= 1
+                log.debug("Throttled by AWS API, retrying in %s seconds...", sleep)
+                time.sleep(sleep)
+                continue
+            log.error(
+                "Failed to remove tags from resource %s: %s", resource_arn, err.message
+            )
+            return False
+
+
 def create_table(
     table_name,
     region=None,

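The NextToken loop in list_tags_of_resource follows the pagination contract of the underlying boto3 call. A standalone sketch against boto3 directly (requires AWS credentials; the region and table ARN are placeholders):

    import boto3

    client = boto3.client("dynamodb", region_name="us-east-1")
    arn = "arn:aws:dynamodb:us-east-1:012345678901:table/my-table"  # placeholder

    tags = []
    kwargs = {"ResourceArn": arn}
    while True:
        resp = client.list_tags_of_resource(**kwargs)
        tags.extend(resp.get("Tags", []))
        if "NextToken" not in resp:
            break
        kwargs["NextToken"] = resp["NextToken"]  # pass the token only when present

    print({t["Key"]: t["Value"] for t in tags})
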
@@ -1222,6 +1222,9 @@ def subvolume_snapshot(source, dest=None, name=None, read_only=False):
     cmd = ["btrfs", "subvolume", "snapshot"]
     if read_only:
         cmd.append("-r")
+
+    cmd.append(source)
+
     if dest and not name:
         cmd.append(dest)
     if dest and name:

@@ -838,6 +838,8 @@ def _run(
                 stream_stderr=True,
             )
             ret["pid"] = proc.pid
+            stdout = ""
+            stderr = ""
             while proc.has_unread_data:
                 try:
                     try:

@@ -848,12 +850,8 @@ def _run(
                         cstdout, cstderr = "", ""
                     if cstdout:
                         stdout += cstdout
-                    else:
-                        stdout = ""
                     if cstderr:
                         stderr += cstderr
-                    else:
-                        stderr = ""
                     if timeout and (time.time() > will_timeout):
                         ret["stderr"] = "SALT: Timeout after {}s\n{}".format(
                             timeout, stderr

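The two hunks above fix the use_vt=True output loss: the accumulators are now initialized once before the read loop instead of being reset whenever a read returns nothing. A toy illustration of the difference:

    # Initialize once, then only append; resetting in the loop (the old
    # else branches) discarded everything read so far.
    stdout, stderr = "", ""
    for chunk_out, chunk_err in [("a", None), (None, "warn"), ("b", None)]:
        if chunk_out:
            stdout += chunk_out
        if chunk_err:
            stderr += chunk_err
    assert (stdout, stderr) == ("ab", "warn")
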
@@ -3,6 +3,7 @@ Module used to access the esxcluster proxy connection methods
 """

 import logging
+from functools import wraps

 import salt.utils.platform

@@ -22,5 +23,27 @@ def __virtual__():
     return (False, "Must be run on a proxy minion")


+def _deprecation_message(function):
+    """
+    Decorator wrapper to warn about azurearm deprecation
+    """
+
+    @wraps(function)
+    def wrapped(*args, **kwargs):
+        salt.utils.versions.warn_until(
+            "Argon",
+            "The 'esxcluster' functionality in Salt has been deprecated and its "
+            "functionality will be removed in version 3008 in favor of the "
+            "saltext.vmware Salt Extension. "
+            "(https://github.com/saltstack/salt-ext-modules-vmware)",
+            category=FutureWarning,
+        )
+        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
+        return ret
+
+    return wrapped
+
+
+@_deprecation_message
 def get_details():
     return __proxy__["esxcluster.get_details"]()

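The same decorator is added to each of the vmware-related proxy shim modules below. Its shape, reduced to the standard library (warnings.warn standing in for salt.utils.versions.warn_until):

    import functools
    import warnings

    def deprecation_message(func):
        # Emit a FutureWarning on every call, then forward unchanged.
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            warnings.warn(
                "deprecated in favor of the saltext.vmware extension",
                FutureWarning,
            )
            return func(*args, **kwargs)

        return wrapped
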
@@ -3,6 +3,7 @@ Module used to access the esxdatacenter proxy connection methods
 """

 import logging
+from functools import wraps

 import salt.utils.platform

@@ -22,5 +23,27 @@ def __virtual__():
     return (False, "Must be run on a proxy minion")


+def _deprecation_message(function):
+    """
+    Decorator wrapper to warn about azurearm deprecation
+    """
+
+    @wraps(function)
+    def wrapped(*args, **kwargs):
+        salt.utils.versions.warn_until(
+            "Argon",
+            "The 'esxdatacenter' functionality in Salt has been deprecated and its "
+            "functionality will be removed in version 3008 in favor of the "
+            "saltext.vmware Salt Extension. "
+            "(https://github.com/saltstack/salt-ext-modules-vmware)",
+            category=FutureWarning,
+        )
+        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
+        return ret
+
+    return wrapped
+
+
+@_deprecation_message
 def get_details():
     return __proxy__["esxdatacenter.get_details"]()

@@ -28,6 +28,7 @@ type manor.


 import logging
+from functools import wraps

 import salt.utils.platform

@@ -49,6 +50,28 @@ def __virtual__():
     )


+def _deprecation_message(function):
+    """
+    Decorator wrapper to warn about azurearm deprecation
+    """
+
+    @wraps(function)
+    def wrapped(*args, **kwargs):
+        salt.utils.versions.warn_until(
+            "Argon",
+            "The 'esxi' functionality in Salt has been deprecated and its "
+            "functionality will be removed in version 3008 in favor of the "
+            "saltext.vmware Salt Extension. "
+            "(https://github.com/saltstack/salt-ext-modules-vmware)",
+            category=FutureWarning,
+        )
+        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
+        return ret
+
+    return wrapped
+
+
+@_deprecation_message
 def cmd(command, *args, **kwargs):
     proxy_prefix = __opts__["proxy"]["proxytype"]
     proxy_cmd = proxy_prefix + ".ch_config"

@@ -56,5 +79,6 @@ def cmd(command, *args, **kwargs):
     return __proxy__[proxy_cmd](command, *args, **kwargs)


+@_deprecation_message
 def get_details():
     return __proxy__["esxi.get_details"]()

@@ -4,6 +4,7 @@ Module used to access the esx proxy connection methods


 import logging
+from functools import wraps

 import salt.utils.platform

@@ -23,5 +24,27 @@ def __virtual__():
     return (False, "Must be run on a proxy minion")


+def _deprecation_message(function):
+    """
+    Decorator wrapper to warn about azurearm deprecation
+    """
+
+    @wraps(function)
+    def wrapped(*args, **kwargs):
+        salt.utils.versions.warn_until(
+            "Argon",
+            "The 'esxvm' functionality in Salt has been deprecated and its "
+            "functionality will be removed in version 3008 in favor of the "
+            "saltext.vmware Salt Extension. "
+            "(https://github.com/saltstack/salt-ext-modules-vmware)",
+            category=FutureWarning,
+        )
+        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
+        return ret
+
+    return wrapped
+
+
+@_deprecation_message
 def get_details():
     return __proxy__["esxvm.get_details"]()

@@ -3525,9 +3525,13 @@ def touch(name, atime=None, mtime=None):
     simply update the atime and mtime if it already does.

     atime:
-        Access time in Unix epoch time
+        Access time in Unix epoch time. Set it to 0 to set atime of the
+        file with Unix date of birth. If this parameter isn't set, atime
+        will be set with current time.
     mtime:
-        Last modification in Unix epoch time
+        Last modification in Unix epoch time. Set it to 0 to set mtime of
+        the file with Unix date of birth. If this parameter isn't set,
+        mtime will be set with current time.

     CLI Example:

|
|||
"""
|
||||
name = os.path.expanduser(name)
|
||||
|
||||
if atime and atime.isdigit():
|
||||
if atime and str(atime).isdigit():
|
||||
atime = int(atime)
|
||||
if mtime and mtime.isdigit():
|
||||
if mtime and str(mtime).isdigit():
|
||||
mtime = int(mtime)
|
||||
try:
|
||||
if not os.path.exists(name):
|
||||
with salt.utils.files.fopen(name, "a"):
|
||||
pass
|
||||
|
||||
if not atime and not mtime:
|
||||
if atime is None and mtime is None:
|
||||
times = None
|
||||
elif not mtime and atime:
|
||||
elif mtime is None and atime is not None:
|
||||
times = (atime, time.time())
|
||||
elif not atime and mtime:
|
||||
elif atime is None and mtime is not None:
|
||||
times = (time.time(), mtime)
|
||||
else:
|
||||
times = (atime, mtime)
|
||||
|
|
|
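The touch fix hinges on 0 being falsy: atime=0 or mtime=0 is a valid epoch timestamp (the Unix date of birth), so the branches must compare against None rather than truthiness. The selection logic in isolation:

    import time

    def pick_times(atime=None, mtime=None):
        # 0 is falsy but valid, so test ``is None``, not truthiness
        if atime is None and mtime is None:
            return None
        if mtime is None:
            return (atime, time.time())
        if atime is None:
            return (time.time(), mtime)
        return (atime, mtime)

    assert pick_times(atime=0, mtime=0) == (0, 0)  # was None before the fix
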
@@ -783,16 +783,21 @@ def import_key(text=None, filename=None, user=None, gnupghome=None):


 def export_key(
-    keyids=None, secret=False, user=None, gnupghome=None, use_passphrase=False
+    keyids=None,
+    secret=False,
+    user=None,
+    gnupghome=None,
+    use_passphrase=False,
+    output=None,
+    bare=False,
 ):
     """
     Export a key from the GPG keychain

     keyids
         The key ID(s) of the key(s) to be exported. Can be specified as a comma
-        separated string or a list. Anything which GnuPG itself accepts to
-        identify a key - for example, the key ID or the fingerprint could be
-        used.
+        separated string or a list. Anything which GnuPG itself accepts to identify a key
+        for example, the key ID, fingerprint, user ID or email address could be used.

     secret
         Export the secret key identified by the ``keyids`` information passed.

@@ -806,11 +811,22 @@ def export_key(
         Specify the location where GPG keyring and related files are stored.

     use_passphrase
-        Whether to use a passphrase with the signing key. Passphrase is received
-        from Pillar.
+        Whether to use a passphrase to export the secret key.
+        Passphrase is received from Pillar.

         .. versionadded:: 3003

+    output
+        The filename where the exported key data will be written to, default is standard out.
+
+        .. versionadded:: 3006.0
+
+    bare
+        If ``True``, return the (armored) exported key block as a string without the
+        standard comment/res dict.
+
+        .. versionadded:: 3006.0
+
     CLI Example:

     .. code-block:: bash

|
|||
salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
|
||||
|
||||
"""
|
||||
ret = {"res": True}
|
||||
gpg = _create_gpg(user, gnupghome)
|
||||
|
||||
if isinstance(keyids, str):
|
||||
keyids = keyids.split(",")
|
||||
|
||||
if use_passphrase:
|
||||
if secret and use_passphrase:
|
||||
gpg_passphrase = __salt__["pillar.get"]("gpg_passphrase")
|
||||
if not gpg_passphrase:
|
||||
raise SaltInvocationError("gpg_passphrase not available in pillar.")
|
||||
ret = gpg.export_keys(keyids, secret, passphrase=gpg_passphrase)
|
||||
result = gpg.export_keys(keyids, secret, passphrase=gpg_passphrase)
|
||||
else:
|
||||
ret = gpg.export_keys(keyids, secret, expect_passphrase=False)
|
||||
result = gpg.export_keys(keyids, secret, expect_passphrase=False)
|
||||
|
||||
if result and output:
|
||||
with salt.utils.files.flopen(output, "w") as fout:
|
||||
fout.write(salt.utils.stringutils.to_str(result))
|
||||
|
||||
if result:
|
||||
if not bare:
|
||||
if output:
|
||||
ret["comment"] = "Exported key data has been written to {}".format(
|
||||
output
|
||||
)
|
||||
else:
|
||||
ret["comment"] = result
|
||||
else:
|
||||
ret = result
|
||||
else:
|
||||
if not bare:
|
||||
ret["res"] = False
|
||||
else:
|
||||
ret = False
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
|
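The new output/bare handling gives export_key three result shapes. A standalone restatement of that shaping (result is whatever gpg.export_keys returned):

    def shape_result(result, output=None, bare=False):
        if result:
            if bare:
                return result  # raw armored key block
            if output:
                return {"res": True, "comment": f"Exported key data has been written to {output}"}
            return {"res": True, "comment": result}
        return False if bare else {"res": False}
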
@@ -1158,6 +1196,7 @@ def encrypt(
     output=None,
     sign=None,
     use_passphrase=False,
+    always_trust=False,
     gnupghome=None,
     bare=False,
 ):

@@ -1170,7 +1209,8 @@ def encrypt(
         ``/etc/salt/gpgkeys``.

     recipients
-        The fingerprints for those recipient whom the data is being encrypted for.
+        The key ID, fingerprint, user ID or email address associated with the recipients
+        key can be used.

     text
         The text to encrypt.

@@ -1186,8 +1226,13 @@ def encrypt(
         default key or fingerprint to specify a different key to sign with.

     use_passphrase
-        Whether to use a passphrase with the signing key. Passphrase is received
-        from Pillar.
+        Whether to use a passphrase with the signing key.
+        Passphrase is received from Pillar.
+
+    always_trust
+        Skip key validation and assume that used keys are fully trusted.
+
+        .. versionadded:: 3006.0

     gnupghome
         Specify the location where GPG keyring and related files are stored.

@@ -1204,37 +1249,39 @@ def encrypt(

         salt '*' gpg.encrypt filename='/path/to/important.file' recipients=recipient@example.com

-        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True \\
+        salt '*' gpg.encrypt filename='/path/to/important.file' sign=True use_passphrase=True \\
                 recipients=recipient@example.com

     """
     ret = {"res": True, "comment": ""}
     gpg = _create_gpg(user, gnupghome)

-    if use_passphrase:
+    if sign and use_passphrase:
         gpg_passphrase = __salt__["pillar.get"]("gpg_passphrase")
         if not gpg_passphrase:
             raise SaltInvocationError("gpg_passphrase not available in pillar.")
         gpg_passphrase = gpg_passphrase["gpg_passphrase"]
     else:
         gpg_passphrase = None

     if text:
-        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
+        result = gpg.encrypt(
+            text,
+            recipients,
+            sign=sign,
+            passphrase=gpg_passphrase,
+            always_trust=always_trust,
+            output=output,
+        )
     elif filename:
         with salt.utils.files.flopen(filename, "rb") as _fp:
-            if output:
-                result = gpg.encrypt_file(
-                    _fp,
-                    recipients,
-                    passphrase=gpg_passphrase,
-                    output=output,
-                    sign=sign,
-                )
-            else:
-                result = gpg.encrypt_file(
-                    _fp, recipients, passphrase=gpg_passphrase, sign=sign
-                )
+            result = gpg.encrypt_file(
+                _fp,
+                recipients,
+                sign=sign,
+                passphrase=gpg_passphrase,
+                always_trust=always_trust,
+                output=output,
+            )
     else:
         raise SaltInvocationError("filename or text must be passed.")

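The passphrase lookup is now gated on an actual signing request. The gate in isolation, with a plain dict standing in for the pillar:

    def resolve_passphrase(sign, use_passphrase, pillar):
        # Only consult the pillar when signing was requested too.
        if sign and use_passphrase:
            entry = pillar.get("gpg_passphrase")
            if not entry:
                raise ValueError("gpg_passphrase not available in pillar.")
            return entry["gpg_passphrase"]
        return None

    assert resolve_passphrase(False, True, {}) is None
    assert resolve_passphrase(True, True, {"gpg_passphrase": {"gpg_passphrase": "s3cret"}}) == "s3cret"
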
@@ -1254,7 +1301,9 @@ def encrypt(
             )
         else:
             ret = False
+
+        log.error(result.stderr)

     return ret

@@ -1310,7 +1359,6 @@ def decrypt(
         gpg_passphrase = __salt__["pillar.get"]("gpg_passphrase")
         if not gpg_passphrase:
             raise SaltInvocationError("gpg_passphrase not available in pillar.")
-        gpg_passphrase = gpg_passphrase["gpg_passphrase"]
     else:
         gpg_passphrase = None

@@ -302,7 +302,6 @@ def new_set(name=None, set_type=None, family="ipv4", comment=False, **kwargs):
     IPv6:
         salt '*' ipset.new_set custom_set list:set family=ipv6
     """
-
     ipset_family = _IPSET_FAMILIES[family]
     if not name:
         return "Error: Set Name needs to be specified"

@@ -483,7 +482,7 @@ def add(name=None, entry=None, family="ipv4", **kwargs):

     settype = setinfo["Type"]

-    cmd = [_ipset_cmd(), "add", "-exist", name, entry]
+    cmd = [_ipset_cmd(), "add", "-exist", name] + entry.split()

     if "timeout" in kwargs:
         if "timeout" not in setinfo["Header"]:

@@ -497,7 +496,7 @@ def add(name=None, entry=None, family="ipv4", **kwargs):
         if "comment" not in setinfo["Header"]:
             return "Error: Set {} not created with comment support".format(name)
         if "comment" not in entry:
-            cmd = '{} comment "{}"'.format(cmd, kwargs["comment"])
+            cmd = cmd + ["comment", f"{kwargs['comment']}"]

     if {"skbmark", "skbprio", "skbqueue"} & set(kwargs.keys()):
         if "skbinfo" not in setinfo["Header"]:

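The comment fix follows from cmd having become an argv list: formatting the list into a string put the repr of a Python list on the command line. Illustrated:

    cmd = ["ipset", "add", "-exist", "myset", "192.168.0.1"]
    broken = '{} comment "{}"'.format(cmd, "allow web")  # stringifies the list
    fixed = cmd + ["comment", "allow web"]               # stays an argv list
    assert fixed[-2:] == ["comment", "allow web"]
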
@@ -364,7 +364,7 @@ def create(domain_name, years, **kwargs):
     for requiredkey in require_opts:
         if requiredkey not in opts:
             log.error("Missing required parameter '%s'", requiredkey)
-            raise Exception("Missing required parameter '" + requiredkey + "'")
+            raise Exception("Missing required parameter '{}'".format(requiredkey))

     response_xml = salt.utils.namecheap.post_request(opts)

@@ -297,7 +297,7 @@ def __get_certificates(

     if http_dc_validation:
         validation_tag = sslresult.getElementsByTagName("HttpDCValidation")
-        if validation_tag is not None and len(validation_tag) > 0:
+        if validation_tag:
             validation_tag = validation_tag[0]

             if validation_tag.getAttribute("ValueAvailable").lower() == "true":

@@ -219,7 +219,7 @@ def _config_logic(
     # will discard the config
     if loaded_result["comment"]:
         loaded_result["comment"] += "\n"
-    if not len(loaded_result.get("diff", "")) > 0:
+    if not loaded_result.get("diff", ""):
         loaded_result["already_configured"] = True
     discarded = _safe_dicard_config(loaded_result, napalm_device)
     if not discarded["result"]:

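This and many of the following hunks apply the same idiom: rely on container truthiness instead of len() comparisons. The equivalences being used:

    diff = ""
    assert (not len(diff) > 0) == (not diff)
    names = ()
    assert (len(names) == 0) == (not names)
    pkg_params = None
    assert (pkg_params is None or len(pkg_params) == 0) == (not pkg_params)
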
@@ -239,7 +239,7 @@ def _config_logic(
     removed = cancel_commit(commit_jid)
     log.debug("Cleaned up the commit from the schedule")
     log.debug(removed["comment"])
-    if len(loaded_result.get("diff", "")) > 0:
+    if loaded_result.get("diff", ""):
         # if not testing mode
         # and also the user wants to commit (default)
         # and there are changes to commit

@@ -222,7 +222,7 @@ def install(name=None, pkgs=None, sources=None, **kwargs):
     except MinionError as exc:
         raise CommandExecutionError(exc)

-    if pkg_params is None or len(pkg_params) == 0:
+    if not pkg_params:
         return {}

     old = list_pkgs()

@@ -57,7 +57,7 @@ def available(name):
         salt '*' service.available sshd
     """
     cmd = "{} get {}".format(_cmd(), name)
-    if __salt__["cmd.retcode"](cmd) == 2:
+    if __salt__["cmd.retcode"](cmd, ignore_retcode=True) == 2:
         return False
     return True

@@ -198,7 +198,7 @@ def status(name, sig=None):
         return bool(__salt__["status.pid"](sig))

     cmd = "{} check {}".format(_cmd(), name)
-    return not __salt__["cmd.retcode"](cmd)
+    return not __salt__["cmd.retcode"](cmd, ignore_retcode=True)


 def enable(name, **kwargs):

@@ -255,7 +255,7 @@ def disabled(name):
         salt '*' service.disabled <service name>
     """
     cmd = "{} get {} status".format(_cmd(), name)
-    return not __salt__["cmd.retcode"](cmd) == 0
+    return not __salt__["cmd.retcode"](cmd, ignore_retcode=True) == 0


 def enabled(name, **kwargs):

@@ -274,7 +274,7 @@ def enabled(name, **kwargs):
         salt '*' service.enabled <service name> flags=<flags>
     """
     cmd = "{} get {} status".format(_cmd(), name)
-    if not __salt__["cmd.retcode"](cmd):
+    if not __salt__["cmd.retcode"](cmd, ignore_retcode=True):
         # also consider a service disabled if the current flags are different
         # than the configured ones so we have a chance to update them
         flags = _get_flags(**kwargs)

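rcctl exits nonzero for answers like "not enabled" or "not running", which these functions treat as data, not failure; ignore_retcode=True keeps cmd.retcode from logging such exits as errors. The same idea outside Salt, using subprocess (OpenBSD only):

    import subprocess

    def service_enabled(name: str) -> bool:
        proc = subprocess.run(
            ["rcctl", "get", name, "status"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=False,  # a nonzero exit means "disabled", not an error
        )
        return proc.returncode == 0
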
@@ -209,7 +209,7 @@ def latest_version(*names, **kwargs):
     """
     refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))

-    if len(names) == 0:
+    if not names:
         return ""

     ret = {}

@@ -504,7 +504,7 @@ def install(
     to_downgrade = []

     _append_noaction_if_testmode(cmd_prefix, **kwargs)
-    if pkg_params is None or len(pkg_params) == 0:
+    if not pkg_params:
         return {}
     elif pkg_type == "file":
         if reinstall:

@@ -59,7 +59,7 @@ def __virtual__():
         return __virtualname__
     return (
         False,
-        "The oracle execution module not loaded: python oracle library not found.",
+        "oracle execution module not loaded: python oracle library not found.",
     )

@@ -62,7 +62,7 @@ def latest_version(*names, **kwargs):
     """
     refresh = salt.utils.data.is_true(kwargs.pop("refresh", False))

-    if len(names) == 0:
+    if not names:
         return ""

     # Refresh before looking for the latest version available

@@ -529,7 +529,7 @@ def install(
     except MinionError as exc:
         raise CommandExecutionError(exc)

-    if pkg_params is None or len(pkg_params) == 0:
+    if not pkg_params:
         return {}

     if "root" in kwargs:

@@ -849,7 +849,7 @@ def file_list(*packages, **kwargs):
     ret = []
     cmd = ["pacman", "-Ql"]

-    if len(packages) > 0 and os.path.exists(packages[0]):
+    if packages and os.path.exists(packages[0]):
         packages = list(packages)
         cmd.extend(("-r", packages.pop(0)))

@@ -883,7 +883,7 @@ def file_dict(*packages, **kwargs):
     ret = {}
     cmd = ["pacman", "-Ql"]

-    if len(packages) > 0 and os.path.exists(packages[0]):
+    if packages and os.path.exists(packages[0]):
         packages = list(packages)
         cmd.extend(("-r", packages.pop(0)))

@@ -337,7 +337,7 @@ def create_or_update_resource(
         resource_value = resource.get(k, None)
         if resource_value is not None and resource_value != v:
             data_to_update[k] = v
-    if len(data_to_update) > 0:
+    if data_to_update:
         if __opts__["test"]:
             return "would update"
         # flush the resource_cache, because we're modifying a resource

@@ -538,7 +538,7 @@ def snapshot_name_to_id(name, snap_name, strict=False, runas=None):

     # Return one or more IDs having snap_name or raise an error upon
     # non-singular names
-    if len(named_ids) == 0:
+    if not named_ids:
         raise SaltInvocationError(
             'No snapshots for VM "{}" have name "{}"'.format(name, snap_name)
         )

@@ -27,11 +27,22 @@ def _repack_pkgs(pkgs, normalize=True):
         _normalize_name = __salt__["pkg.normalize_name"]
     else:
         _normalize_name = lambda pkgname: pkgname
-    return {
+
+    repacked_pkgs = {
         _normalize_name(str(x)): str(y) if y is not None else y
         for x, y in salt.utils.data.repack_dictlist(pkgs).items()
     }

+    # Check if there were collisions in names
+    if len(pkgs) != len(repacked_pkgs):
+        raise SaltInvocationError(
+            "You are passing a list of packages that contains duplicated packages names: {}. This cannot be processed. In case you are targeting different versions of the same package, please target them individually".format(
+                pkgs
+            )
+        )
+
+    return repacked_pkgs
+

 def pack_sources(sources, normalize=True):
     """

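The collision check works because repacking a list of single-key dicts collapses duplicate names. A self-contained repro (repack_dictlist simplified from salt.utils.data):

    def repack_dictlist(pkgs):
        # simplified stand-in for salt.utils.data.repack_dictlist
        repacked = {}
        for item in pkgs:
            repacked.update(item)
        return repacked

    pkgs = [{"vim": "8.2"}, {"vim": "9.0"}]
    repacked = repack_dictlist(pkgs)
    # two entries collapse to one key; the new check raises
    # SaltInvocationError instead of silently dropping a version
    assert len(pkgs) != len(repacked)
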
@@ -184,7 +195,7 @@ def version(*names, **kwargs):
     ret = {}
     versions_as_list = salt.utils.data.is_true(kwargs.pop("versions_as_list", False))
     pkg_glob = False
-    if len(names) != 0:
+    if names:
         pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
         for name in names:
             if "*" in name:

@@ -285,7 +285,7 @@ def latest_version(*names, **kwargs):
         salt '*' pkg.latest_version <package name> jail=<jail name or id>
         salt '*' pkg.latest_version <package name> chroot=/path/to/chroot
     """
-    if len(names) == 0:
+    if not names:
         return ""
     ret = {}

@@ -826,7 +826,7 @@ def install(
     except MinionError as exc:
         raise CommandExecutionError(exc)

-    if pkg_params is None or len(pkg_params) == 0:
+    if not pkg_params:
         return {}

     env = {}

@@ -288,7 +288,7 @@ def install(name=None, refresh=False, version=None, pkgs=None, **kwargs):
     except MinionError as exc:
         raise CommandExecutionError(exc)

-    if pkg_params is None or len(pkg_params) == 0:
+    if not pkg_params:
         return {}

     if pkgs is None and version and len(pkg_params) == 1:

Some files were not shown because too many files have changed in this diff