mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Merge branch 'develop' of github.com:saltstack/salt into napalm-beacon
This commit is contained in:
commit
40f36c9915
231 changed files with 11465 additions and 3129 deletions
60
.github/CODEOWNERS
vendored
Normal file
60
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
# SALTSTACK CODE OWNERS
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about CODEOWNERS file
|
||||
|
||||
# Lines starting with '#' are comments.
|
||||
# Each line is a file pattern followed by one or more owners.
|
||||
|
||||
# See https://help.github.com/articles/about-codeowners/
|
||||
# for more info about the CODEOWNERS file
|
||||
|
||||
# Team Boto
|
||||
salt/**/*boto* @saltstack/team-boto
|
||||
|
||||
# Team Core
|
||||
salt/auth/ @saltstack/team-core
|
||||
salt/cache/ @saltstack/team-core
|
||||
salt/cli/ @saltstack/team-core
|
||||
salt/client/* @saltstack/team-core
|
||||
salt/config/* @saltstack/team-core
|
||||
salt/daemons/ @saltstack/team-core
|
||||
salt/pillar/ @saltstack/team-core
|
||||
salt/loader.py @saltstack/team-core
|
||||
salt/payload.py @saltstack/team-core
|
||||
salt/**/master* @saltstack/team-core
|
||||
salt/**/minion* @saltstack/team-core
|
||||
|
||||
# Team Cloud
|
||||
salt/cloud/ @saltstack/team-cloud
|
||||
salt/utils/openstack/ @saltstack/team-cloud
|
||||
salt/utils/aws.py @saltstack/team-cloud
|
||||
salt/**/*cloud* @saltstack/team-cloud
|
||||
|
||||
# Team NetAPI
|
||||
salt/cli/api.py @saltstack/team-netapi
|
||||
salt/client/netapi.py @saltstack/team-netapi
|
||||
salt/netapi/ @saltstack/team-netapi
|
||||
|
||||
# Team Network
|
||||
salt/proxy/ @saltstack/team-proxy
|
||||
|
||||
# Team SPM
|
||||
salt/cli/spm.py @saltstack/team-spm
|
||||
salt/spm/ @saltstack/team-spm
|
||||
|
||||
# Team SSH
|
||||
salt/cli/ssh.py @saltstack/team-ssh
|
||||
salt/client/ssh/ @saltstack/team-ssh
|
||||
salt/runners/ssh.py @saltstack/team-ssh
|
||||
salt/**/thin.py @saltstack/team-ssh
|
||||
|
||||
# Team State
|
||||
salt/state.py @saltstack/team-state
|
||||
|
||||
# Team Transport
|
||||
salt/transport/ @saltstack/team-transport
|
||||
salt/utils/zeromq.py @saltstack/team-transport
|
||||
|
||||
# Team Windows
|
||||
salt/**/*win* @saltstack/team-windows
|
4
.github/stale.yml
vendored
4
.github/stale.yml
vendored
|
@ -1,8 +1,8 @@
|
|||
# Probot Stale configuration file
|
||||
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
# 1075 is approximately 2 years and 11 months
|
||||
daysUntilStale: 1075
|
||||
# 1030 is approximately 2 years and 10 months
|
||||
daysUntilStale: 1030
|
||||
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
|
|
|
@ -373,7 +373,7 @@
|
|||
# interface: eth0
|
||||
# cidr: '10.0.0.0/8'
|
||||
|
||||
# The number of seconds a mine update runs.
|
||||
# The number of minutes between mine updates.
|
||||
#mine_interval: 60
|
||||
|
||||
# Windows platforms lack posix IPC and must rely on slower TCP based inter-
|
||||
|
|
|
@ -706,7 +706,7 @@ Note these can be defined in the pillar for a minion as well.
|
|||
|
||||
Default: ``60``
|
||||
|
||||
The number of seconds a mine update runs.
|
||||
The number of minutes between mine updates.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -2113,6 +2113,41 @@ It will be interpreted as megabytes.
|
|||
|
||||
file_recv_max_size: 100
|
||||
|
||||
.. conf_minion:: pass_to_ext_pillars
|
||||
|
||||
``pass_to_ext_pillars``
|
||||
-----------------------
|
||||
|
||||
Specify a list of configuration keys whose values are to be passed to
|
||||
external pillar functions.
|
||||
|
||||
Suboptions can be specified using the ':' notation (i.e. ``option:suboption``)
|
||||
|
||||
The values are merged and included in the ``extra_minion_data`` optional
|
||||
parameter of the external pillar function. The ``extra_minion_data`` parameter
|
||||
is passed only to the external pillar functions that have it explicitly
|
||||
specified in their definition.
|
||||
|
||||
If the config contains
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
opt1: value1
|
||||
opt2:
|
||||
subopt1: value2
|
||||
subopt2: value3
|
||||
|
||||
pass_to_ext_pillars:
|
||||
- opt1
|
||||
- opt2: subopt1
|
||||
|
||||
the ``extra_minion_data`` parameter will be
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
{'opt1': 'value1',
|
||||
'opt2': {'subopt1': 'value2'}}
|
||||
|
||||
Security Settings
|
||||
=================
|
||||
|
||||
|
|
|
@ -118,3 +118,53 @@ has to be closed after every command.
|
|||
.. code-block:: yaml
|
||||
|
||||
proxy_always_alive: False
|
||||
|
||||
``proxy_merge_pillar_in_opts``
|
||||
------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Wheter the pillar data to be merged into the proxy configuration options.
|
||||
As multiple proxies can run on the same server, we may need different
|
||||
configuration options for each, while there's one single configuration file.
|
||||
The solution is merging the pillar data of each proxy minion into the opts.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
proxy_merge_pillar_in_opts: True
|
||||
|
||||
``proxy_deep_merge_pillar_in_opts``
|
||||
-----------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``False``.
|
||||
|
||||
Deep merge of pillar data into configuration opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
|
||||
|
||||
``proxy_merge_pillar_in_opts_strategy``
|
||||
---------------------------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``smart``.
|
||||
|
||||
The strategy used when merging pillar configuration into opts.
|
||||
This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is
|
||||
enabled.
|
||||
|
||||
``proxy_mines_pillar``
|
||||
----------------------
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
Default: ``True``.
|
||||
|
||||
Allow enabling mine details using pillar data. This evaluates the mine
|
||||
configuration under the pillar, for the following regular minion options that
|
||||
are also equally available on the proxy minion: :conf_minion:`mine_interval`,
|
||||
and :conf_minion:`mine_functions`.
|
||||
|
|
|
@ -44,6 +44,7 @@ execution modules
|
|||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch
|
||||
boto_cloudwatch_event
|
||||
|
@ -417,6 +418,7 @@ execution modules
|
|||
test
|
||||
testinframod
|
||||
test_virtual
|
||||
textfsm_mod
|
||||
timezone
|
||||
tls
|
||||
tomcat
|
||||
|
|
6
doc/ref/modules/all/salt.modules.boto_cloudfront.rst
Normal file
6
doc/ref/modules/all/salt.modules.boto_cloudfront.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
============================
|
||||
salt.modules.boto_cloudfront
|
||||
============================
|
||||
|
||||
.. automodule:: salt.modules.boto_cloudfront
|
||||
:members:
|
5
doc/ref/modules/all/salt.modules.textfsm_mod.rst
Normal file
5
doc/ref/modules/all/salt.modules.textfsm_mod.rst
Normal file
|
@ -0,0 +1,5 @@
|
|||
salt.modules.textfsm_mod module
|
||||
===============================
|
||||
|
||||
.. automodule:: salt.modules.textfsm_mod
|
||||
:members:
|
|
@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f
|
|||
for chunk in chunks:
|
||||
# The state runtime uses "tags" to track completed jobs, it may
|
||||
# look familiar with the _|-
|
||||
tag = salt.utils.gen_state_tag(chunk)
|
||||
tag = __utils__['state.gen_tag'](chunk)
|
||||
if tag in running:
|
||||
# Already ran the pkg state, skip aggregation
|
||||
continue
|
||||
|
|
|
@ -31,6 +31,7 @@ state modules
|
|||
boto_apigateway
|
||||
boto_asg
|
||||
boto_cfn
|
||||
boto_cloudfront
|
||||
boto_cloudtrail
|
||||
boto_cloudwatch_alarm
|
||||
boto_cloudwatch_event
|
||||
|
@ -179,6 +180,7 @@ state modules
|
|||
netusers
|
||||
network
|
||||
netyang
|
||||
nfs_export
|
||||
nftables
|
||||
npm
|
||||
ntp
|
||||
|
|
6
doc/ref/states/all/salt.states.boto_cloudfront.rst
Normal file
6
doc/ref/states/all/salt.states.boto_cloudfront.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
===========================
|
||||
salt.states.boto_cloudfront
|
||||
===========================
|
||||
|
||||
.. automodule:: salt.states.boto_cloudfront
|
||||
:members:
|
6
doc/ref/states/all/salt.states.nfs_export.rst
Normal file
6
doc/ref/states/all/salt.states.nfs_export.rst
Normal file
|
@ -0,0 +1,6 @@
|
|||
======================
|
||||
salt.states.nfs_export
|
||||
======================
|
||||
|
||||
.. automodule:: salt.states.nfs_export
|
||||
:members:
|
|
@ -153,7 +153,12 @@ A State Module must return a dict containing the following keys/values:
|
|||
However, if a state is going to fail and this can be determined
|
||||
in test mode without applying the change, ``False`` can be returned.
|
||||
|
||||
- **comment:** A string containing a summary of the result.
|
||||
- **comment:** A list of strings or a single string summarizing the result.
|
||||
Note that support for lists of strings is available as of Salt Oxygen.
|
||||
Lists of strings will be joined with newlines to form the final comment;
|
||||
this is useful to allow multiple comments from subparts of a state.
|
||||
Prefer to keep line lengths short (use multiple lines as needed),
|
||||
and end with punctuation (e.g. a period) to delimit multiple comments.
|
||||
|
||||
The return data can also, include the **pchanges** key, this stands for
|
||||
`predictive changes`. The **pchanges** key informs the State system what
|
||||
|
|
|
@ -263,9 +263,17 @@ against that branch.
|
|||
Release Branches
|
||||
----------------
|
||||
|
||||
For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases.
|
||||
For each release, a branch will be created when the SaltStack release team is
|
||||
ready to tag. The release branch is created from the parent branch and will be
|
||||
the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
|
||||
branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
|
||||
release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
|
||||
strategy will allow for more stability when there is a need for a re-tag during
|
||||
the testing phase of the release process.
|
||||
|
||||
Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well.
|
||||
Once the release branch is created, the fixes required for a given release, as
|
||||
determined by the SaltStack release team, will be added to this branch. All
|
||||
commits in this branch will be merged forward into the parent branch as well.
|
||||
|
||||
Keeping Salt Forks in Sync
|
||||
==========================
|
||||
|
|
|
@ -51,6 +51,19 @@ New NaCl Renderer
|
|||
|
||||
A new renderer has been added for encrypted data.
|
||||
|
||||
New support for Cisco UCS Chassis
|
||||
---------------------------------
|
||||
|
||||
The salt proxy minion now allows for control of Cisco USC chassis. See
|
||||
the `cimc` modules for details.
|
||||
|
||||
New salt-ssh roster
|
||||
-------------------
|
||||
|
||||
A new roster has been added that allows users to pull in a list of hosts
|
||||
for salt-ssh targeting from a ~/.ssh configuration. For full details,
|
||||
please see the `sshconfig` roster.
|
||||
|
||||
New GitFS Features
|
||||
------------------
|
||||
|
||||
|
@ -110,9 +123,15 @@ Support has been added to the ``virtual`` grain for detecting Solaris LDOMs
|
|||
running on T-Series SPARC hardware. The ``virtual_subtype`` grain is
|
||||
populated as a list of domain roles.
|
||||
|
||||
Lists of comments in state returns
|
||||
----------------------------------
|
||||
|
||||
State functions can now return a list of strings for the ``comment`` field,
|
||||
as opposed to only a single string.
|
||||
This is meant to ease writing states with multiple or multi-part comments.
|
||||
|
||||
Beacon configuration changes
|
||||
----------------------------------------
|
||||
----------------------------
|
||||
|
||||
In order to remain consistent and to align with other Salt components such as states,
|
||||
support for configuring beacons using dictionary based configuration has been deprecated
|
||||
|
@ -734,3 +753,7 @@ Other Miscellaneous Deprecations
|
|||
The ``version.py`` file had the following changes:
|
||||
|
||||
- The ``rc_info`` function was removed. Please use ``pre_info`` instead.
|
||||
|
||||
Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be
|
||||
used instead. The removal of these warnings does not have a behavior change. Only
|
||||
the warning text was removed.
|
||||
|
|
|
@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file
|
|||
:param bool allusers: This parameter is specific to `.msi` installations. It
|
||||
tells `msiexec` to install the software for all users. The default is True.
|
||||
|
||||
:param bool cache_dir: If true, the entire directory where the installer resides
|
||||
will be recursively cached. This is useful for installers that depend on
|
||||
other files in the same directory for installation.
|
||||
:param bool cache_dir: If true when installer URL begins with salt://, the
|
||||
entire directory where the installer resides will be recursively cached.
|
||||
This is useful for installers that depend on other files in the same
|
||||
directory for installation.
|
||||
|
||||
.. note:: Only applies to salt: installer URLs.
|
||||
:param str cache_file:
|
||||
When installer URL begins with salt://, this indicates single file to copy
|
||||
down for use with the installer. Copied to the same location as the
|
||||
installer. Use this over ``cache_dir`` if there are many files in the
|
||||
directory and you only need a specific file and don't want to cache
|
||||
additional files that may reside in the installer directory.
|
||||
|
||||
Here's an example for a software package that has dependent files:
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ if Defined x (
|
|||
if %Python%==2 (
|
||||
Set "PyDir=C:\Python27"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
)
|
||||
Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts"
|
||||
|
||||
|
|
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
|
|||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower())))
|
|||
|
||||
#==============================================================================
|
||||
# Update PIP and SetupTools
|
||||
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
|
||||
|
@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
|
|||
|
||||
#==============================================================================
|
||||
# Install pypi resources using pip
|
||||
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
|
||||
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
|
||||
#==============================================================================
|
||||
Write-Output " ----------------------------------------------------------------"
|
||||
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
|
||||
|
@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
|
|||
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
|
||||
}
|
||||
|
||||
#==============================================================================
|
||||
# Move PyWin32 DLL's to site-packages\win32
|
||||
#==============================================================================
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
|
||||
|
||||
# Remove pythonwin directory
|
||||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Install PyYAML with CLoader
|
||||
# This has to be a compiled binary to get the CLoader
|
||||
|
|
|
@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
|
|||
DownloadFileWithProgress $url $file
|
||||
|
||||
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
|
||||
$p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru
|
||||
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i
|
|||
|
||||
# Move DLL's to Python Root
|
||||
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
|
||||
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
|
||||
|
||||
# Remove pywin32_system32 directory
|
||||
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
|
||||
|
@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"
|
|||
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
|
||||
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse
|
||||
|
||||
# Remove PyWin32 PostInstall and testall Scripts
|
||||
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
|
||||
Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse
|
||||
|
||||
#==============================================================================
|
||||
# Fix PyCrypto
|
||||
#==============================================================================
|
||||
|
|
|
@ -56,7 +56,7 @@ if %Python%==2 (
|
|||
Set "PyVerMajor=2"
|
||||
Set "PyVerMinor=7"
|
||||
) else (
|
||||
Set "PyDir=C:\Program Files\Python35"
|
||||
Set "PyDir=C:\Python35"
|
||||
Set "PyVerMajor=3"
|
||||
Set "PyVerMinor=5"
|
||||
)
|
||||
|
|
|
@ -16,9 +16,10 @@ if %errorLevel%==0 (
|
|||
)
|
||||
echo.
|
||||
|
||||
:CheckPython2
|
||||
if exist "\Python27" goto RemovePython2
|
||||
if exist "\Program Files\Python35" goto RemovePython3
|
||||
goto eof
|
||||
|
||||
goto CheckPython3
|
||||
|
||||
:RemovePython2
|
||||
rem Uninstall Python 2.7
|
||||
|
@ -47,25 +48,30 @@ goto eof
|
|||
|
||||
goto eof
|
||||
|
||||
:CheckPython3
|
||||
if exist "\Python35" goto RemovePython3
|
||||
|
||||
goto eof
|
||||
|
||||
:RemovePython3
|
||||
echo %0 :: Uninstalling Python 3 ...
|
||||
echo ---------------------------------------------------------------------
|
||||
:: 64 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
|
||||
echo %0 :: - 3.5.3 64bit
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
|
||||
)
|
||||
|
||||
:: 32 bit
|
||||
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
|
||||
echo %0 :: - 3.5.3 32bit
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall
|
||||
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
|
||||
)
|
||||
|
||||
rem wipe the Python directory
|
||||
echo %0 :: Removing the C:\Program Files\Python35 Directory ...
|
||||
echo %0 :: Removing the C:\Python35 Directory ...
|
||||
echo ---------------------------------------------------------------------
|
||||
rd /s /q "C:\Program Files\Python35"
|
||||
rd /s /q "C:\Python35"
|
||||
if %errorLevel%==0 (
|
||||
echo Successful
|
||||
) else (
|
||||
|
|
|
@ -44,7 +44,7 @@ ${StrStrAdv}
|
|||
!define CPUARCH "x86"
|
||||
!endif
|
||||
|
||||
; Part of the Trim function for Strings
|
||||
# Part of the Trim function for Strings
|
||||
!define Trim "!insertmacro Trim"
|
||||
!macro Trim ResultVar String
|
||||
Push "${String}"
|
||||
|
@ -61,27 +61,27 @@ ${StrStrAdv}
|
|||
!define MUI_UNICON "salt.ico"
|
||||
!define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp"
|
||||
|
||||
; Welcome page
|
||||
# Welcome page
|
||||
!insertmacro MUI_PAGE_WELCOME
|
||||
|
||||
; License page
|
||||
# License page
|
||||
!insertmacro MUI_PAGE_LICENSE "LICENSE.txt"
|
||||
|
||||
; Configure Minion page
|
||||
# Configure Minion page
|
||||
Page custom pageMinionConfig pageMinionConfig_Leave
|
||||
|
||||
; Instfiles page
|
||||
# Instfiles page
|
||||
!insertmacro MUI_PAGE_INSTFILES
|
||||
|
||||
; Finish page (Customized)
|
||||
# Finish page (Customized)
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show
|
||||
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave
|
||||
!insertmacro MUI_PAGE_FINISH
|
||||
|
||||
; Uninstaller pages
|
||||
# Uninstaller pages
|
||||
!insertmacro MUI_UNPAGE_INSTFILES
|
||||
|
||||
; Language files
|
||||
# Language files
|
||||
!insertmacro MUI_LANGUAGE "English"
|
||||
|
||||
|
||||
|
@ -201,8 +201,8 @@ ShowInstDetails show
|
|||
ShowUnInstDetails show
|
||||
|
||||
|
||||
; Check and install Visual C++ redist packages
|
||||
; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
# Check and install Visual C++ redist packages
|
||||
# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info
|
||||
Section -Prerequisites
|
||||
|
||||
Var /GLOBAL VcRedistName
|
||||
|
@ -211,12 +211,12 @@ Section -Prerequisites
|
|||
Var /Global CheckVcRedist
|
||||
StrCpy $CheckVcRedist "False"
|
||||
|
||||
; Visual C++ 2015 redist packages
|
||||
# Visual C++ 2015 redist packages
|
||||
!define PY3_VC_REDIST_NAME "VC_Redist_2015"
|
||||
!define PY3_VC_REDIST_X64_GUID "{50A2BC33-C9CD-3BF1-A8FF-53C10A0B183C}"
|
||||
!define PY3_VC_REDIST_X86_GUID "{BBF2AC74-720C-3CB3-8291-5E34039232FA}"
|
||||
|
||||
; Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
# Visual C++ 2008 SP1 MFC Security Update redist packages
|
||||
!define PY2_VC_REDIST_NAME "VC_Redist_2008_SP1_MFC"
|
||||
!define PY2_VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}"
|
||||
!define PY2_VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}"
|
||||
|
@ -239,7 +239,7 @@ Section -Prerequisites
|
|||
StrCpy $VcRedistGuid ${PY2_VC_REDIST_X86_GUID}
|
||||
${EndIf}
|
||||
|
||||
; VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
# VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below
|
||||
${If} ${AtMostWin2008R2}
|
||||
StrCpy $CheckVcRedist "True"
|
||||
${EndIf}
|
||||
|
@ -255,20 +255,41 @@ Section -Prerequisites
|
|||
"$VcRedistName is currently not installed. Would you like to install?" \
|
||||
/SD IDYES IDNO endVcRedist
|
||||
|
||||
ClearErrors
|
||||
; The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
# The Correct version of VCRedist is copied over by "build_pkg.bat"
|
||||
SetOutPath "$INSTDIR\"
|
||||
File "..\prereqs\vcredist.exe"
|
||||
; /passive used by 2015 installer
|
||||
; /qb! used by 2008 installer
|
||||
; It just ignores the unrecognized switches...
|
||||
ExecWait "$INSTDIR\vcredist.exe /qb! /passive"
|
||||
IfErrors 0 endVcRedist
|
||||
# If an output variable is specified ($0 in the case below),
|
||||
# ExecWait sets the variable with the exit code (and only sets the
|
||||
# error flag if an error occurs; if an error occurs, the contents
|
||||
# of the user variable are undefined).
|
||||
# http://nsis.sourceforge.net/Reference/ExecWait
|
||||
# /passive used by 2015 installer
|
||||
# /qb! used by 2008 installer
|
||||
# It just ignores the unrecognized switches...
|
||||
ClearErrors
|
||||
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
|
||||
IfErrors 0 CheckVcRedistErrorCode
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed to install. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
Goto endVcRedist
|
||||
|
||||
CheckVcRedistErrorCode:
|
||||
# Check for Reboot Error Code (3010)
|
||||
${If} $0 == 3010
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName installed but requires a restart to complete." \
|
||||
/SD IDOK
|
||||
|
||||
# Check for any other errors
|
||||
${ElseIfNot} $0 == 0
|
||||
MessageBox MB_OK \
|
||||
"$VcRedistName failed with ErrorCode: $0. Try installing the package manually." \
|
||||
/SD IDOK
|
||||
${EndIf}
|
||||
|
||||
endVcRedist:
|
||||
|
||||
${EndIf}
|
||||
|
||||
${EndIf}
|
||||
|
@ -294,12 +315,12 @@ Function .onInit
|
|||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
; Check for existing installation
|
||||
# Check for existing installation
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" checkOther
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
|
@ -307,12 +328,12 @@ Function .onInit
|
|||
Abort
|
||||
|
||||
checkOther:
|
||||
; Check for existing installation of full salt
|
||||
# Check for existing installation of full salt
|
||||
ReadRegStr $R0 HKLM \
|
||||
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \
|
||||
"UninstallString"
|
||||
StrCmp $R0 "" skipUninstall
|
||||
; Found existing installation, prompt to uninstall
|
||||
# Found existing installation, prompt to uninstall
|
||||
MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \
|
||||
"${PRODUCT_NAME_OTHER} is already installed.$\n$\n\
|
||||
Click `OK` to remove the existing installation." \
|
||||
|
@ -321,22 +342,22 @@ Function .onInit
|
|||
|
||||
uninst:
|
||||
|
||||
; Get current Silent status
|
||||
# Get current Silent status
|
||||
StrCpy $R0 0
|
||||
${If} ${Silent}
|
||||
StrCpy $R0 1
|
||||
${EndIf}
|
||||
|
||||
; Turn on Silent mode
|
||||
# Turn on Silent mode
|
||||
SetSilent silent
|
||||
|
||||
; Don't remove all directories
|
||||
# Don't remove all directories
|
||||
StrCpy $DeleteInstallDir 0
|
||||
|
||||
; Uninstall silently
|
||||
# Uninstall silently
|
||||
Call uninstallSalt
|
||||
|
||||
; Set it back to Normal mode, if that's what it was before
|
||||
# Set it back to Normal mode, if that's what it was before
|
||||
${If} $R0 == 0
|
||||
SetSilent normal
|
||||
${EndIf}
|
||||
|
@ -350,7 +371,7 @@ Section -Post
|
|||
|
||||
WriteUninstaller "$INSTDIR\uninst.exe"
|
||||
|
||||
; Uninstall Registry Entries
|
||||
# Uninstall Registry Entries
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"DisplayName" "$(^Name)"
|
||||
WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
|
@ -366,19 +387,19 @@ Section -Post
|
|||
WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \
|
||||
"DependOnService" "nsi"
|
||||
|
||||
; Set the estimated size
|
||||
# Set the estimated size
|
||||
${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2
|
||||
IntFmt $0 "0x%08X" $0
|
||||
WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \
|
||||
"EstimatedSize" "$0"
|
||||
|
||||
; Commandline Registry Entries
|
||||
# Commandline Registry Entries
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat"
|
||||
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\"
|
||||
|
||||
; Register the Salt-Minion Service
|
||||
# Register the Salt-Minion Service
|
||||
nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet"
|
||||
nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com"
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START"
|
||||
|
@ -398,12 +419,12 @@ SectionEnd
|
|||
|
||||
Function .onInstSuccess
|
||||
|
||||
; If StartMinionDelayed is 1, then set the service to start delayed
|
||||
# If StartMinionDelayed is 1, then set the service to start delayed
|
||||
${If} $StartMinionDelayed == 1
|
||||
nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START"
|
||||
${EndIf}
|
||||
|
||||
; If start-minion is 1, then start the service
|
||||
# If start-minion is 1, then start the service
|
||||
${If} $StartMinion == 1
|
||||
nsExec::Exec 'net start salt-minion'
|
||||
${EndIf}
|
||||
|
@ -413,10 +434,11 @@ FunctionEnd
|
|||
|
||||
Function un.onInit
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Uninstaller: Remove Installation Directory
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/delete-install-dir" $R1
|
||||
IfErrors delete_install_dir_not_found
|
||||
StrCpy $DeleteInstallDir 1
|
||||
|
@ -434,7 +456,7 @@ Section Uninstall
|
|||
|
||||
Call un.uninstallSalt
|
||||
|
||||
; Remove C:\salt from the Path
|
||||
# Remove C:\salt from the Path
|
||||
Push "C:\salt"
|
||||
Call un.RemoveFromPath
|
||||
|
||||
|
@ -444,27 +466,27 @@ SectionEnd
|
|||
!macro uninstallSalt un
|
||||
Function ${un}uninstallSalt
|
||||
|
||||
; Make sure we're in the right directory
|
||||
# Make sure we're in the right directory
|
||||
${If} $INSTDIR == "c:\salt\bin\Scripts"
|
||||
StrCpy $INSTDIR "C:\salt"
|
||||
${EndIf}
|
||||
|
||||
; Stop and Remove salt-minion service
|
||||
# Stop and Remove salt-minion service
|
||||
nsExec::Exec 'net stop salt-minion'
|
||||
nsExec::Exec 'sc delete salt-minion'
|
||||
|
||||
; Stop and remove the salt-master service
|
||||
# Stop and remove the salt-master service
|
||||
nsExec::Exec 'net stop salt-master'
|
||||
nsExec::Exec 'sc delete salt-master'
|
||||
|
||||
; Remove files
|
||||
# Remove files
|
||||
Delete "$INSTDIR\uninst.exe"
|
||||
Delete "$INSTDIR\nssm.exe"
|
||||
Delete "$INSTDIR\salt*"
|
||||
Delete "$INSTDIR\vcredist.exe"
|
||||
RMDir /r "$INSTDIR\bin"
|
||||
|
||||
; Remove Registry entries
|
||||
# Remove Registry entries
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}"
|
||||
|
@ -474,17 +496,17 @@ Function ${un}uninstallSalt
|
|||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}"
|
||||
DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}"
|
||||
|
||||
; Automatically close when finished
|
||||
# Automatically close when finished
|
||||
SetAutoClose true
|
||||
|
||||
; Prompt to remove the Installation directory
|
||||
# Prompt to remove the Installation directory
|
||||
${IfNot} $DeleteInstallDir == 1
|
||||
MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \
|
||||
"Would you like to completely remove $INSTDIR and all of its contents?" \
|
||||
/SD IDNO IDNO finished
|
||||
${EndIf}
|
||||
|
||||
; Make sure you're not removing Program Files
|
||||
# Make sure you're not removing Program Files
|
||||
${If} $INSTDIR != 'Program Files'
|
||||
${AndIf} $INSTDIR != 'Program Files (x86)'
|
||||
RMDir /r "$INSTDIR"
|
||||
|
@ -526,7 +548,7 @@ FunctionEnd
|
|||
|
||||
Function Trim
|
||||
|
||||
Exch $R1 ; Original string
|
||||
Exch $R1 # Original string
|
||||
Push $R2
|
||||
|
||||
Loop:
|
||||
|
@ -558,36 +580,36 @@ Function Trim
|
|||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; StrStr Function
|
||||
; - find substring in a string
|
||||
;
|
||||
; Usage:
|
||||
; Push "this is some string"
|
||||
; Push "some"
|
||||
; Call StrStr
|
||||
; Pop $0 ; "some string"
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
#
|
||||
# Usage:
|
||||
# Push "this is some string"
|
||||
# Push "some"
|
||||
# Call StrStr
|
||||
# Pop $0 ; "some string"
|
||||
#------------------------------------------------------------------------------
|
||||
!macro StrStr un
|
||||
Function ${un}StrStr
|
||||
|
||||
Exch $R1 ; $R1=substring, stack=[old$R1,string,...]
|
||||
Exch ; stack=[string,old$R1,...]
|
||||
Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 ; $R3=strlen(substring)
|
||||
Push $R4 ; $R4=count
|
||||
Push $R5 ; $R5=tmp
|
||||
StrLen $R3 $R1 ; Get the length of the Search String
|
||||
StrCpy $R4 0 ; Set the counter to 0
|
||||
Exch $R1 # $R1=substring, stack=[old$R1,string,...]
|
||||
Exch # stack=[string,old$R1,...]
|
||||
Exch $R2 # $R2=string, stack=[old$R2,old$R1,...]
|
||||
Push $R3 # $R3=strlen(substring)
|
||||
Push $R4 # $R4=count
|
||||
Push $R5 # $R5=tmp
|
||||
StrLen $R3 $R1 # Get the length of the Search String
|
||||
StrCpy $R4 0 # Set the counter to 0
|
||||
|
||||
loop:
|
||||
StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is
|
||||
; the size of the length of the search string
|
||||
StrCmp $R5 $R1 done ; Is the contents of the window the same as
|
||||
; search string, then done
|
||||
StrCmp $R5 "" done ; Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 ; Shift the windows one character
|
||||
Goto loop ; Repeat
|
||||
StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is
|
||||
# the size of the length of the search string
|
||||
StrCmp $R5 $R1 done # Is the contents of the window the same as
|
||||
# search string, then done
|
||||
StrCmp $R5 "" done # Is the window empty, then done
|
||||
IntOp $R4 $R4 + 1 # Shift the windows one character
|
||||
Goto loop # Repeat
|
||||
|
||||
done:
|
||||
StrCpy $R1 $R2 "" $R4
|
||||
|
@ -595,7 +617,7 @@ Function ${un}StrStr
|
|||
Pop $R4
|
||||
Pop $R3
|
||||
Pop $R2
|
||||
Exch $R1 ; $R1=old$R1, stack=[result,...]
|
||||
Exch $R1 # $R1=old$R1, stack=[result,...]
|
||||
|
||||
FunctionEnd
|
||||
!macroend
|
||||
|
@ -603,74 +625,74 @@ FunctionEnd
|
|||
!insertmacro StrStr "un."
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; AddToPath Function
|
||||
; - Adds item to Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call AddToPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# AddToPath Function
|
||||
# - Adds item to Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call AddToPath
|
||||
#------------------------------------------------------------------------------
|
||||
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
|
||||
Function AddToPath
|
||||
|
||||
Exch $0 ; Path to add
|
||||
Push $1 ; Current Path
|
||||
Push $2 ; Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 ; Handle to Reg / Length of Path
|
||||
Push $4 ; Result of Registry Call
|
||||
Exch $0 # Path to add
|
||||
Push $1 # Current Path
|
||||
Push $2 # Results of StrStr / Length of Path + Path to Add
|
||||
Push $3 # Handle to Reg / Length of Path
|
||||
Push $4 # Result of Registry Call
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
"You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \
|
||||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Check if already in PATH
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0;" ; The string to find
|
||||
# Check if already in PATH
|
||||
Push "$1;" # The string to search
|
||||
Push "$0;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result of the search
|
||||
StrCmp $2 "" 0 done ; String not found, try again with ';' at the end
|
||||
; Otherwise, it's already in the path
|
||||
Push "$1;" ; The string to search
|
||||
Push "$0\;" ; The string to find
|
||||
Pop $2 # The result of the search
|
||||
StrCmp $2 "" 0 done # String not found, try again with ';' at the end
|
||||
# Otherwise, it's already in the path
|
||||
Push "$1;" # The string to search
|
||||
Push "$0\;" # The string to find
|
||||
Call StrStr
|
||||
Pop $2 ; The result
|
||||
StrCmp $2 "" 0 done ; String not found, continue (add)
|
||||
; Otherwise, it's already in the path
|
||||
Pop $2 # The result
|
||||
StrCmp $2 "" 0 done # String not found, continue (add)
|
||||
# Otherwise, it's already in the path
|
||||
|
||||
; Prevent NSIS string overflow
|
||||
StrLen $2 $0 ; Length of path to add ($2)
|
||||
StrLen $3 $1 ; Length of current path ($3)
|
||||
IntOp $2 $2 + $3 ; Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 ; Account for the additional ';'
|
||||
; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
# Prevent NSIS string overflow
|
||||
StrLen $2 $0 # Length of path to add ($2)
|
||||
StrLen $3 $1 # Length of current path ($3)
|
||||
IntOp $2 $2 + $3 # Length of current path + path to add ($2)
|
||||
IntOp $2 $2 + 2 # Account for the additional ';'
|
||||
# $2 = strlen(dir) + strlen(PATH) + sizeof(";")
|
||||
|
||||
; Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
# Make sure the new length isn't over the NSIS_MAX_STRLEN
|
||||
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
|
||||
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
|
||||
MessageBox MB_OK \
|
||||
|
@ -678,18 +700,18 @@ Function AddToPath
|
|||
/SD IDOK
|
||||
Goto done
|
||||
|
||||
; Append dir to PATH
|
||||
# Append dir to PATH
|
||||
DetailPrint "Add to PATH: $0"
|
||||
StrCpy $2 $1 1 -1 ; Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 ; Check for trailing ';'
|
||||
StrCpy $1 $1 -1 ; remove trailing ';'
|
||||
StrCmp $1 "" +2 ; Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" ; Append new path at the end ($0)
|
||||
StrCpy $2 $1 1 -1 # Copy the last character of the existing path
|
||||
StrCmp $2 ";" 0 +2 # Check for trailing ';'
|
||||
StrCpy $1 $1 -1 # remove trailing ';'
|
||||
StrCmp $1 "" +2 # Make sure Path is not empty
|
||||
StrCpy $0 "$1;$0" # Append new path at the end ($0)
|
||||
|
||||
; We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
# We can use the NSIS command here. Only 'ReadRegStr' is affected
|
||||
WriteRegExpandStr ${Environ} "PATH" $0
|
||||
|
||||
; Broadcast registry change to open programs
|
||||
# Broadcast registry change to open programs
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
|
@ -702,16 +724,16 @@ Function AddToPath
|
|||
FunctionEnd
|
||||
|
||||
|
||||
;------------------------------------------------------------------------------
|
||||
; RemoveFromPath Function
|
||||
; - Removes item from Path for All Users
|
||||
; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
; Windows Commands
|
||||
;
|
||||
; Usage:
|
||||
; Push "C:\path\to\add"
|
||||
; Call un.RemoveFromPath
|
||||
;------------------------------------------------------------------------------
|
||||
#------------------------------------------------------------------------------
|
||||
# RemoveFromPath Function
|
||||
# - Removes item from Path for All Users
|
||||
# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native
|
||||
# Windows Commands
|
||||
#
|
||||
# Usage:
|
||||
# Push "C:\path\to\add"
|
||||
# Call un.RemoveFromPath
|
||||
#------------------------------------------------------------------------------
|
||||
Function un.RemoveFromPath
|
||||
|
||||
Exch $0
|
||||
|
@ -722,59 +744,59 @@ Function un.RemoveFromPath
|
|||
Push $5
|
||||
Push $6
|
||||
|
||||
; Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
# Open a handle to the key in the registry, handle in $3, Error in $4
|
||||
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
|
||||
; Make sure registry handle opened successfully (returned 0)
|
||||
# Make sure registry handle opened successfully (returned 0)
|
||||
IntCmp $4 0 0 done done
|
||||
|
||||
; Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
# Load the contents of path into $1, Error Code into $4, Path length into $2
|
||||
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
|
||||
|
||||
; Close the handle to the registry ($3)
|
||||
# Close the handle to the registry ($3)
|
||||
System::Call "advapi32::RegCloseKey(i $3)"
|
||||
|
||||
; Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
|
||||
# Check for Error Code 234, Path too long for the variable
|
||||
IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA
|
||||
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
|
||||
Goto done
|
||||
|
||||
; If no error, continue
|
||||
IntCmp $4 0 +5 ; $4 != NO_ERROR
|
||||
; Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
|
||||
# If no error, continue
|
||||
IntCmp $4 0 +5 # $4 != NO_ERROR
|
||||
# Error 2 means the Key was not found
|
||||
IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND
|
||||
DetailPrint "AddToPath: unexpected error code $4"
|
||||
Goto done
|
||||
StrCpy $1 ""
|
||||
|
||||
; Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 ; Copy the last character of the path
|
||||
StrCmp $5 ";" +2 ; Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" ; ensure trailing ';'
|
||||
# Ensure there's a trailing ';'
|
||||
StrCpy $5 $1 1 -1 # Copy the last character of the path
|
||||
StrCmp $5 ";" +2 # Check for trailing ';', if found continue
|
||||
StrCpy $1 "$1;" # ensure trailing ';'
|
||||
|
||||
; Check for our directory inside the path
|
||||
Push $1 ; String to Search
|
||||
Push "$0;" ; Dir to Find
|
||||
# Check for our directory inside the path
|
||||
Push $1 # String to Search
|
||||
Push "$0;" # Dir to Find
|
||||
Call un.StrStr
|
||||
Pop $2 ; The results of the search
|
||||
StrCmp $2 "" done ; If results are empty, we're done, otherwise continue
|
||||
Pop $2 # The results of the search
|
||||
StrCmp $2 "" done # If results are empty, we're done, otherwise continue
|
||||
|
||||
; Remove our Directory from the Path
|
||||
# Remove our Directory from the Path
|
||||
DetailPrint "Remove from PATH: $0"
|
||||
StrLen $3 "$0;" ; Get the length of our dir ($3)
|
||||
StrLen $4 $2 ; Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" ; Combine $5 and $6
|
||||
StrLen $3 "$0;" # Get the length of our dir ($3)
|
||||
StrLen $4 $2 # Get the length of the return from StrStr ($4)
|
||||
StrCpy $5 $1 -$4 # $5 is now the part before the path to remove
|
||||
StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove
|
||||
StrCpy $3 "$5$6" # Combine $5 and $6
|
||||
|
||||
; Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 ; Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 ; Check for ';'
|
||||
StrCpy $3 $3 -1 ; remove trailing ';'
|
||||
# Check for Trailing ';'
|
||||
StrCpy $5 $3 1 -1 # Load the last character of the string
|
||||
StrCmp $5 ";" 0 +2 # Check for ';'
|
||||
StrCpy $3 $3 -1 # remove trailing ';'
|
||||
|
||||
; Write the new path to the registry
|
||||
# Write the new path to the registry
|
||||
WriteRegExpandStr ${Environ} "PATH" $3
|
||||
|
||||
; Broadcast the change to all open applications
|
||||
# Broadcast the change to all open applications
|
||||
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
|
||||
|
||||
done:
|
||||
|
@ -808,6 +830,7 @@ Function getMinionConfig
|
|||
confFound:
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
|
@ -838,68 +861,69 @@ FunctionEnd
|
|||
Function updateMinionConfig
|
||||
|
||||
ClearErrors
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading
|
||||
GetTempFileName $R0 ; get new temp file name
|
||||
FileOpen $1 $R0 "w" ; open temp file for writing
|
||||
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
|
||||
GetTempFileName $R0 # get new temp file name
|
||||
FileOpen $1 $R0 "w" # open temp file for writing
|
||||
|
||||
loop: ; loop through each line
|
||||
FileRead $0 $2 ; read line from target file
|
||||
IfErrors done ; end if errors are encountered (end of line)
|
||||
loop: # loop through each line
|
||||
FileRead $0 $2 # read line from target file
|
||||
IfErrors done # end if errors are encountered (end of line)
|
||||
|
||||
${If} $MasterHost_State != "" ; if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MasterHost_State != "" # if master is empty
|
||||
${AndIf} $MasterHost_State != "salt" # and if master is not 'salt'
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
${If} $MinionName_State != "" ; if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line
|
||||
${If} $3 == 0 ; is it in the first...
|
||||
${OrIf} $3 == 1 ; or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" ; change line
|
||||
${EndIf} ; close if statement
|
||||
${EndIf} ; close if statement
|
||||
${If} $MinionName_State != "" # if minion is empty
|
||||
${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname'
|
||||
${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or the second position (account for comments)
|
||||
StrCpy $2 "id: $MinionName_State$\r$\n" # change line
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
FileWrite $1 $2 ; write changed or unchanged line to temp file
|
||||
FileWrite $1 $2 # write changed or unchanged line to temp file
|
||||
Goto loop
|
||||
|
||||
done:
|
||||
FileClose $0 ; close target file
|
||||
FileClose $1 ; close temp file
|
||||
Delete "$INSTDIR\conf\minion" ; delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file
|
||||
Delete $R0 ; delete temp file
|
||||
FileClose $0 # close target file
|
||||
FileClose $1 # close temp file
|
||||
Delete "$INSTDIR\conf\minion" # delete target file
|
||||
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
|
||||
Delete $R0 # delete temp file
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function parseCommandLineSwitches
|
||||
|
||||
; Load the parameters
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
; Check for start-minion switches
|
||||
; /start-service is to be deprecated, so we must check for both
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
${GetOptions} $R0 "/start-minion=" $R2
|
||||
|
||||
# Service: Start Salt Minion
|
||||
${IfNot} $R2 == ""
|
||||
; If start-minion was passed something, then set it
|
||||
# If start-minion was passed something, then set it
|
||||
StrCpy $StartMinion $R2
|
||||
${ElseIfNot} $R1 == ""
|
||||
; If start-service was passed something, then set StartMinion to that
|
||||
# If start-service was passed something, then set StartMinion to that
|
||||
StrCpy $StartMinion $R1
|
||||
${Else}
|
||||
; Otherwise default to 1
|
||||
# Otherwise default to 1
|
||||
StrCpy $StartMinion 1
|
||||
${EndIf}
|
||||
|
||||
# Service: Minion Startup Type Delayed
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/start-minion-delayed" $R1
|
||||
IfErrors start_minion_delayed_not_found
|
||||
StrCpy $StartMinionDelayed 1
|
||||
|
|
|
@ -19,9 +19,9 @@ Function Get-Settings {
|
|||
"Python2Dir" = "C:\Python27"
|
||||
"Scripts2Dir" = "C:\Python27\Scripts"
|
||||
"SitePkgs2Dir" = "C:\Python27\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Program Files\Python35"
|
||||
"Scripts3Dir" = "C:\Program Files\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages"
|
||||
"Python3Dir" = "C:\Python35"
|
||||
"Scripts3Dir" = "C:\Python35\Scripts"
|
||||
"SitePkgs3Dir" = "C:\Python35\Lib\site-packages"
|
||||
"DownloadDir" = "$env:Temp\DevSalt"
|
||||
}
|
||||
# The script deletes the DownLoadDir (above) for each install.
|
||||
|
|
|
@ -29,6 +29,7 @@ import salt.config
|
|||
import salt.loader
|
||||
import salt.transport.client
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.files
|
||||
import salt.utils.minions
|
||||
import salt.utils.versions
|
||||
|
@ -69,7 +70,7 @@ class LoadAuth(object):
|
|||
if fstr not in self.auth:
|
||||
return ''
|
||||
try:
|
||||
pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0]
|
||||
pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0]
|
||||
return load[pname_arg]
|
||||
except IndexError:
|
||||
return ''
|
||||
|
@ -216,8 +217,9 @@ class LoadAuth(object):
|
|||
acl_ret = self.__get_acl(load)
|
||||
tdata['auth_list'] = acl_ret
|
||||
|
||||
if 'groups' in load:
|
||||
tdata['groups'] = load['groups']
|
||||
groups = self.get_groups(load)
|
||||
if groups:
|
||||
tdata['groups'] = groups
|
||||
|
||||
return self.tokens["{0}.mk_token".format(self.opts['eauth_tokens'])](self.opts, tdata)
|
||||
|
||||
|
@ -292,29 +294,31 @@ class LoadAuth(object):
|
|||
def authenticate_key(self, load, key):
|
||||
'''
|
||||
Authenticate a user by the key passed in load.
|
||||
Return the effective user id (name) if it's differ from the specified one (for sudo).
|
||||
If the effective user id is the same as passed one return True on success or False on
|
||||
Return the effective user id (name) if it's different from the specified one (for sudo).
|
||||
If the effective user id is the same as the passed one, return True on success or False on
|
||||
failure.
|
||||
'''
|
||||
auth_key = load.pop('key')
|
||||
if not auth_key:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
error_msg = 'Authentication failure of type "user" occurred.'
|
||||
auth_key = load.pop('key', None)
|
||||
if auth_key is None:
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
|
||||
if 'user' in load:
|
||||
auth_user = AuthUser(load['user'])
|
||||
if auth_user.is_sudo():
|
||||
# If someone sudos check to make sure there is no ACL's around their username
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return auth_user.sudo_name()
|
||||
elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root':
|
||||
if auth_key != key[self.opts.get('user', 'root')]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_user.is_running_user():
|
||||
if auth_key != key.get(load['user']):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
elif auth_key == key.get('root'):
|
||||
pass
|
||||
|
@ -322,19 +326,19 @@ class LoadAuth(object):
|
|||
if load['user'] in key:
|
||||
# User is authorised, check key and check perms
|
||||
if auth_key != key[load['user']]:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return load['user']
|
||||
else:
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
else:
|
||||
if auth_key != key[salt.utils.get_user()]:
|
||||
log.warning('Authentication failure of type "other" occurred.')
|
||||
log.warning(error_msg)
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_auth_list(self, load):
|
||||
def get_auth_list(self, load, token=None):
|
||||
'''
|
||||
Retrieve access list for the user specified in load.
|
||||
The list is built by eauth module or from master eauth configuration.
|
||||
|
@ -342,30 +346,37 @@ class LoadAuth(object):
|
|||
list if the user has no rights to execute anything on this master and returns non-empty list
|
||||
if user is allowed to execute particular functions.
|
||||
'''
|
||||
# Get auth list from token
|
||||
if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
return token['auth_list']
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.__get_acl(load)
|
||||
if auth_list is not None:
|
||||
return auth_list
|
||||
|
||||
if load['eauth'] not in self.opts['external_auth']:
|
||||
eauth = token['eauth'] if token else load['eauth']
|
||||
if eauth not in self.opts['external_auth']:
|
||||
# No matching module is allowed in config
|
||||
log.warning('Authorization failure occurred.')
|
||||
return None
|
||||
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][load['eauth']]
|
||||
if groups is None or groups is False:
|
||||
if token:
|
||||
name = token['name']
|
||||
groups = token.get('groups')
|
||||
else:
|
||||
name = self.load_name(load) # The username we are attempting to auth with
|
||||
groups = self.get_groups(load) # The groups this user belongs to
|
||||
eauth_config = self.opts['external_auth'][eauth]
|
||||
if not groups:
|
||||
groups = []
|
||||
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
|
||||
|
||||
# First we need to know if the user is allowed to proceed via any of their group memberships.
|
||||
group_auth_match = False
|
||||
for group_config in group_perm_keys:
|
||||
group_config = group_config.rstrip('%')
|
||||
for group in groups:
|
||||
if group == group_config:
|
||||
group_auth_match = True
|
||||
if group_config.rstrip('%') in groups:
|
||||
group_auth_match = True
|
||||
break
|
||||
# If a group_auth_match is set it means only that we have a
|
||||
# user which matches at least one or more of the groups defined
|
||||
# in the configuration file.
|
||||
|
@ -405,6 +416,64 @@ class LoadAuth(object):
|
|||
|
||||
return auth_list
|
||||
|
||||
def check_authentication(self, load, auth_type, key=None, show_username=False):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Go through various checks to see if the token/eauth/user can be authenticated.
|
||||
|
||||
Returns a dictionary containing the following keys:
|
||||
|
||||
- auth_list
|
||||
- username
|
||||
- error
|
||||
|
||||
If an error is encountered, return immediately with the relevant error dictionary
|
||||
as authentication has failed. Otherwise, return the username and valid auth_list.
|
||||
'''
|
||||
auth_list = []
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
ret = {'auth_list': auth_list,
|
||||
'username': username,
|
||||
'error': {}}
|
||||
|
||||
# Authenticate
|
||||
if auth_type == 'token':
|
||||
token = self.authenticate_token(load)
|
||||
if not token:
|
||||
ret['error'] = {'name': 'TokenAuthenticationError',
|
||||
'message': 'Authentication failure of type "token" occurred.'}
|
||||
return ret
|
||||
|
||||
# Update username for token
|
||||
username = token['name']
|
||||
ret['username'] = username
|
||||
auth_list = self.get_auth_list(load, token=token)
|
||||
elif auth_type == 'eauth':
|
||||
if not self.authenticate_eauth(load):
|
||||
ret['error'] = {'name': 'EauthAuthenticationError',
|
||||
'message': 'Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.'.format(username)}
|
||||
return ret
|
||||
|
||||
auth_list = self.get_auth_list(load)
|
||||
elif auth_type == 'user':
|
||||
if not self.authenticate_key(load, key):
|
||||
if show_username:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username)
|
||||
else:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
else:
|
||||
ret['error'] = {'name': 'SaltInvocationError',
|
||||
'message': 'Authentication type not supported.'}
|
||||
return ret
|
||||
|
||||
# Authentication checks passed
|
||||
ret['auth_list'] = auth_list
|
||||
return ret
|
||||
|
||||
|
||||
class Authorize(object):
|
||||
'''
|
||||
|
@ -550,6 +619,15 @@ class Authorize(object):
|
|||
load.get('arg', None),
|
||||
load.get('tgt', None),
|
||||
load.get('tgt_type', 'glob'))
|
||||
|
||||
# Handle possible return of dict data structure from any_auth call to
|
||||
# avoid a stacktrace. As mentioned in PR #43181, this entire class is
|
||||
# dead code and is marked for removal in Salt Neon. But until then, we
|
||||
# should handle the dict return, which is an error and should return
|
||||
# False until this class is removed.
|
||||
if isinstance(good, dict):
|
||||
return False
|
||||
|
||||
if not good:
|
||||
# Accept find_job so the CLI will function cleanly
|
||||
if load.get('fun', '') != 'saltutil.find_job':
|
||||
|
@ -562,7 +640,7 @@ class Authorize(object):
|
|||
authorization
|
||||
|
||||
Note: this will check that the user has at least one right that will let
|
||||
him execute "load", this does not deal with conflicting rules
|
||||
the user execute "load", this does not deal with conflicting rules
|
||||
'''
|
||||
|
||||
adata = self.auth_data
|
||||
|
@ -634,7 +712,7 @@ class Resolver(object):
|
|||
'not available').format(eauth))
|
||||
return ret
|
||||
|
||||
args = salt.utils.arg_lookup(self.auth[fstr])
|
||||
args = salt.utils.args.arg_lookup(self.auth[fstr])
|
||||
for arg in args['args']:
|
||||
if arg in self.opts:
|
||||
ret[arg] = self.opts[arg]
|
||||
|
|
|
@ -378,7 +378,7 @@ def groups(username, **kwargs):
|
|||
search_results = bind.search_s(search_base,
|
||||
ldap.SCOPE_SUBTREE,
|
||||
search_string,
|
||||
[_config('accountattributename'), 'cn'])
|
||||
[_config('accountattributename'), 'cn', _config('groupattribute')])
|
||||
for _, entry in search_results:
|
||||
if username in entry[_config('accountattributename')]:
|
||||
group_list.append(entry['cn'][0])
|
||||
|
@ -390,7 +390,7 @@ def groups(username, **kwargs):
|
|||
|
||||
# Only test user auth on first call for job.
|
||||
# 'show_jid' only exists on first payload so we can use that for the conditional.
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs['password'],
|
||||
if 'show_jid' in kwargs and not _bind(username, kwargs.get('password'),
|
||||
anonymous=_config('auth_by_group_membership_only', mandatory=False) and
|
||||
_config('anonymous', mandatory=False)):
|
||||
log.error('LDAP username and password do not match')
|
||||
|
|
|
@ -59,7 +59,7 @@ class Beacon(object):
|
|||
|
||||
if 'enabled' in current_beacon_config:
|
||||
if not current_beacon_config['enabled']:
|
||||
log.trace('Beacon {0} disabled'.format(mod))
|
||||
log.trace('Beacon %s disabled', mod)
|
||||
continue
|
||||
else:
|
||||
# remove 'enabled' item before processing the beacon
|
||||
|
@ -68,7 +68,7 @@ class Beacon(object):
|
|||
else:
|
||||
self._remove_list_item(config[mod], 'enabled')
|
||||
|
||||
log.trace('Beacon processing: {0}'.format(mod))
|
||||
log.trace('Beacon processing: %s', mod)
|
||||
fun_str = '{0}.beacon'.format(mod)
|
||||
validate_str = '{0}.validate'.format(mod)
|
||||
if fun_str in self.beacons:
|
||||
|
@ -77,10 +77,10 @@ class Beacon(object):
|
|||
if interval:
|
||||
b_config = self._trim_config(b_config, mod, 'interval')
|
||||
if not self._process_interval(mod, interval):
|
||||
log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
|
||||
log.trace('Skipping beacon %s. Interval not reached.', mod)
|
||||
continue
|
||||
if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
|
||||
log.trace('Evaluting if beacon {0} should be skipped due to a state run.'.format(mod))
|
||||
log.trace('Evaluting if beacon %s should be skipped due to a state run.', mod)
|
||||
b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
|
||||
is_running = False
|
||||
running_jobs = salt.utils.minion.running(self.opts)
|
||||
|
@ -90,10 +90,10 @@ class Beacon(object):
|
|||
if is_running:
|
||||
close_str = '{0}.close'.format(mod)
|
||||
if close_str in self.beacons:
|
||||
log.info('Closing beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Closing beacon %s. State run in progress.', mod)
|
||||
self.beacons[close_str](b_config[mod])
|
||||
else:
|
||||
log.info('Skipping beacon {0}. State run in progress.'.format(mod))
|
||||
log.info('Skipping beacon %s. State run in progress.', mod)
|
||||
continue
|
||||
# Update __grains__ on the beacon
|
||||
self.beacons[fun_str].__globals__['__grains__'] = grains
|
||||
|
@ -120,7 +120,7 @@ class Beacon(object):
|
|||
if runonce:
|
||||
self.disable_beacon(mod)
|
||||
else:
|
||||
log.warning('Unable to process beacon {0}'.format(mod))
|
||||
log.warning('Unable to process beacon %s', mod)
|
||||
return ret
|
||||
|
||||
def _trim_config(self, b_config, mod, key):
|
||||
|
@ -149,19 +149,19 @@ class Beacon(object):
|
|||
Process beacons with intervals
|
||||
Return True if a beacon should be run on this loop
|
||||
'''
|
||||
log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
|
||||
log.trace('Processing interval %s for beacon mod %s', interval, mod)
|
||||
loop_interval = self.opts['loop_interval']
|
||||
if mod in self.interval_map:
|
||||
log.trace('Processing interval in map')
|
||||
counter = self.interval_map[mod]
|
||||
log.trace('Interval counter: {0}'.format(counter))
|
||||
log.trace('Interval counter: %s', counter)
|
||||
if counter * loop_interval >= interval:
|
||||
self.interval_map[mod] = 1
|
||||
return True
|
||||
else:
|
||||
self.interval_map[mod] += 1
|
||||
else:
|
||||
log.trace('Interval process inserting mod: {0}'.format(mod))
|
||||
log.trace('Interval process inserting mod: %s', mod)
|
||||
self.interval_map[mod] = 1
|
||||
return False
|
||||
|
||||
|
@ -205,15 +205,50 @@ class Beacon(object):
|
|||
'''
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
b_conf = self.functions['config.merge']('beacons')
|
||||
if not isinstance(self.opts['beacons'], dict):
|
||||
self.opts['beacons'] = {}
|
||||
self.opts['beacons'].update(b_conf)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacons_list_complete')
|
||||
|
||||
return True
|
||||
|
||||
def list_available_beacons(self):
|
||||
'''
|
||||
List the available beacons
|
||||
'''
|
||||
_beacons = ['{0}'.format(_beacon.replace('.beacon', ''))
|
||||
for _beacon in self.beacons if '.beacon' in _beacon]
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': _beacons},
|
||||
tag='/salt/minion/minion_beacons_list_available_complete')
|
||||
|
||||
return True
|
||||
|
||||
def validate_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Return available beacon functions
|
||||
'''
|
||||
validate_str = '{}.validate'.format(name)
|
||||
# Run the validate function if it's available,
|
||||
# otherwise there is a warning about it being missing
|
||||
if validate_str in self.beacons:
|
||||
if 'enabled' in beacon_data:
|
||||
del beacon_data['enabled']
|
||||
valid, vcomment = self.beacons[validate_str](beacon_data)
|
||||
else:
|
||||
log.info('Beacon %s does not have a validate'
|
||||
' function, skipping validation.', name)
|
||||
valid = True
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True,
|
||||
'vcomment': vcomment,
|
||||
'valid': valid},
|
||||
tag='/salt/minion/minion_beacon_validation_complete')
|
||||
|
||||
return True
|
||||
|
||||
def add_beacon(self, name, beacon_data):
|
||||
'''
|
||||
Add a beacon item
|
||||
|
@ -224,9 +259,9 @@ class Beacon(object):
|
|||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
else:
|
||||
log.info('Added new beacon item {0}'.format(name))
|
||||
log.info('Added new beacon item %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
@ -245,7 +280,7 @@ class Beacon(object):
|
|||
data[name] = beacon_data
|
||||
|
||||
log.info('Updating settings for beacon '
|
||||
'item: {0}'.format(name))
|
||||
'item: %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
@ -261,7 +296,7 @@ class Beacon(object):
|
|||
'''
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Deleting beacon item {0}'.format(name))
|
||||
log.info('Deleting beacon item %s', name)
|
||||
del self.opts['beacons'][name]
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
|
|
9
salt/cache/consul.py
vendored
9
salt/cache/consul.py
vendored
|
@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store.
|
|||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
:depends: python-consul >= 0.2.0
|
||||
|
||||
It is up to the system administrator to set up and configure the Consul
|
||||
infrastructure. All is needed for this plugin is a working Consul agent
|
||||
with a read-write access to the key-value store.
|
||||
|
@ -81,8 +83,11 @@ def __virtual__():
|
|||
'verify': __opts__.get('consul.verify', True),
|
||||
}
|
||||
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
try:
|
||||
global api
|
||||
api = consul.Consul(**consul_kwargs)
|
||||
except AttributeError:
|
||||
return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed")
|
||||
|
||||
return __virtualname__
|
||||
|
||||
|
|
|
@ -157,7 +157,7 @@ class BaseCaller(object):
|
|||
'''
|
||||
ret = {}
|
||||
fun = self.opts['fun']
|
||||
ret['jid'] = salt.utils.jid.gen_jid()
|
||||
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
|
||||
proc_fn = os.path.join(
|
||||
salt.minion.get_proc_dir(self.opts['cachedir']),
|
||||
ret['jid']
|
||||
|
|
|
@ -184,9 +184,10 @@ class SaltCP(object):
|
|||
if gzip \
|
||||
else salt.utils.itertools.read_file
|
||||
|
||||
minions = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(
|
||||
tgt,
|
||||
tgt_type=selected_target_option or 'glob')
|
||||
minions = _res['minions']
|
||||
|
||||
local = salt.client.get_local_client(self.opts['conf_file'])
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ from __future__ import absolute_import
|
|||
# Import Salt libs
|
||||
import salt.spm
|
||||
import salt.utils.parsers as parsers
|
||||
from salt.utils.verify import verify_log
|
||||
from salt.utils.verify import verify_log, verify_env
|
||||
|
||||
|
||||
class SPM(parsers.SPMParser):
|
||||
|
@ -29,6 +29,10 @@ class SPM(parsers.SPMParser):
|
|||
ui = salt.spm.SPMCmdlineInterface()
|
||||
self.parse_args()
|
||||
self.setup_logfile_logger()
|
||||
v_dirs = [
|
||||
self.config['cachedir'],
|
||||
]
|
||||
verify_env(v_dirs, self.config['user'],)
|
||||
verify_log(self.config)
|
||||
client = salt.spm.SPMClient(ui, self.config)
|
||||
client.run(self.args)
|
||||
|
|
|
@ -347,7 +347,8 @@ class LocalClient(object):
|
|||
return self._check_pub_data(pub_data)
|
||||
|
||||
def gather_minions(self, tgt, expr_form):
|
||||
return salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
_res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
|
||||
return _res['minions']
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def run_job_async(
|
||||
|
@ -1141,6 +1142,7 @@ class LocalClient(object):
|
|||
minion_timeouts = {}
|
||||
|
||||
found = set()
|
||||
missing = []
|
||||
# Check to see if the jid is real, if not return the empty dict
|
||||
try:
|
||||
if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}:
|
||||
|
@ -1179,6 +1181,8 @@ class LocalClient(object):
|
|||
break
|
||||
if u'minions' in raw.get(u'data', {}):
|
||||
minions.update(raw[u'data'][u'minions'])
|
||||
if u'missing' in raw.get(u'data', {}):
|
||||
missing.extend(raw[u'data'][u'missing'])
|
||||
continue
|
||||
if u'return' not in raw[u'data']:
|
||||
continue
|
||||
|
@ -1320,6 +1324,10 @@ class LocalClient(object):
|
|||
for minion in list((minions - found)):
|
||||
yield {minion: {u'failed': True}}
|
||||
|
||||
if missing:
|
||||
for minion in missing:
|
||||
yield {minion: {'failed': True}}
|
||||
|
||||
def get_returns(
|
||||
self,
|
||||
jid,
|
||||
|
|
|
@ -14,8 +14,9 @@ client applications.
|
|||
http://docs.saltstack.com/ref/clients/index.html
|
||||
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt libs
|
||||
|
@ -24,9 +25,9 @@ import salt.auth
|
|||
import salt.client
|
||||
import salt.runner
|
||||
import salt.wheel
|
||||
import salt.utils
|
||||
import salt.utils.args
|
||||
import salt.utils.event
|
||||
import salt.syspaths as syspaths
|
||||
from salt.utils.event import tagify
|
||||
from salt.exceptions import EauthAuthenticationError
|
||||
|
||||
|
||||
|
@ -229,7 +230,7 @@ class APIClient(object):
|
|||
functions = self.wheelClient.functions
|
||||
elif client == u'runner':
|
||||
functions = self.runnerClient.functions
|
||||
result = {u'master': salt.utils.argspec_report(functions, module)}
|
||||
result = {u'master': salt.utils.args.argspec_report(functions, module)}
|
||||
return result
|
||||
|
||||
def create_token(self, creds):
|
||||
|
@ -322,4 +323,4 @@ class APIClient(object):
|
|||
Need to convert this to a master call with appropriate authentication
|
||||
|
||||
'''
|
||||
return self.event.fire_event(data, tagify(tag, u'wui'))
|
||||
return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui'))
|
||||
|
|
|
@ -16,7 +16,8 @@ import copy as pycopy
|
|||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
import salt.minion
|
||||
import salt.utils
|
||||
import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved
|
||||
import salt.utils.args
|
||||
import salt.utils.doc
|
||||
import salt.utils.error
|
||||
import salt.utils.event
|
||||
|
@ -25,6 +26,7 @@ import salt.utils.job
|
|||
import salt.utils.lazy
|
||||
import salt.utils.platform
|
||||
import salt.utils.process
|
||||
import salt.utils.state
|
||||
import salt.utils.versions
|
||||
import salt.transport
|
||||
import salt.log.setup
|
||||
|
@ -297,7 +299,7 @@ class SyncClientMixin(object):
|
|||
# this is not to clutter the output with the module loading
|
||||
# if we have a high debug level.
|
||||
self.mminion # pylint: disable=W0104
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid())
|
||||
jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts))
|
||||
tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
|
||||
|
||||
data = {u'fun': u'{0}.{1}'.format(self.client, fun),
|
||||
|
@ -396,7 +398,7 @@ class SyncClientMixin(object):
|
|||
data[u'success'] = True
|
||||
if isinstance(data[u'return'], dict) and u'data' in data[u'return']:
|
||||
# some functions can return boolean values
|
||||
data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data'])
|
||||
data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data'])
|
||||
except (Exception, SystemExit) as ex:
|
||||
if isinstance(ex, salt.exceptions.NotImplemented):
|
||||
data[u'return'] = str(ex)
|
||||
|
@ -510,7 +512,7 @@ class AsyncClientMixin(object):
|
|||
|
||||
def _gen_async_pub(self, jid=None):
|
||||
if jid is None:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix)
|
||||
return {u'tag': tag, u'jid': jid}
|
||||
|
||||
|
|
|
@ -44,9 +44,6 @@ from salt.exceptions import (
|
|||
SaltCloudSystemExit
|
||||
)
|
||||
|
||||
# Import Salt-Cloud Libs
|
||||
import salt.utils.cloud
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -1193,7 +1190,7 @@ def list_nodes_select(call=None):
|
|||
'''
|
||||
Return a list of the VMs that are on the provider, with select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full(), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
@ -1503,7 +1500,7 @@ def _query(action=None,
|
|||
if LASTCALL >= now:
|
||||
time.sleep(ratelimit_sleep)
|
||||
|
||||
result = salt.utils.http.query(
|
||||
result = __utils__['http.query'](
|
||||
url,
|
||||
method,
|
||||
params=args,
|
||||
|
|
|
@ -24,7 +24,6 @@ import logging
|
|||
# Import salt libs
|
||||
from salt.exceptions import SaltCloudSystemExit
|
||||
import salt.config as config
|
||||
import salt.utils.cloud as cloud
|
||||
|
||||
# Import Third Party Libs
|
||||
try:
|
||||
|
@ -136,7 +135,7 @@ def create(vm_info):
|
|||
)
|
||||
|
||||
log.debug("Going to fire event: starting create")
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'starting create',
|
||||
'salt/cloud/{0}/creating'.format(vm_info['name']),
|
||||
|
@ -151,7 +150,7 @@ def create(vm_info):
|
|||
'clone_from': vm_info['clonefrom']
|
||||
}
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'requesting instance',
|
||||
'salt/cloud/{0}/requesting'.format(vm_info['name']),
|
||||
|
@ -174,10 +173,10 @@ def create(vm_info):
|
|||
vm_info['key_filename'] = key_filename
|
||||
vm_info['ssh_host'] = ip
|
||||
|
||||
res = cloud.bootstrap(vm_info, __opts__)
|
||||
res = __utils__['cloud.bootstrap'](vm_info)
|
||||
vm_result.update(res)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'created machine',
|
||||
'salt/cloud/{0}/created'.format(vm_info['name']),
|
||||
|
@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
|
|||
"private_ips",
|
||||
"public_ips",
|
||||
]
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), attributes, call,
|
||||
)
|
||||
|
||||
|
@ -278,7 +277,7 @@ def list_nodes_select(call=None):
|
|||
"""
|
||||
Return a list of the VMs that are on the provider, with select fields
|
||||
"""
|
||||
return cloud.list_nodes_select(
|
||||
return __utils__['cloud.list_nodes_select'](
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
@ -306,7 +305,7 @@ def destroy(name, call=None):
|
|||
if not vb_machine_exists(name):
|
||||
return "{0} doesn't exist and can't be deleted".format(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
|
@ -317,7 +316,7 @@ def destroy(name, call=None):
|
|||
|
||||
vb_destroy_machine(name)
|
||||
|
||||
cloud.fire_event(
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
|
|
|
@ -53,7 +53,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S'
|
|||
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
|
||||
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
|
||||
_DFLT_LOG_FMT_LOGFILE = (
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
|
||||
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
|
||||
)
|
||||
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']
|
||||
|
||||
|
@ -111,9 +111,10 @@ VALID_OPTS = {
|
|||
'master_port': (six.string_types, int),
|
||||
|
||||
# The behaviour of the minion when connecting to a master. Can specify 'failover',
|
||||
# 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an
|
||||
# exec module function to run to determine the master hostname. If 'disable' is specified
|
||||
# the minion will run, but will not try to connect to a master.
|
||||
# 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be
|
||||
# set to an exec module function to run to determine the master hostname. If 'disable' is
|
||||
# specified the minion will run, but will not try to connect to a master. If 'distributed'
|
||||
# is specified the minion will try to deterministically pick a master based on its' id.
|
||||
'master_type': str,
|
||||
|
||||
# Specify the format in which the master address will be specified. Can
|
||||
|
@ -186,6 +187,16 @@ VALID_OPTS = {
|
|||
# A unique identifier for this daemon
|
||||
'id': str,
|
||||
|
||||
# Use a module function to determine the unique identifier. If this is
|
||||
# set and 'id' is not set, it will allow invocation of a module function
|
||||
# to determine the value of 'id'. For simple invocations without function
|
||||
# arguments, this may be a string that is the function name. For
|
||||
# invocations with function arguments, this may be a dictionary with the
|
||||
# key being the function name, and the value being an embedded dictionary
|
||||
# where each key is a function argument name and each value is the
|
||||
# corresponding argument value.
|
||||
'id_function': (dict, str),
|
||||
|
||||
# The directory to store all cache files.
|
||||
'cachedir': str,
|
||||
|
||||
|
@ -332,7 +343,7 @@ VALID_OPTS = {
|
|||
# Whether or not scheduled mine updates should be accompanied by a job return for the job cache
|
||||
'mine_return_job': bool,
|
||||
|
||||
# Schedule a mine update every n number of seconds
|
||||
# The number of minutes between mine updates.
|
||||
'mine_interval': int,
|
||||
|
||||
# The ipc strategy. (i.e., sockets versus tcp, etc)
|
||||
|
@ -417,6 +428,12 @@ VALID_OPTS = {
|
|||
# Tell the client to display the jid when a job is published
|
||||
'show_jid': bool,
|
||||
|
||||
# Ensure that a generated jid is always unique. If this is set, the jid
|
||||
# format is different due to an underscore and process id being appended
|
||||
# to the jid. WARNING: A change to the jid format may break external
|
||||
# applications that depend on the original format.
|
||||
'unique_jid': bool,
|
||||
|
||||
# Tells the highstate outputter to show successful states. False will omit successes.
|
||||
'state_verbose': bool,
|
||||
|
||||
|
@ -573,6 +590,23 @@ VALID_OPTS = {
|
|||
# False in 2016.3.0
|
||||
'add_proxymodule_to_opts': bool,
|
||||
|
||||
# Merge pillar data into configuration opts.
|
||||
# As multiple proxies can run on the same server, we may need different
|
||||
# configuration options for each, while there's one single configuration file.
|
||||
# The solution is merging the pillar data of each proxy minion into the opts.
|
||||
'proxy_merge_pillar_in_opts': bool,
|
||||
|
||||
# Deep merge of pillar data into configuration opts.
|
||||
# Evaluated only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_deep_merge_pillar_in_opts': bool,
|
||||
|
||||
# The strategy used when merging pillar into opts.
|
||||
# Considered only when `proxy_merge_pillar_in_opts` is True.
|
||||
'proxy_merge_pillar_in_opts_strategy': str,
|
||||
|
||||
# Allow enabling mine details using pillar data.
|
||||
'proxy_mines_pillar': bool,
|
||||
|
||||
# In some particular cases, always alive proxies are not beneficial.
|
||||
# This option can be used in those less dynamic environments:
|
||||
# the user can request the connection
|
||||
|
@ -908,6 +942,7 @@ VALID_OPTS = {
|
|||
'ssh_scan_timeout': float,
|
||||
'ssh_identities_only': bool,
|
||||
'ssh_log_file': str,
|
||||
'ssh_config_file': str,
|
||||
|
||||
# Enable ioflo verbose logging. Warning! Very verbose!
|
||||
'ioflo_verbose': int,
|
||||
|
@ -1079,6 +1114,11 @@ VALID_OPTS = {
|
|||
# (in other words, require that minions have 'minion_sign_messages'
|
||||
# turned on)
|
||||
'require_minion_sign_messages': bool,
|
||||
|
||||
# The list of config entries to be passed to external pillar function as
|
||||
# part of the extra_minion_data param
|
||||
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
|
||||
'pass_to_ext_pillars': (six.string_types, list),
|
||||
}
|
||||
|
||||
# default configurations
|
||||
|
@ -1102,6 +1142,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'root_dir': salt.syspaths.ROOT_DIR,
|
||||
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
|
||||
'id': '',
|
||||
'id_function': {},
|
||||
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
|
||||
'append_minionid_config_dirs': [],
|
||||
'cache_jobs': False,
|
||||
|
@ -1197,6 +1238,7 @@ DEFAULT_MINION_OPTS = {
|
|||
'gitfs_ref_types': ['branch', 'tag', 'sha'],
|
||||
'gitfs_refspecs': _DFLT_REFSPECS,
|
||||
'gitfs_disable_saltenv_mapping': False,
|
||||
'unique_jid': False,
|
||||
'hash_type': 'sha256',
|
||||
'disable_modules': [],
|
||||
'disable_returners': [],
|
||||
|
@ -1441,6 +1483,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'hgfs_saltenv_blacklist': [],
|
||||
'show_timeout': True,
|
||||
'show_jid': False,
|
||||
'unique_jid': False,
|
||||
'svnfs_remotes': [],
|
||||
'svnfs_mountpoint': '',
|
||||
'svnfs_root': '',
|
||||
|
@ -1607,6 +1650,7 @@ DEFAULT_MASTER_OPTS = {
|
|||
'ssh_scan_timeout': 0.01,
|
||||
'ssh_identities_only': False,
|
||||
'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'),
|
||||
'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'),
|
||||
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
|
||||
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
|
||||
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
|
||||
|
@ -1673,6 +1717,12 @@ DEFAULT_PROXY_MINION_OPTS = {
|
|||
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
|
||||
'default_include': 'proxy.d/*.conf',
|
||||
|
||||
'proxy_merge_pillar_in_opts': False,
|
||||
'proxy_deep_merge_pillar_in_opts': False,
|
||||
'proxy_merge_pillar_in_opts_strategy': 'smart',
|
||||
|
||||
'proxy_mines_pillar': True,
|
||||
|
||||
# By default, proxies will preserve the connection.
|
||||
# If this option is set to False,
|
||||
# the connection with the remote dumb device
|
||||
|
@ -3335,6 +3385,57 @@ def _cache_id(minion_id, cache_file):
|
|||
log.error('Could not cache minion ID: {0}'.format(exc))
|
||||
|
||||
|
||||
def call_id_function(opts):
|
||||
'''
|
||||
Evaluate the function that determines the ID if the 'id_function'
|
||||
option is set and return the result
|
||||
'''
|
||||
if opts.get('id'):
|
||||
return opts['id']
|
||||
|
||||
# Import 'salt.loader' here to avoid a circular dependency
|
||||
import salt.loader as loader
|
||||
|
||||
if isinstance(opts['id_function'], str):
|
||||
mod_fun = opts['id_function']
|
||||
fun_kwargs = {}
|
||||
elif isinstance(opts['id_function'], dict):
|
||||
mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
|
||||
if fun_kwargs is None:
|
||||
fun_kwargs = {}
|
||||
else:
|
||||
log.error('\'id_function\' option is neither a string nor a dictionary')
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
# split module and function and try loading the module
|
||||
mod, fun = mod_fun.split('.')
|
||||
if not opts.get('grains'):
|
||||
# Get grains for use by the module
|
||||
opts['grains'] = loader.grains(opts)
|
||||
|
||||
try:
|
||||
id_mod = loader.raw_mod(opts, mod, fun)
|
||||
if not id_mod:
|
||||
raise KeyError
|
||||
# we take whatever the module returns as the minion ID
|
||||
newid = id_mod[mod_fun](**fun_kwargs)
|
||||
if not isinstance(newid, str) or not newid:
|
||||
log.error('Function {0} returned value "{1}" of type {2} instead of string'.format(
|
||||
mod_fun, newid, type(newid))
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
log.info('Evaluated minion ID from module: {0}'.format(mod_fun))
|
||||
return newid
|
||||
except TypeError:
|
||||
log.error('Function arguments {0} are incorrect for function {1}'.format(
|
||||
fun_kwargs, mod_fun)
|
||||
)
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
except KeyError:
|
||||
log.error('Failed to load module {0}'.format(mod_fun))
|
||||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
|
||||
|
||||
def get_id(opts, cache_minion_id=False):
|
||||
'''
|
||||
Guess the id of the minion.
|
||||
|
@ -3376,13 +3477,21 @@ def get_id(opts, cache_minion_id=False):
|
|||
log.debug('Guessing ID. The id can be explicitly set in {0}'
|
||||
.format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
|
||||
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
if opts.get('id_function'):
|
||||
newid = call_id_function(opts)
|
||||
else:
|
||||
newid = salt.utils.network.generate_minion_id()
|
||||
|
||||
if opts.get('minion_id_lowercase'):
|
||||
newid = newid.lower()
|
||||
log.debug('Changed minion id {0} to lowercase.'.format(newid))
|
||||
if '__role' in opts and opts.get('__role') == 'minion':
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(newid))
|
||||
if opts.get('id_function'):
|
||||
log.debug('Found minion id from external function {0}: {1}'.format(
|
||||
opts['id_function'], newid))
|
||||
else:
|
||||
log.debug('Found minion id from generate_minion_id(): {0}'.format(
|
||||
newid))
|
||||
if cache_minion_id and opts.get('minion_id_caching', True):
|
||||
_cache_id(newid, id_cache)
|
||||
is_ipv4 = salt.utils.network.is_ipv4(newid)
|
||||
|
|
44
salt/config/schemas/esxcluster.py
Normal file
44
salt/config/schemas/esxcluster.py
Normal file
|
@ -0,0 +1,44 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)`
|
||||
|
||||
|
||||
salt.config.schemas.esxcluster
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
ESX Cluster configuration schemas
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt libs
|
||||
from salt.utils.schema import (Schema,
|
||||
ArrayItem,
|
||||
IntegerItem,
|
||||
StringItem)
|
||||
|
||||
|
||||
class EsxclusterProxySchema(Schema):
|
||||
'''
|
||||
Schema of the esxcluster proxy input
|
||||
'''
|
||||
|
||||
title = 'Esxcluster Proxy Schema'
|
||||
description = 'Esxcluster proxy schema'
|
||||
additional_properties = False
|
||||
proxytype = StringItem(required=True,
|
||||
enum=['esxcluster'])
|
||||
vcenter = StringItem(required=True, pattern=r'[^\s]+')
|
||||
datacenter = StringItem(required=True)
|
||||
cluster = StringItem(required=True)
|
||||
mechanism = StringItem(required=True, enum=['userpass', 'sspi'])
|
||||
username = StringItem()
|
||||
passwords = ArrayItem(min_items=1,
|
||||
items=StringItem(),
|
||||
unique_items=True)
|
||||
# TODO Should be changed when anyOf is supported for schemas
|
||||
domain = StringItem()
|
||||
principal = StringItem()
|
||||
protocol = StringItem()
|
||||
port = IntegerItem(minimum=1)
|
|
@ -400,7 +400,7 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
|
|||
kind=kinds.applKinds.master))
|
||||
except gaierror as ex:
|
||||
log.warning("Unable to connect to master {0}: {1}".format(mha, ex))
|
||||
if self.opts.value.get('master_type') != 'failover':
|
||||
if self.opts.value.get(u'master_type') not in (u'failover', u'distributed'):
|
||||
raise ex
|
||||
if not stack.remotes:
|
||||
raise ex
|
||||
|
|
|
@ -550,11 +550,12 @@ class RemoteFuncs(object):
|
|||
if match_type.lower() == 'compound':
|
||||
match_type = 'compound_pillar_exact'
|
||||
checker = salt.utils.minions.CkMinions(self.opts)
|
||||
minions = checker.check_minions(
|
||||
_res = checker.check_minions(
|
||||
load['tgt'],
|
||||
match_type,
|
||||
greedy=False
|
||||
)
|
||||
minions = _res['minions']
|
||||
for minion in minions:
|
||||
fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine')
|
||||
if isinstance(fdata, dict):
|
||||
|
@ -718,7 +719,7 @@ class RemoteFuncs(object):
|
|||
Handle the return data sent from the minions
|
||||
'''
|
||||
# Generate EndTime
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
|
||||
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts))
|
||||
# If the return data is invalid, just ignore it
|
||||
if any(key not in load for key in ('return', 'jid', 'id')):
|
||||
return False
|
||||
|
@ -872,9 +873,10 @@ class RemoteFuncs(object):
|
|||
pub_load['tgt_type'] = load['tgt_type']
|
||||
ret = {}
|
||||
ret['jid'] = self.local.cmd_async(**pub_load)
|
||||
ret['minions'] = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
pub_load['tgt_type'])
|
||||
ret['minions'] = _res['minions']
|
||||
auth_cache = os.path.join(
|
||||
self.opts['cachedir'],
|
||||
'publish_auth')
|
||||
|
@ -1011,35 +1013,33 @@ class LocalFuncs(object):
|
|||
'''
|
||||
Send a master control function back to the runner system
|
||||
'''
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred '
|
||||
'for user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
# All runner opts pass through eauth
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.').format(auth_type, username)))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(load, auth_type)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
username = auth_check.get('username')
|
||||
if not runner_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred '
|
||||
'for user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and 'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
fun = load.pop('fun')
|
||||
runner_client = salt.runner.RunnerClient(self.opts)
|
||||
|
@ -1048,56 +1048,49 @@ class LocalFuncs(object):
|
|||
username)
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
return {'error': {'name': exc.__class__.__name__,
|
||||
'args': exc.args,
|
||||
'message': str(exc)}}
|
||||
|
||||
def wheel(self, load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
token = self.loadauth.authenticate_token(load)
|
||||
if not token:
|
||||
return dict(error=dict(name=err_name,
|
||||
message='Authentication failure of type "token" occurred.'))
|
||||
username = token['name']
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
load['eauth'] = token['eauth']
|
||||
load['username'] = username
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_eauth(load):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "eauth" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_list = self.loadauth.get_auth_list(load)
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
username = load.get('username', 'UNKNOWN')
|
||||
if not self.loadauth.authenticate_key(load, self.key):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "user" occurred for '
|
||||
'user {0}.').format(username)))
|
||||
auth_type, err_name, key = self._prep_auth_info(load)
|
||||
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(
|
||||
load,
|
||||
auth_type,
|
||||
key=key,
|
||||
show_username=True
|
||||
)
|
||||
error = auth_check.get('error')
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {'error': error}
|
||||
|
||||
# Authorize
|
||||
username = auth_check.get('username')
|
||||
if auth_type != 'user':
|
||||
if not self.ckminions.wheel_check(auth_list, load['fun'], load['kwarg']):
|
||||
return dict(error=dict(name=err_name,
|
||||
message=('Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.').format(auth_type, username)))
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get('auth_list', []),
|
||||
load['fun'],
|
||||
load['kwarg']
|
||||
)
|
||||
if not wheel_check:
|
||||
return {'error': {'name': err_name,
|
||||
'message': 'Authentication failure of type "{0}" occurred for '
|
||||
'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# Authenticated. Do the job.
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = load.pop('fun')
|
||||
tag = salt.utils.event.tagify(jid, prefix='wheel')
|
||||
data = {'fun': "wheel.{0}".format(fun),
|
||||
|
@ -1114,7 +1107,7 @@ class LocalFuncs(object):
|
|||
'data': data}
|
||||
except Exception as exc:
|
||||
log.error('Exception occurred while '
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
'introspecting {0}: {1}'.format(fun, exc))
|
||||
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
|
||||
fun,
|
||||
exc.__class__.__name__,
|
||||
|
@ -1167,11 +1160,12 @@ class LocalFuncs(object):
|
|||
|
||||
# Retrieve the minions list
|
||||
delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res['minions']
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
|
@ -1181,12 +1175,7 @@ class LocalFuncs(object):
|
|||
return ''
|
||||
|
||||
# Get acl from eauth module.
|
||||
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
|
||||
auth_list = token['auth_list']
|
||||
else:
|
||||
extra['eauth'] = token['eauth']
|
||||
extra['username'] = token['name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
|
@ -1383,3 +1372,18 @@ class LocalFuncs(object):
|
|||
},
|
||||
'pub': pub_load
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, load):
|
||||
key = None
|
||||
if 'token' in load:
|
||||
auth_type = 'token'
|
||||
err_name = 'TokenAuthenticationError'
|
||||
elif 'eauth' in load:
|
||||
auth_type = 'eauth'
|
||||
err_name = 'EauthAuthenticationError'
|
||||
else:
|
||||
auth_type = 'user'
|
||||
err_name = 'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -744,12 +744,7 @@ class Client(object):
|
|||
Cache a file then process it as a template
|
||||
'''
|
||||
if u'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
u'Oxygen',
|
||||
u'Parameter \'env\' has been detected in the argument list. This '
|
||||
u'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop(u'env')
|
||||
|
||||
kwargs[u'saltenv'] = saltenv
|
||||
|
@ -1300,10 +1295,10 @@ class RemoteClient(Client):
|
|||
hash_type = self.opts.get(u'hash_type', u'md5')
|
||||
ret[u'hsum'] = salt.utils.get_hash(path, form=hash_type)
|
||||
ret[u'hash_type'] = hash_type
|
||||
return ret, list(os.stat(path))
|
||||
return ret
|
||||
load = {u'path': path,
|
||||
u'saltenv': saltenv,
|
||||
u'cmd': u'_file_hash_and_stat'}
|
||||
u'cmd': u'_file_hash'}
|
||||
return self.channel.send(load)
|
||||
|
||||
def hash_file(self, path, saltenv=u'base'):
|
||||
|
@ -1312,14 +1307,33 @@ class RemoteClient(Client):
|
|||
master file server prepend the path with salt://<file on server>
|
||||
otherwise, prepend the file with / for a local file.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)[0]
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
|
||||
def hash_and_stat_file(self, path, saltenv=u'base'):
|
||||
'''
|
||||
The same as hash_file, but also return the file's mode, or None if no
|
||||
mode data is present.
|
||||
'''
|
||||
return self.__hash_and_stat_file(path, saltenv)
|
||||
hash_result = self.hash_file(path, saltenv)
|
||||
try:
|
||||
path = self._check_proto(path)
|
||||
except MinionError as err:
|
||||
if not os.path.isfile(path):
|
||||
return hash_result, None
|
||||
else:
|
||||
try:
|
||||
return hash_result, list(os.stat(path))
|
||||
except Exception:
|
||||
return hash_result, None
|
||||
load = {'path': path,
|
||||
'saltenv': saltenv,
|
||||
'cmd': '_file_find'}
|
||||
fnd = self.channel.send(load)
|
||||
try:
|
||||
stat_result = fnd.get('stat')
|
||||
except AttributeError:
|
||||
stat_result = None
|
||||
return hash_result, stat_result
|
||||
|
||||
def list_env(self, saltenv=u'base'):
|
||||
'''
|
||||
|
|
|
@ -553,12 +553,7 @@ class Fileserver(object):
|
|||
kwargs[args[0]] = args[1]
|
||||
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
if 'saltenv' in kwargs:
|
||||
saltenv = kwargs.pop('saltenv')
|
||||
|
@ -583,12 +578,7 @@ class Fileserver(object):
|
|||
'dest': ''}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
|
||||
|
@ -609,13 +599,7 @@ class Fileserver(object):
|
|||
Common code for hashing and stating files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. '
|
||||
'This parameter is no longer used and has been replaced by '
|
||||
'\'saltenv\' as of Salt 2016.11.0. This warning will be removed '
|
||||
'in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
|
@ -656,12 +640,7 @@ class Fileserver(object):
|
|||
Deletes the file_lists cache files
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
saltenv = load.get('saltenv', [])
|
||||
|
@ -738,12 +717,7 @@ class Fileserver(object):
|
|||
Return a list of files from the dominant environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -769,12 +743,7 @@ class Fileserver(object):
|
|||
List all emptydirs in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -800,12 +769,7 @@ class Fileserver(object):
|
|||
List all directories in the given environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = set()
|
||||
|
@ -831,12 +795,7 @@ class Fileserver(object):
|
|||
Return a list of symlinked files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
|
|
@ -736,12 +736,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -770,12 +765,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
|
@ -804,12 +794,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files and dirs
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
|
||||
|
@ -852,12 +837,7 @@ def _get_file_list(load):
|
|||
Get a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
@ -897,12 +877,7 @@ def _get_dir_list(load):
|
|||
Get a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
|
|
@ -165,12 +165,7 @@ def file_hash(load, fnd):
|
|||
ret = {}
|
||||
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
@ -235,12 +230,7 @@ def file_list(load):
|
|||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
@ -319,12 +309,7 @@ def dir_list(load):
|
|||
- source-minion/absolute/path
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in envs():
|
||||
|
|
|
@ -40,12 +40,7 @@ def find_file(path, saltenv='base', **kwargs):
|
|||
Search the environment for the relative path.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
path = os.path.normpath(path)
|
||||
|
@ -117,12 +112,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -218,12 +208,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'path' not in load or 'saltenv' not in load:
|
||||
|
@ -298,12 +283,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files, dirs, emtydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if load['saltenv'] not in __opts__['file_roots']:
|
||||
|
@ -444,12 +424,7 @@ def symlink_list(load):
|
|||
Return a dict of all symlinks based on a given path on the Master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
|
|
@ -126,12 +126,7 @@ def find_file(path, saltenv='base', **kwargs):
|
|||
is missing, or if the MD5 does not match.
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
fnd = {'bucket': None,
|
||||
|
@ -168,12 +163,7 @@ def file_hash(load, fnd):
|
|||
Return an MD5 file hash
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {}
|
||||
|
@ -201,12 +191,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -245,12 +230,7 @@ def file_list(load):
|
|||
Return a list of all files on the file server in a specified environment
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
|
@ -286,12 +266,7 @@ def dir_list(load):
|
|||
Return a list of all directories on the master
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = []
|
||||
|
|
|
@ -631,12 +631,7 @@ def serve_file(load, fnd):
|
|||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
ret = {'data': '',
|
||||
|
@ -665,12 +660,7 @@ def file_hash(load, fnd):
|
|||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if not all(x in load for x in ('path', 'saltenv')):
|
||||
|
@ -723,12 +713,7 @@ def _file_lists(load, form):
|
|||
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
|
||||
'''
|
||||
if 'env' in load:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
load.pop('env')
|
||||
|
||||
if 'saltenv' not in load or load['saltenv'] not in envs():
|
||||
|
|
38
salt/grains/cimc.py
Normal file
38
salt/grains/cimc.py
Normal file
|
@ -0,0 +1,38 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Generate baseline proxy minion grains for cimc hosts.
|
||||
|
||||
'''
|
||||
|
||||
# Import Python Libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import Salt Libs
|
||||
import salt.utils.platform
|
||||
import salt.proxy.cimc
|
||||
|
||||
__proxyenabled__ = ['cimc']
|
||||
__virtualname__ = 'cimc'
|
||||
|
||||
log = logging.getLogger(__file__)
|
||||
|
||||
GRAINS_CACHE = {'os_family': 'Cisco UCS'}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
try:
|
||||
if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'cimc':
|
||||
return __virtualname__
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def cimc(proxy=None):
|
||||
if not proxy:
|
||||
return {}
|
||||
if proxy['cimc.initialized']() is False:
|
||||
return {}
|
||||
return {'cimc': proxy['cimc.grains']()}
|
|
@ -2425,4 +2425,46 @@ def get_master():
|
|||
# master
|
||||
return {'master': __opts__.get('master', '')}
|
||||
|
||||
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
||||
|
||||
def default_gateway():
|
||||
'''
|
||||
Populates grains which describe whether a server has a default gateway
|
||||
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
|
||||
for a `default` at the beginning of any line. Assuming the standard
|
||||
`default via <ip>` format for default gateways, it will also parse out the
|
||||
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
|
||||
|
||||
If the `ip` command is unavailable, no grains will be populated.
|
||||
|
||||
Currently does not support multiple default gateways. The grains will be
|
||||
set to the first default gateway found.
|
||||
|
||||
List of grains:
|
||||
|
||||
ip4_gw: True # ip/True/False if default ipv4 gateway
|
||||
ip6_gw: True # ip/True/False if default ipv6 gateway
|
||||
ip_gw: True # True if either of the above is True, False otherwise
|
||||
'''
|
||||
grains = {}
|
||||
if not salt.utils.which('ip'):
|
||||
return {}
|
||||
grains['ip_gw'] = False
|
||||
grains['ip4_gw'] = False
|
||||
grains['ip6_gw'] = False
|
||||
if __salt__['cmd.run']('ip -4 route show | grep "^default"', python_shell=True):
|
||||
grains['ip_gw'] = True
|
||||
grains['ip4_gw'] = True
|
||||
try:
|
||||
gateway_ip = __salt__['cmd.run']('ip -4 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
|
||||
grains['ip4_gw'] = gateway_ip if gateway_ip else True
|
||||
except Exception as exc:
|
||||
pass
|
||||
if __salt__['cmd.run']('ip -6 route show | grep "^default"', python_shell=True):
|
||||
grains['ip_gw'] = True
|
||||
grains['ip6_gw'] = True
|
||||
try:
|
||||
gateway_ip = __salt__['cmd.run']('ip -6 route show | grep "^default via"', python_shell=True).split(' ')[2].strip()
|
||||
grains['ip6_gw'] = gateway_ip if gateway_ip else True
|
||||
except Exception as exc:
|
||||
pass
|
||||
return grains
|
||||
|
|
|
@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`.
|
|||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
|
||||
|
@ -47,16 +48,30 @@ def _search(prefix="latest/"):
|
|||
Recursively look up all grains in the metadata server
|
||||
'''
|
||||
ret = {}
|
||||
for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'):
|
||||
linedata = http.query(os.path.join(HOST, prefix))
|
||||
if 'body' not in linedata:
|
||||
return ret
|
||||
for line in linedata['body'].split('\n'):
|
||||
if line.endswith('/'):
|
||||
ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
|
||||
elif prefix == 'latest/':
|
||||
# (gtmanfred) The first level should have a forward slash since
|
||||
# they have stuff underneath. This will not be doubled up though,
|
||||
# because lines ending with a slash are checked first.
|
||||
ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
|
||||
elif line.endswith(('dynamic', 'meta-data')):
|
||||
ret[line] = _search(prefix=os.path.join(prefix, line))
|
||||
elif '=' in line:
|
||||
key, value = line.split('=')
|
||||
ret[value] = _search(prefix=os.path.join(prefix, key))
|
||||
else:
|
||||
ret[line] = http.query(os.path.join(HOST, prefix, line))['body']
|
||||
retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
|
||||
# (gtmanfred) This try except block is slightly faster than
|
||||
# checking if the string starts with a curly brace
|
||||
try:
|
||||
ret[line] = json.loads(retdata)
|
||||
except ValueError:
|
||||
ret[line] = retdata
|
||||
return ret
|
||||
|
||||
|
||||
|
|
37
salt/key.py
37
salt/key.py
|
@ -496,7 +496,7 @@ class Key(object):
|
|||
minions = []
|
||||
for key, val in six.iteritems(keys):
|
||||
minions.extend(val)
|
||||
if not self.opts.get(u'preserve_minion_cache', False) or not preserve_minions:
|
||||
if not self.opts.get(u'preserve_minion_cache', False):
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], self.ACC)
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
|
@ -743,7 +743,7 @@ class Key(object):
|
|||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
|
@ -781,11 +781,10 @@ class Key(object):
|
|||
salt.utils.event.tagify(prefix=u'key'))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if preserve_minions:
|
||||
preserve_minions_list = matches.get(u'minions', [])
|
||||
if self.opts.get(u'preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
preserve_minions_list = []
|
||||
self.check_minion_cache(preserve_minions=preserve_minions_list)
|
||||
self.check_minion_cache()
|
||||
if self.opts.get(u'rotate_aes_key'):
|
||||
salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user'])
|
||||
return (
|
||||
|
@ -976,16 +975,17 @@ class RaetKey(Key):
|
|||
minions.extend(val)
|
||||
|
||||
m_cache = os.path.join(self.opts[u'cachedir'], u'minions')
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions:
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if not self.opts.get('preserve_minion_cache', False):
|
||||
if os.path.isdir(m_cache):
|
||||
for minion in os.listdir(m_cache):
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
shutil.rmtree(os.path.join(m_cache, minion))
|
||||
cache = salt.cache.factory(self.opts)
|
||||
clist = cache.list(self.ACC)
|
||||
if clist:
|
||||
for minion in clist:
|
||||
if minion not in minions and minion not in preserve_minions:
|
||||
cache.flush(u'{0}/{1}'.format(self.ACC, minion))
|
||||
|
||||
kind = self.opts.get(u'__role', u'') # application kind
|
||||
if kind not in kinds.APPL_KINDS:
|
||||
|
@ -1227,7 +1227,7 @@ class RaetKey(Key):
|
|||
def delete_key(self,
|
||||
match=None,
|
||||
match_dict=None,
|
||||
preserve_minions=False,
|
||||
preserve_minions=None,
|
||||
revoke_auth=False):
|
||||
'''
|
||||
Delete public keys. If "match" is passed, it is evaluated as a glob.
|
||||
|
@ -1258,7 +1258,10 @@ class RaetKey(Key):
|
|||
os.remove(os.path.join(self.opts[u'pki_dir'], status, key))
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
if self.opts.get('preserve_minions') is True:
|
||||
self.check_minion_cache(preserve_minions=matches.get(u'minions', []))
|
||||
else:
|
||||
self.check_minion_cache()
|
||||
return (
|
||||
self.name_match(match) if match is not None
|
||||
else self.dict_match(matches)
|
||||
|
|
|
@ -270,7 +270,7 @@ def raw_mod(opts, name, functions, mod=u'modules'):
|
|||
testmod['test.ping']()
|
||||
'''
|
||||
loader = LazyLoader(
|
||||
_module_dirs(opts, mod, u'rawmodule'),
|
||||
_module_dirs(opts, mod, u'module'),
|
||||
opts,
|
||||
tag=u'rawmodule',
|
||||
virtual_enable=False,
|
||||
|
|
175
salt/master.py
175
salt/master.py
|
@ -1311,7 +1311,8 @@ class AESFuncs(object):
|
|||
load.get(u'saltenv', load.get(u'env')),
|
||||
ext=load.get(u'ext'),
|
||||
pillar_override=load.get(u'pillar_override', {}),
|
||||
pillarenv=load.get(u'pillarenv'))
|
||||
pillarenv=load.get(u'pillarenv'),
|
||||
extra_minion_data=load.get(u'extra_minion_data'))
|
||||
data = pillar.compile_pillar()
|
||||
self.fs_.update_opts()
|
||||
if self.opts.get(u'minion_data_cache', False):
|
||||
|
@ -1667,49 +1668,36 @@ class ClearFuncs(object):
|
|||
Send a master control function back to the runner system
|
||||
'''
|
||||
# All runner ops pass through eauth
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
runner_check = self.ckminions.runner_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not runner_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(runner_check, dict) and u'error' in runner_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.runner_check
|
||||
return runner_check
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
clear_load.pop(u'password', None)
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
|
@ -1726,57 +1714,45 @@ class ClearFuncs(object):
|
|||
username)
|
||||
except Exception as exc:
|
||||
log.error(u'Exception occurred while introspecting %s: %s', fun, exc)
|
||||
return dict(error=dict(name=exc.__class__.__name__,
|
||||
args=exc.args,
|
||||
message=str(exc)))
|
||||
return {u'error': {u'name': exc.__class__.__name__,
|
||||
u'args': exc.args,
|
||||
u'message': str(exc)}}
|
||||
|
||||
def wheel(self, clear_load):
|
||||
'''
|
||||
Send a master control function back to the wheel system
|
||||
'''
|
||||
# All wheel ops pass through eauth
|
||||
username = None
|
||||
if u'token' in clear_load:
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(clear_load)
|
||||
if not token:
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=u'Authentication failure of type "token" occurred.'))
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
|
||||
|
||||
# Authorize
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
clear_load[u'eauth'] = token[u'eauth']
|
||||
clear_load[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'TokenAuthenticationError',
|
||||
message=(u'Authentication failure of type "token" occurred for '
|
||||
u'user {0}.').format(token[u'name'])))
|
||||
clear_load.pop(u'token')
|
||||
username = token[u'name']
|
||||
elif u'eauth' in clear_load:
|
||||
if not self.loadauth.authenticate_eauth(clear_load):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
# Authenticate
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
error = auth_check.get(u'error')
|
||||
|
||||
auth_list = self.loadauth.get_auth_list(clear_load)
|
||||
if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})):
|
||||
return dict(error=dict(name=u'EauthAuthenticationError',
|
||||
message=(u'Authentication failure of type "eauth" occurred for '
|
||||
u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN'))))
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
return {u'error': error}
|
||||
|
||||
# No error occurred, consume the password from the clear_load if
|
||||
# passed
|
||||
clear_load.pop(u'password', None)
|
||||
username = clear_load.pop(u'username', u'UNKNOWN')
|
||||
# Authorize
|
||||
username = auth_check.get(u'username')
|
||||
if auth_type != u'user':
|
||||
wheel_check = self.ckminions.wheel_check(
|
||||
auth_check.get(u'auth_list', []),
|
||||
clear_load[u'fun'],
|
||||
clear_load.get(u'kwarg', {})
|
||||
)
|
||||
if not wheel_check:
|
||||
return {u'error': {u'name': err_name,
|
||||
u'message': u'Authentication failure of type "{0}" occurred for '
|
||||
u'user {1}.'.format(auth_type, username)}}
|
||||
elif isinstance(wheel_check, dict) and u'error' in wheel_check:
|
||||
# A dictionary with an error name/message was handled by ckminions.wheel_check
|
||||
return wheel_check
|
||||
|
||||
# No error occurred, consume sensitive settings from the clear_load if passed.
|
||||
for item in sensitive_load_keys:
|
||||
clear_load.pop(item, None)
|
||||
else:
|
||||
if not self.loadauth.authenticate_key(clear_load, self.key):
|
||||
return dict(error=dict(name=u'UserAuthenticationError',
|
||||
message=u'Authentication failure of type "user" occurred'))
|
||||
|
||||
if u'user' in clear_load:
|
||||
username = clear_load[u'user']
|
||||
if salt.auth.AuthUser(username).is_sudo():
|
||||
|
@ -1786,7 +1762,7 @@ class ClearFuncs(object):
|
|||
|
||||
# Authorized. Do the job!
|
||||
try:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(self.opts)
|
||||
fun = clear_load.pop(u'fun')
|
||||
tag = tagify(jid, prefix=u'wheel')
|
||||
data = {u'fun': u"wheel.{0}".format(fun),
|
||||
|
@ -1852,11 +1828,13 @@ class ClearFuncs(object):
|
|||
|
||||
# Retrieve the minions list
|
||||
delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM)
|
||||
minions = self.ckminions.check_minions(
|
||||
_res = self.ckminions.check_minions(
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res.get('minions', list())
|
||||
missing = _res.get('missing', list())
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get(u'token', False):
|
||||
|
@ -1866,12 +1844,7 @@ class ClearFuncs(object):
|
|||
return u''
|
||||
|
||||
# Get acl
|
||||
if self.opts[u'keep_acl_in_token'] and u'auth_list' in token:
|
||||
auth_list = token[u'auth_list']
|
||||
else:
|
||||
extra[u'eauth'] = token[u'eauth']
|
||||
extra[u'username'] = token[u'name']
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
|
@ -1961,7 +1934,7 @@ class ClearFuncs(object):
|
|||
if jid is None:
|
||||
return {u'enc': u'clear',
|
||||
u'load': {u'error': u'Master failed to assign jid'}}
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra)
|
||||
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
|
||||
|
||||
# Send it!
|
||||
self._send_pub(payload)
|
||||
|
@ -1970,10 +1943,29 @@ class ClearFuncs(object):
|
|||
u'enc': u'clear',
|
||||
u'load': {
|
||||
u'jid': clear_load[u'jid'],
|
||||
u'minions': minions
|
||||
u'minions': minions,
|
||||
u'missing': missing
|
||||
}
|
||||
}
|
||||
|
||||
def _prep_auth_info(self, clear_load):
|
||||
sensitive_load_keys = []
|
||||
key = None
|
||||
if u'token' in clear_load:
|
||||
auth_type = u'token'
|
||||
err_name = u'TokenAuthenticationError'
|
||||
sensitive_load_keys = [u'token']
|
||||
elif u'eauth' in clear_load:
|
||||
auth_type = u'eauth'
|
||||
err_name = u'EauthAuthenticationError'
|
||||
sensitive_load_keys = [u'username', u'password']
|
||||
else:
|
||||
auth_type = u'user'
|
||||
err_name = u'UserAuthenticationError'
|
||||
key = self.key
|
||||
|
||||
return auth_type, err_name, key, sensitive_load_keys
|
||||
|
||||
def _prep_jid(self, clear_load, extra):
|
||||
'''
|
||||
Return a jid for this publication
|
||||
|
@ -2007,7 +1999,7 @@ class ClearFuncs(object):
|
|||
chan = salt.transport.server.PubServerChannel.factory(opts)
|
||||
chan.publish(load)
|
||||
|
||||
def _prep_pub(self, minions, jid, clear_load, extra):
|
||||
def _prep_pub(self, minions, jid, clear_load, extra, missing):
|
||||
'''
|
||||
Take a given load and perform the necessary steps
|
||||
to prepare a publication.
|
||||
|
@ -2028,6 +2020,7 @@ class ClearFuncs(object):
|
|||
u'fun': clear_load[u'fun'],
|
||||
u'arg': clear_load[u'arg'],
|
||||
u'minions': minions,
|
||||
u'missing': missing,
|
||||
}
|
||||
|
||||
# Announce the job on the event bus
|
||||
|
|
|
@ -21,6 +21,7 @@ import multiprocessing
|
|||
from random import randint, shuffle
|
||||
from stat import S_IMODE
|
||||
import salt.serializers.msgpack
|
||||
from binascii import crc32
|
||||
|
||||
# Import Salt Libs
|
||||
# pylint: disable=import-error,no-name-in-module,redefined-builtin
|
||||
|
@ -102,6 +103,7 @@ import salt.defaults.exitcodes
|
|||
import salt.cli.daemons
|
||||
import salt.log.setup
|
||||
|
||||
import salt.utils.dictupdate
|
||||
from salt.config import DEFAULT_MINION_OPTS
|
||||
from salt.defaults import DEFAULT_TARGET_DELIM
|
||||
from salt.utils.debug import enable_sigusr1_handler
|
||||
|
@ -443,13 +445,30 @@ class MinionBase(object):
|
|||
if opts[u'master_type'] == u'func':
|
||||
eval_master_func(opts)
|
||||
|
||||
# if failover is set, master has to be of type list
|
||||
elif opts[u'master_type'] == u'failover':
|
||||
# if failover or distributed is set, master has to be of type list
|
||||
elif opts[u'master_type'] in (u'failover', u'distributed'):
|
||||
if isinstance(opts[u'master'], list):
|
||||
log.info(
|
||||
u'Got list of available master addresses: %s',
|
||||
opts[u'master']
|
||||
)
|
||||
|
||||
if opts[u'master_type'] == u'distributed':
|
||||
master_len = len(opts[u'master'])
|
||||
if master_len > 1:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
master_idx = crc32(opts[u'id']) % master_len
|
||||
try:
|
||||
preferred_masters = opts[u'master']
|
||||
preferred_masters[0] = opts[u'master'][master_idx]
|
||||
preferred_masters[1:] = [m for m in opts[u'master'] if m != preferred_masters[0]]
|
||||
opts[u'master'] = preferred_masters
|
||||
log.info(u'Distributed to the master at \'{0}\'.'.format(opts[u'master'][0]))
|
||||
except (KeyError, AttributeError, TypeError):
|
||||
log.warning(u'Failed to distribute to a specific master.')
|
||||
else:
|
||||
log.warning(u'master_type = distributed needs more than 1 master.')
|
||||
|
||||
if opts[u'master_shuffle']:
|
||||
if opts[u'master_failback']:
|
||||
secondary_masters = opts[u'master'][1:]
|
||||
|
@ -497,7 +516,7 @@ class MinionBase(object):
|
|||
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
||||
# If failover is set, minion have to failover on DNS errors instead of retry DNS resolve.
|
||||
# See issue 21082 for details
|
||||
if opts[u'retry_dns']:
|
||||
if opts[u'retry_dns'] and opts[u'master_type'] == u'failover':
|
||||
msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
|
||||
u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
|
||||
log.critical(msg)
|
||||
|
@ -845,7 +864,7 @@ class MinionManager(MinionBase):
|
|||
Spawn all the coroutines which will sign in to masters
|
||||
'''
|
||||
masters = self.opts[u'master']
|
||||
if self.opts[u'master_type'] == u'failover' or not isinstance(self.opts[u'master'], list):
|
||||
if (self.opts[u'master_type'] in (u'failover', u'distributed')) or not isinstance(self.opts[u'master'], list):
|
||||
masters = [masters]
|
||||
|
||||
for master in masters:
|
||||
|
@ -1930,6 +1949,10 @@ class Minion(MinionBase):
|
|||
self.beacons.disable_beacon(name)
|
||||
elif func == u'list':
|
||||
self.beacons.list_beacons()
|
||||
elif func == u'list_available':
|
||||
self.beacons.list_available_beacons()
|
||||
elif func == u'validate_beacon':
|
||||
self.beacons.validate_beacon(name, beacon_data)
|
||||
|
||||
def environ_setenv(self, tag, data):
|
||||
'''
|
||||
|
@ -3191,6 +3214,26 @@ class ProxyMinion(Minion):
|
|||
if u'proxy' not in self.opts:
|
||||
self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy']
|
||||
|
||||
if self.opts.get(u'proxy_merge_pillar_in_opts'):
|
||||
# Override proxy opts with pillar data when the user required.
|
||||
self.opts = salt.utils.dictupdate.merge(self.opts,
|
||||
self.opts[u'pillar'],
|
||||
strategy=self.opts.get(u'proxy_merge_pillar_in_opts_strategy'),
|
||||
merge_lists=self.opts.get(u'proxy_deep_merge_pillar_in_opts', False))
|
||||
elif self.opts.get(u'proxy_mines_pillar'):
|
||||
# Even when not required, some details such as mine configuration
|
||||
# should be merged anyway whenever possible.
|
||||
if u'mine_interval' in self.opts[u'pillar']:
|
||||
self.opts[u'mine_interval'] = self.opts[u'pillar'][u'mine_interval']
|
||||
if u'mine_functions' in self.opts[u'pillar']:
|
||||
general_proxy_mines = self.opts.get(u'mine_functions', [])
|
||||
specific_proxy_mines = self.opts[u'pillar'][u'mine_functions']
|
||||
try:
|
||||
self.opts[u'mine_functions'] = general_proxy_mines + specific_proxy_mines
|
||||
except TypeError as terr:
|
||||
log.error(u'Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
|
||||
self.opts[u'id']))
|
||||
|
||||
fq_proxyname = self.opts[u'proxy'][u'proxytype']
|
||||
|
||||
# Need to load the modules so they get all the dunder variables
|
||||
|
|
|
@ -12,6 +12,7 @@ import logging
|
|||
|
||||
# Import Salt libs
|
||||
import salt.utils.files
|
||||
import salt.utils.path
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext import six
|
||||
|
@ -241,4 +242,4 @@ def _read_link(name):
|
|||
Throws an OSError if the link does not exist
|
||||
'''
|
||||
alt_link_path = '/etc/alternatives/{0}'.format(name)
|
||||
return os.readlink(alt_link_path)
|
||||
return salt.utils.path.readlink(alt_link_path)
|
||||
|
|
|
@ -447,11 +447,15 @@ def config(name, config, edit=True):
|
|||
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
|
||||
'''
|
||||
|
||||
configs = []
|
||||
for entry in config:
|
||||
key = next(six.iterkeys(entry))
|
||||
configs = _parse_config(entry[key], key)
|
||||
if edit:
|
||||
with salt.utils.files.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configs)
|
||||
return configs
|
||||
configs.append(_parse_config(entry[key], key))
|
||||
|
||||
# Python auto-correct line endings
|
||||
configstext = "\n".join(configs)
|
||||
if edit:
|
||||
with salt.utils.files.fopen(name, 'w') as configfile:
|
||||
configfile.write('# This file is managed by Salt.\n')
|
||||
configfile.write(configstext)
|
||||
return configstext
|
||||
|
|
|
@ -99,7 +99,7 @@ def __virtual__():
|
|||
'''
|
||||
Confirm this module is on a Debian based system
|
||||
'''
|
||||
if __grains__.get('os_family') in ('Kali', 'Debian', 'neon'):
|
||||
if __grains__.get('os_family') in ('Kali', 'Debian', 'neon', 'Deepin'):
|
||||
return __virtualname__
|
||||
elif __grains__.get('os_family', False) == 'Cumulus':
|
||||
return __virtualname__
|
||||
|
|
|
@ -14,6 +14,7 @@ import os
|
|||
import yaml
|
||||
|
||||
# Import Salt libs
|
||||
import salt.ext.six as six
|
||||
import salt.utils.event
|
||||
import salt.utils.files
|
||||
from salt.ext.six.moves import map
|
||||
|
@ -69,6 +70,47 @@ def list_(return_yaml=True):
|
|||
return {'beacons': {}}
|
||||
|
||||
|
||||
def list_available(return_yaml=True):
|
||||
'''
|
||||
List the beacons currently available on the minion
|
||||
|
||||
:param return_yaml: Whether to return YAML formatted output, default True
|
||||
:return: List of currently configured Beacons.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' beacons.list_available
|
||||
|
||||
'''
|
||||
beacons = None
|
||||
|
||||
try:
|
||||
eventer = salt.utils.event.get_event('minion', opts=__opts__)
|
||||
res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons')
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_available_complete', wait=30)
|
||||
if event_ret and event_ret['complete']:
|
||||
beacons = event_ret['beacons']
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret = {}
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Event module not available. Beacon add failed.'
|
||||
return ret
|
||||
|
||||
if beacons:
|
||||
if return_yaml:
|
||||
tmp = {'beacons': beacons}
|
||||
yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
|
||||
return yaml_out
|
||||
else:
|
||||
return beacons
|
||||
else:
|
||||
return {'beacons': {}}
|
||||
|
||||
|
||||
def add(name, beacon_data, **kwargs):
|
||||
'''
|
||||
Add a beacon on the minion
|
||||
|
@ -95,37 +137,34 @@ def add(name, beacon_data, **kwargs):
|
|||
ret['result'] = True
|
||||
ret['comment'] = 'Beacon: {0} would be added.'.format(name)
|
||||
else:
|
||||
# Attempt to load the beacon module so we have access to the validate function
|
||||
try:
|
||||
beacon_module = __import__('salt.beacons.' + name, fromlist=['validate'])
|
||||
log.debug('Successfully imported beacon.')
|
||||
except ImportError:
|
||||
ret['comment'] = 'Beacon {0} does not exist'.format(name)
|
||||
return ret
|
||||
|
||||
# Attempt to validate
|
||||
if hasattr(beacon_module, 'validate'):
|
||||
_beacon_data = beacon_data
|
||||
if 'enabled' in _beacon_data:
|
||||
del _beacon_data['enabled']
|
||||
valid, vcomment = beacon_module.validate(_beacon_data)
|
||||
else:
|
||||
log.info('Beacon {0} does not have a validate'
|
||||
' function, skipping validation.'.format(name))
|
||||
valid = True
|
||||
|
||||
if not valid:
|
||||
ret['result'] = False
|
||||
ret['comment'] = ('Beacon {0} configuration invalid, '
|
||||
'not adding.\n{1}'.format(name, vcomment))
|
||||
return ret
|
||||
|
||||
try:
|
||||
# Attempt to load the beacon module so we have access to the validate function
|
||||
eventer = salt.utils.event.get_event('minion', opts=__opts__)
|
||||
res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'add'}, 'manage_beacons')
|
||||
res = __salt__['event.fire']({'name': name,
|
||||
'beacon_data': beacon_data,
|
||||
'func': 'validate_beacon'},
|
||||
'manage_beacons')
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30)
|
||||
valid = event_ret['valid']
|
||||
vcomment = event_ret['vcomment']
|
||||
|
||||
if not valid:
|
||||
ret['result'] = False
|
||||
ret['comment'] = ('Beacon {0} configuration invalid, '
|
||||
'not adding.\n{1}'.format(name, vcomment))
|
||||
return ret
|
||||
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon add failed.'
|
||||
|
||||
try:
|
||||
res = __salt__['event.fire']({'name': name,
|
||||
'beacon_data': beacon_data,
|
||||
'func': 'add'}, 'manage_beacons')
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_add_complete', wait=30)
|
||||
log.debug('=== event_ret {} ==='.format(event_ret))
|
||||
if event_ret and event_ret['complete']:
|
||||
beacons = event_ret['beacons']
|
||||
if name in beacons and beacons[name] == beacon_data:
|
||||
|
@ -165,29 +204,32 @@ def modify(name, beacon_data, **kwargs):
|
|||
ret['result'] = True
|
||||
ret['comment'] = 'Beacon: {0} would be added.'.format(name)
|
||||
else:
|
||||
# Attempt to load the beacon module so we have access to the validate function
|
||||
try:
|
||||
beacon_module = __import__('salt.beacons.' + name, fromlist=['validate'])
|
||||
log.debug('Successfully imported beacon.')
|
||||
except ImportError:
|
||||
ret['comment'] = 'Beacon {0} does not exist'.format(name)
|
||||
return ret
|
||||
# Attempt to load the beacon module so we have access to the validate function
|
||||
eventer = salt.utils.event.get_event('minion', opts=__opts__)
|
||||
res = __salt__['event.fire']({'name': name,
|
||||
'beacon_data': beacon_data,
|
||||
'func': 'validate_beacon'},
|
||||
'manage_beacons')
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30)
|
||||
valid = event_ret['valid']
|
||||
vcomment = event_ret['vcomment']
|
||||
|
||||
# Attempt to validate
|
||||
if hasattr(beacon_module, 'validate'):
|
||||
_beacon_data = beacon_data
|
||||
if 'enabled' in _beacon_data:
|
||||
del _beacon_data['enabled']
|
||||
valid, vcomment = beacon_module.validate(_beacon_data)
|
||||
else:
|
||||
log.info('Beacon {0} does not have a validate'
|
||||
' function, skipping validation.'.format(name))
|
||||
valid = True
|
||||
if not valid:
|
||||
ret['result'] = False
|
||||
ret['comment'] = ('Beacon {0} configuration invalid, '
|
||||
'not adding.\n{1}'.format(name, vcomment))
|
||||
return ret
|
||||
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon modify failed.'
|
||||
|
||||
if not valid:
|
||||
ret['result'] = False
|
||||
ret['comment'] = ('Beacon {0} configuration invalid, '
|
||||
'not adding.\n{1}'.format(name, vcomment))
|
||||
'not modifying.\n{1}'.format(name, vcomment))
|
||||
return ret
|
||||
|
||||
_current = current_beacons[name]
|
||||
|
@ -197,10 +239,14 @@ def modify(name, beacon_data, **kwargs):
|
|||
ret['comment'] = 'Job {0} in correct state'.format(name)
|
||||
return ret
|
||||
|
||||
_current_lines = ['{0}:{1}\n'.format(key, value)
|
||||
for (key, value) in sorted(_current.items())]
|
||||
_new_lines = ['{0}:{1}\n'.format(key, value)
|
||||
for (key, value) in sorted(_new.items())]
|
||||
_current_lines = []
|
||||
for _item in _current:
|
||||
_current_lines.extend(['{0}:{1}\n'.format(key, value)
|
||||
for (key, value) in six.iteritems(_item)])
|
||||
_new_lines = []
|
||||
for _item in _new:
|
||||
_new_lines.extend(['{0}:{1}\n'.format(key, value)
|
||||
for (key, value) in six.iteritems(_item)])
|
||||
_diff = difflib.unified_diff(_current_lines, _new_lines)
|
||||
|
||||
ret['changes'] = {}
|
||||
|
|
462
salt/modules/boto_cloudfront.py
Normal file
462
salt/modules/boto_cloudfront.py
Normal file
|
@ -0,0 +1,462 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Connection module for Amazon CloudFront
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
:depends: boto3
|
||||
|
||||
:configuration: This module accepts explicit AWS credentials but can also
|
||||
utilize IAM roles assigned to the instance through Instance Profiles or
|
||||
it can read them from the ~/.aws/credentials file or from these
|
||||
environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY.
|
||||
Dynamic credentials are then automatically obtained from AWS API and no
|
||||
further configuration is necessary. More information available at:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/
|
||||
iam-roles-for-amazon-ec2.html
|
||||
|
||||
http://boto3.readthedocs.io/en/latest/guide/
|
||||
configuration.html#guide-configuration
|
||||
|
||||
If IAM roles are not used you need to specify them either in a pillar or
|
||||
in the minion's config file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
cloudfront.keyid: GKTADJGHEIQSXMKKRBJ08H
|
||||
cloudfront.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
|
||||
|
||||
A region may also be specified in the configuration:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
cloudfront.region: us-east-1
|
||||
|
||||
If a region is not specified, the default is us-east-1.
|
||||
|
||||
It's also possible to specify key, keyid and region via a profile, either
|
||||
as a passed in dict, or as a string to pull from pillars or minion config:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
myprofile:
|
||||
keyid: GKTADJGHEIQSXMKKRBJ08H
|
||||
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
|
||||
region: us-east-1
|
||||
'''
|
||||
# keep lint from choking on _get_conn and _cache_id
|
||||
# pylint: disable=E0602
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import Salt libs
|
||||
import salt.ext.six as six
|
||||
from salt.utils.odict import OrderedDict
|
||||
|
||||
import yaml
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
# pylint: disable=unused-import
|
||||
import boto3
|
||||
import botocore
|
||||
# pylint: enable=unused-import
|
||||
logging.getLogger('boto3').setLevel(logging.CRITICAL)
|
||||
HAS_BOTO = True
|
||||
except ImportError:
|
||||
HAS_BOTO = False
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if boto3 libraries exist.
|
||||
'''
|
||||
if not HAS_BOTO:
|
||||
msg = 'The boto_cloudfront module could not be loaded: {}.'
|
||||
return (False, msg.format('boto3 libraries not found'))
|
||||
__utils__['boto3.assign_funcs'](__name__, 'cloudfront')
|
||||
return True
|
||||
|
||||
|
||||
def _list_distributions(
|
||||
conn,
|
||||
name=None,
|
||||
region=None,
|
||||
key=None,
|
||||
keyid=None,
|
||||
profile=None,
|
||||
):
|
||||
'''
|
||||
Private function that returns an iterator over all CloudFront distributions.
|
||||
The caller is responsible for all boto-related error handling.
|
||||
|
||||
name
|
||||
(Optional) Only yield the distribution with the given name
|
||||
'''
|
||||
for dl_ in conn.get_paginator('list_distributions').paginate():
|
||||
distribution_list = dl_['DistributionList']
|
||||
if 'Items' not in distribution_list:
|
||||
# If there are no items, AWS omits the `Items` key for some reason
|
||||
continue
|
||||
for partial_dist in distribution_list['Items']:
|
||||
tags = conn.list_tags_for_resource(Resource=partial_dist['ARN'])
|
||||
tags = dict(
|
||||
(kv['Key'], kv['Value']) for kv in tags['Tags']['Items']
|
||||
)
|
||||
|
||||
id_ = partial_dist['Id']
|
||||
if 'Name' not in tags:
|
||||
log.warning(
|
||||
'CloudFront distribution {0} has no Name tag.'.format(id_),
|
||||
)
|
||||
continue
|
||||
distribution_name = tags.pop('Name', None)
|
||||
if name is not None and distribution_name != name:
|
||||
continue
|
||||
|
||||
# NOTE: list_distributions() returns a DistributionList,
|
||||
# which nominally contains a list of Distribution objects.
|
||||
# However, they are mangled in that they are missing values
|
||||
# (`Logging`, `ActiveTrustedSigners`, and `ETag` keys)
|
||||
# and moreover flatten the normally nested DistributionConfig
|
||||
# attributes to the top level.
|
||||
# Hence, we must call get_distribution() to get the full object,
|
||||
# and we cache these objects to help lessen API calls.
|
||||
distribution = _cache_id(
|
||||
'cloudfront',
|
||||
sub_resource=distribution_name,
|
||||
region=region,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
profile=profile,
|
||||
)
|
||||
if distribution:
|
||||
yield (distribution_name, distribution)
|
||||
continue
|
||||
|
||||
dist_with_etag = conn.get_distribution(Id=id_)
|
||||
distribution = {
|
||||
'distribution': dist_with_etag['Distribution'],
|
||||
'etag': dist_with_etag['ETag'],
|
||||
'tags': tags,
|
||||
}
|
||||
_cache_id(
|
||||
'cloudfront',
|
||||
sub_resource=distribution_name,
|
||||
resource_id=distribution,
|
||||
region=region,
|
||||
key=key,
|
||||
keyid=keyid,
|
||||
profile=profile,
|
||||
)
|
||||
yield (distribution_name, distribution)
|
||||
|
||||
|
||||
def get_distribution(name, region=None, key=None, keyid=None, profile=None):
    '''
    Get information about a CloudFront distribution (configuration, tags) with a given name.

    name
        Name of the CloudFront distribution

    region
        Region to connect to

    key
        Secret key to use

    keyid
        Access key to use

    profile
        A dict with region, key, and keyid,
        or a pillar key (string) that contains such a dict.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudfront.get_distribution name=mydistribution profile=awsprofile

    '''
    # Serve from the cache when possible to avoid extra API round trips.
    result = _cache_id(
        'cloudfront',
        sub_resource=name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    if result:
        return {'result': result}

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        for _, matched in _list_distributions(
            conn,
            name=name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        ):
            # _list_distributions should yield only the distribution whose
            # Name tag matches.  Because of caching, duplicate names would
            # keep resolving to the same first entry, so a second hit here
            # means the name is ambiguous and we report an error.
            if result is not None:
                return {
                    'error': 'More than one distribution found with name {0}'.format(name),
                }
            result = matched
    except botocore.exceptions.ClientError as err:
        return {'error': __utils__['boto3.get_error'](err)}
    if not result:
        return {'result': None}

    # Remember the full object so subsequent lookups skip the API.
    _cache_id(
        'cloudfront',
        sub_resource=name,
        resource_id=result,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile,
    )
    return {'result': result}
|
||||
|
||||
|
||||
def export_distributions(region=None, key=None, keyid=None, profile=None):
    '''
    Get details of all CloudFront distributions.
    Produces results that can be used to create an SLS file.

    CLI Example:

    .. code-block:: bash

        salt-call boto_cloudfront.export_distributions --out=txt |\
            sed "s/local: //" > cloudfront_distributions.sls

    '''
    results = OrderedDict()
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        for name, distribution in _list_distributions(
            conn,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        ):
            config = distribution['distribution']['DistributionConfig']
            tags = distribution['tags']

            distribution_sls_data = [
                {'name': name},
                {'config': config},
                {'tags': tags},
            ]
            results['Manage CloudFront distribution {0}'.format(name)] = {
                'boto_cloudfront.present': distribution_sls_data,
            }
    except botocore.exceptions.ClientError:
        # Re-raise rather than returning an error dict, as this is meant to
        # be user-invoked at the CLI as opposed to being called from
        # execution or state modules.  A bare `raise` (instead of the former
        # `raise err`) preserves the original traceback on Python 2 as well.
        raise

    dumper = __utils__['yamldumper.get_dumper']('IndentedSafeOrderedDumper')
    return yaml.dump(
        results,
        default_flow_style=False,
        Dumper=dumper,
    )
|
||||
|
||||
|
||||
def create_distribution(
    name,
    config,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    '''
    Create a CloudFront distribution with the given name, config, and (optionally) tags.

    name
        Name for the CloudFront distribution

    config
        Configuration for the distribution

    tags
        Tags to associate with the distribution

    region
        Region to connect to

    key
        Secret key to use

    keyid
        Access key to use

    profile
        A dict with region, key, and keyid,
        or a pillar key (string) that contains such a dict.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudfront.create_distribution name=mydistribution profile=awsprofile \
            config='{"Comment":"partial configuration","Enabled":true}'
    '''
    if tags is None:
        tags = {}
    # A redundant Name tag is tolerated when it matches `name`; a
    # conflicting one is rejected.
    if tags.get('Name', name) != name:
        return {'error': 'Must not pass `Name` in `tags` but as `name`'}
    tags['Name'] = name
    # Convert to the boto3 tag-list shape.
    tag_items = {
        'Items': [
            {'Key': tag_key, 'Value': tag_value}
            for tag_key, tag_value in six.iteritems(tags)
        ]
    }

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.create_distribution_with_tags(
            DistributionConfigWithTags={
                'DistributionConfig': config,
                'Tags': tag_items,
            },
        )
        # Drop any stale cache entry so the next lookup refetches.
        _cache_id(
            'cloudfront',
            sub_resource=name,
            invalidate=True,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )
    except botocore.exceptions.ClientError as err:
        return {'error': __utils__['boto3.get_error'](err)}

    return {'result': True}
|
||||
|
||||
|
||||
def update_distribution(
    name,
    config,
    tags=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    '''
    Update the config (and optionally tags) for the CloudFront distribution with the given name.

    name
        Name of the CloudFront distribution

    config
        Configuration for the distribution

    tags
        Tags to associate with the distribution

    region
        Region to connect to

    key
        Secret key to use

    keyid
        Access key to use

    profile
        A dict with region, key, and keyid,
        or a pillar key (string) that contains such a dict.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \
            config='{"Comment":"partial configuration","Enabled":true}'
    '''
    # Bug fix: this previously read an undefined name (`distribution_result`
    # vs. the assigned `distribution_ret`), raising NameError on every call.
    distribution_ret = get_distribution(
        name,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile
    )
    if 'error' in distribution_ret:
        return distribution_ret
    dist_with_tags = distribution_ret['result']

    current_distribution = dist_with_tags['distribution']
    current_config = current_distribution['DistributionConfig']
    current_tags = dist_with_tags['tags']
    etag = dist_with_tags['etag']

    # Diff current vs. desired; only push changes when something differs.
    config_diff = __utils__['dictdiffer.deep_diff'](current_config, config)
    if tags:
        tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags)

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if 'old' in config_diff or 'new' in config_diff:
            conn.update_distribution(
                DistributionConfig=config,
                Id=current_distribution['Id'],
                IfMatch=etag,
            )
        if tags:
            arn = current_distribution['ARN']
            if 'new' in tags_diff:
                tags_to_add = {
                    'Items': [
                        {'Key': k, 'Value': v}
                        for k, v in six.iteritems(tags_diff['new'])
                    ],
                }
                conn.tag_resource(
                    Resource=arn,
                    Tags=tags_to_add,
                )
            if 'old' in tags_diff:
                tags_to_remove = {
                    'Items': list(tags_diff['old'].keys()),
                }
                conn.untag_resource(
                    Resource=arn,
                    TagKeys=tags_to_remove,
                )
    except botocore.exceptions.ClientError as err:
        return {'error': __utils__['boto3.get_error'](err)}
    finally:
        # Always invalidate the cache: even a partial update may have
        # changed the remote state.
        _cache_id(
            'cloudfront',
            sub_resource=name,
            invalidate=True,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        )

    return {'result': True}
|
710
salt/modules/cimc.py
Normal file
710
salt/modules/cimc.py
Normal file
|
@ -0,0 +1,710 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Module to provide Cisco UCS compatibility to Salt.
|
||||
|
||||
:codeauthor: :email:`Spencer Ervin <spencer_ervin@hotmail.com>`
|
||||
:maturity: new
|
||||
:depends: none
|
||||
:platform: unix
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
This module accepts connection configuration details either as
|
||||
parameters, or as configuration settings in pillar as a Salt proxy.
|
||||
Options passed into opts will be ignored if options are passed into pillar.
|
||||
|
||||
.. seealso::
|
||||
:prox:`Cisco UCS Proxy Module <salt.proxy.cimc>`
|
||||
|
||||
About
|
||||
=====
|
||||
This execution module was designed to handle connections to a Cisco UCS server. This module adds support to send
|
||||
connections directly to the device through the rest API.
|
||||
|
||||
'''
|
||||
|
||||
# Import Python Libs
from __future__ import absolute_import
import logging

# Import Salt Libs
import salt.exceptions
import salt.proxy.cimc
import salt.utils.platform
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'cimc'
|
||||
|
||||
|
||||
def __virtual__():
    '''
    Only make this execution module available on cimc proxy minions.
    '''
    try:
        # Short-circuit keeps the __opts__ lookup from running on
        # non-proxy minions.
        is_cimc_proxy = (
            salt.utils.platform.is_proxy()
            and __opts__['proxy']['proxytype'] == 'cimc'
        )
    except KeyError:
        # Proxy config is absent or malformed; do not load.
        is_cimc_proxy = False
    if is_cimc_proxy:
        return __virtualname__
    return False, 'The cimc execution module can only be loaded for cimc proxy minions.'
|
||||
|
||||
|
||||
def activate_backup_image(reset=False):
    '''
    Activates the firmware backup image.

    Args:
        reset(bool): Reset the CIMC device on activate.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.activate_backup_image
        salt '*' cimc.activate_backup_image reset=True

    '''
    dn = "sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined"

    # The XML API expects a yes/no string rather than a boolean.
    reset_flag = "yes" if reset is True else "no"

    payload = """<firmwareBootUnit dn='sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined'
    adminState='trigger' image='backup' resetOnActivate='{0}' />""".format(reset_flag)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def create_user(uid=None, username=None, password=None, priv=None):
    '''
    Create a CIMC user with username and password.

    Args:
        uid(int): The user ID slot to create the user account in.

        username(str): The name of the user.

        password(str): The clear text password of the user.

        priv(str): The privilege level of the user.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.create_user 11 username=admin password=foobar priv=admin

    '''
    # Validate in the same order as the documented arguments so the first
    # missing one is reported.
    required = (
        (uid, "The user ID must be specified."),
        (username, "The username must be specified."),
        (password, "The password must be specified."),
        (priv, "The privilege level must be specified."),
    )
    for value, message in required:
        if not value:
            raise salt.exceptions.CommandExecutionError(message)

    dn = "sys/user-ext/user-{0}".format(uid)

    payload = """<aaaUser id="{0}" accountStatus="active" name="{1}" priv="{2}"
              pwd="{3}" dn="sys/user-ext/user-{0}"/>""".format(uid,
                                                               username,
                                                               priv,
                                                               password)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def get_bios_defaults():
    '''
    Get the default values of BIOS tokens.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_bios_defaults

    '''
    return __proxy__['cimc.get_config_resolver_class']('biosPlatformDefaults', True)
|
||||
|
||||
|
||||
def get_bios_settings():
    '''
    Get the C240 server BIOS token values.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_bios_settings

    '''
    return __proxy__['cimc.get_config_resolver_class']('biosSettings', True)
|
||||
|
||||
|
||||
def get_boot_order():
    '''
    Retrieves the configured boot order table.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_boot_order

    '''
    return __proxy__['cimc.get_config_resolver_class']('lsbootDef', True)
|
||||
|
||||
|
||||
def get_cpu_details():
    '''
    Get the CPU product ID details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_cpu_details

    '''
    return __proxy__['cimc.get_config_resolver_class']('pidCatalogCpu', True)
|
||||
|
||||
|
||||
def get_disks():
    '''
    Get the HDD product ID details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_disks

    '''
    return __proxy__['cimc.get_config_resolver_class']('pidCatalogHdd', True)
|
||||
|
||||
|
||||
def get_ethernet_interfaces():
    '''
    Get the adapter Ethernet interface details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_ethernet_interfaces

    '''
    return __proxy__['cimc.get_config_resolver_class']('adaptorHostEthIf', True)
|
||||
|
||||
|
||||
def get_fibre_channel_interfaces():
    '''
    Get the adapter fibre channel interface details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_fibre_channel_interfaces

    '''
    return __proxy__['cimc.get_config_resolver_class']('adaptorHostFcIf', True)
|
||||
|
||||
|
||||
def get_firmware():
    '''
    Retrieves the current running firmware versions of server components.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_firmware

    '''
    return __proxy__['cimc.get_config_resolver_class']('firmwareRunning', False)
|
||||
|
||||
|
||||
def get_ldap():
    '''
    Retrieves LDAP server details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_ldap

    '''
    return __proxy__['cimc.get_config_resolver_class']('aaaLdap', True)
|
||||
|
||||
|
||||
def get_management_interface():
    '''
    Retrieve the management interface details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_management_interface

    '''
    return __proxy__['cimc.get_config_resolver_class']('mgmtIf', False)
|
||||
|
||||
|
||||
def get_memory_token():
    '''
    Get the memory RAS BIOS token.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_memory_token

    '''
    return __proxy__['cimc.get_config_resolver_class']('biosVfSelectMemoryRASConfiguration', False)
|
||||
|
||||
|
||||
def get_memory_unit():
    '''
    Get the IMM/Memory unit product ID details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_memory_unit

    '''
    return __proxy__['cimc.get_config_resolver_class']('pidCatalogDimm', True)
|
||||
|
||||
|
||||
def get_network_adapters():
    '''
    Get the list of network adapters and configuration details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_network_adapters

    '''
    return __proxy__['cimc.get_config_resolver_class']('networkAdapterEthIf', True)
|
||||
|
||||
|
||||
def get_ntp():
    '''
    Retrieves the current running NTP configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_ntp

    '''
    return __proxy__['cimc.get_config_resolver_class']('commNtpProvider', False)
|
||||
|
||||
|
||||
def get_pci_adapters():
    '''
    Get the PCI adapter product ID details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_pci_adapters

    '''
    return __proxy__['cimc.get_config_resolver_class']('pidCatalogPCIAdapter', True)
|
||||
|
||||
|
||||
def get_power_supplies():
    '''
    Retrieves the power supply unit details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_power_supplies

    '''
    return __proxy__['cimc.get_config_resolver_class']('equipmentPsu', False)
|
||||
|
||||
|
||||
def get_snmp_config():
    '''
    Get the snmp configuration details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_snmp_config

    '''
    return __proxy__['cimc.get_config_resolver_class']('commSnmp', False)
|
||||
|
||||
|
||||
def get_syslog():
    '''
    Get the Syslog client-server details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_syslog

    '''
    return __proxy__['cimc.get_config_resolver_class']('commSyslogClient', False)
|
||||
|
||||
|
||||
def get_system_info():
    '''
    Get the system information.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_system_info

    '''
    return __proxy__['cimc.get_config_resolver_class']('computeRackUnit', False)
|
||||
|
||||
|
||||
def get_users():
    '''
    Get the CIMC users.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_users

    '''
    return __proxy__['cimc.get_config_resolver_class']('aaaUser', False)
|
||||
|
||||
|
||||
def get_vic_adapters():
    '''
    Get the VIC adapter general profile details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_vic_adapters

    '''
    return __proxy__['cimc.get_config_resolver_class']('adaptorGenProfile', True)
|
||||
|
||||
|
||||
def get_vic_uplinks():
    '''
    Get the VIC adapter uplink port details.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.get_vic_uplinks

    '''
    return __proxy__['cimc.get_config_resolver_class']('adaptorExtEthIf', True)
|
||||
|
||||
|
||||
def mount_share(name=None,
                remote_share=None,
                remote_file=None,
                mount_type="nfs",
                username=None,
                password=None):
    '''
    Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater.
    The remote share can be either NFS, CIFS, or WWW.

    Some of the advantages of CIMC Mounted vMedia include:
      Communication between mounted media and target stays local (inside datacenter)
      Media mounts can be scripted/automated
      No vKVM requirements for media connection
      Multiple share types supported
      Connections supported through all CIMC interfaces

    Note: CIMC Mounted vMedia is enabled through BIOS configuration.

    Args:
        name(str): The name of the volume on the CIMC device.

        remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW. This
            must be the directory path and not the full path to the remote file.

        remote_file(str): The name of the remote file to mount. It must reside within remote_share.

        mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www.

        username(str): An optional requirement to pass credentials to the remote share. If not provided, an
            unauthenticated connection attempt will be made.

        password(str): An optional requirement to pass a password to the remote share. If not provided, an
            unauthenticated connection attempt will be made.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso

        salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword

    '''
    if not name:
        raise salt.exceptions.CommandExecutionError("The share name must be specified.")

    if not remote_share:
        raise salt.exceptions.CommandExecutionError("The remote share path must be specified.")

    if not remote_file:
        raise salt.exceptions.CommandExecutionError("The remote file name must be specified.")

    # Credentials are only attached when both halves are supplied;
    # otherwise an unauthenticated mount is attempted.
    if username and password:
        mount_options = " mountOptions='username={0},password={1}'".format(username, password)
    else:
        mount_options = ""

    dn = 'sys/svc-ext/vmedia-svc/vmmap-{0}'.format(name)
    # Bug fix: volumeName was previously hard-coded to 'Win12' (left over
    # from an example) instead of using the requested volume name.
    inconfig = """<commVMediaMap dn='sys/svc-ext/vmedia-svc/vmmap-{0}' map='{1}'{2}
    remoteFile='{3}' remoteShare='{4}' status='created'
    volumeName='{0}' />""".format(name, mount_type, mount_options, remote_file, remote_share)

    ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)

    return ret
|
||||
|
||||
|
||||
def reboot():
    '''
    Power cycling the server.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.reboot

    '''
    dn = "sys/rack-unit-1"

    # An immediate power-cycle request on the rack unit.
    payload = """<computeRackUnit adminPower="cycle-immediate" dn="sys/rack-unit-1"></computeRackUnit>"""

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def set_ntp_server(server1='', server2='', server3='', server4=''):
    '''
    Sets the NTP servers configuration. This will also enable the client NTP service.

    Args:
        server1(str): The first IP address or FQDN of the NTP servers.

        server2(str): The second IP address or FQDN of the NTP servers.

        server3(str): The third IP address or FQDN of the NTP servers.

        server4(str): The fourth IP address or FQDN of the NTP servers.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.set_ntp_server 10.10.10.1

        salt '*' cimc.set_ntp_server 10.10.10.1 foo.bar.com

    '''
    dn = "sys/svc-ext/ntp-svc"
    # Unused slots are sent as empty strings; the device ignores them.
    payload = """<commNtpProvider dn="sys/svc-ext/ntp-svc" ntpEnable="yes" ntpServer1="{0}" ntpServer2="{1}"
    ntpServer3="{2}" ntpServer4="{3}"/>""".format(server1, server2, server3, server4)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def set_syslog_server(server=None, type="primary"):
    '''
    Set the SYSLOG server on the host.

    Args:
        server(str): The hostname or IP address of the SYSLOG server.

        type(str): Specifies the type of SYSLOG server. This can either be primary (default) or secondary.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.set_syslog_server foo.bar.com

        salt '*' cimc.set_syslog_server foo.bar.com primary

        salt '*' cimc.set_syslog_server foo.bar.com secondary

    '''
    if not server:
        raise salt.exceptions.CommandExecutionError("The SYSLOG server must be specified.")

    # NOTE: `type` shadows the builtin, but the parameter name is part of
    # the CLI interface and must be preserved.
    if type not in ("primary", "secondary"):
        raise salt.exceptions.CommandExecutionError("The SYSLOG type must be either primary or secondary.")

    dn = "sys/svc-ext/syslog/client-{0}".format(type)
    payload = """<commSyslogClient name='{0}' adminState='enabled' hostname='{1}'
    dn='sys/svc-ext/syslog/client-{0}'> </commSyslogClient>""".format(type, server)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def tftp_update_bios(server=None, path=None):
    '''
    Update the BIOS firmware through TFTP.

    Args:
        server(str): The IP address or hostname of the TFTP server.

        path(str): The TFTP path and filename for the BIOS image.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.tftp_update_bios foo.bar.com HP-SL2.cap

    '''
    if not server:
        raise salt.exceptions.CommandExecutionError("The server name must be specified.")

    if not path:
        raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.")

    dn = "sys/rack-unit-1/bios/fw-updatable"

    payload = """<firmwareUpdatable adminState='trigger' dn='sys/rack-unit-1/bios/fw-updatable'
    protocol='tftp' remoteServer='{0}' remotePath='{1}'
    type='blade-bios' />""".format(server, path)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
||||
|
||||
|
||||
def tftp_update_cimc(server=None, path=None):
    '''
    Update the CIMC firmware through TFTP.

    Args:
        server(str): The IP address or hostname of the TFTP server.

        path(str): The TFTP path and filename for the CIMC image.

    CLI Example:

    .. code-block:: bash

        salt '*' cimc.tftp_update_cimc foo.bar.com HP-SL2.bin

    '''
    if not server:
        raise salt.exceptions.CommandExecutionError("The server name must be specified.")

    if not path:
        raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.")

    dn = "sys/rack-unit-1/mgmt/fw-updatable"

    payload = """<firmwareUpdatable adminState='trigger' dn='sys/rack-unit-1/mgmt/fw-updatable'
    protocol='tftp' remoteServer='{0}' remotePath='{1}'
    type='blade-controller' />""".format(server, path)

    return __proxy__['cimc.set_config_modify'](dn, payload, False)
|
|
@ -2120,12 +2120,7 @@ def script(source,
|
|||
)
|
||||
|
||||
if '__env__' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'__env__\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('__env__')
|
||||
|
||||
if salt.utils.platform.is_windows() and runas and cwd is None:
|
||||
|
@ -2336,12 +2331,7 @@ def script_retcode(source,
|
|||
salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
|
||||
'''
|
||||
if '__env__' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'__env__\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('__env__')
|
||||
|
||||
return script(source=source,
|
||||
|
|
|
@ -186,12 +186,7 @@ def set_file(path, saltenv='base', **kwargs):
|
|||
salt '*' debconf.set_file salt://pathto/pkg.selections
|
||||
'''
|
||||
if '__env__' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'__env__\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('__env__')
|
||||
|
||||
path = __salt__['cp.cache_file'](path, saltenv)
|
||||
|
|
|
@ -2038,19 +2038,12 @@ def build_network_settings(**settings):
|
|||
# Write settings
|
||||
_write_file_network(network, _DEB_NETWORKING_FILE, True)
|
||||
|
||||
# Write hostname to /etc/hostname
|
||||
# Get hostname and domain from opts
|
||||
sline = opts['hostname'].split('.', 1)
|
||||
opts['hostname'] = sline[0]
|
||||
hostname = '{0}\n' . format(opts['hostname'])
|
||||
current_domainname = current_network_settings['domainname']
|
||||
current_searchdomain = current_network_settings['searchdomain']
|
||||
|
||||
# Only write the hostname if it has changed
|
||||
if not opts['hostname'] == current_network_settings['hostname']:
|
||||
if not ('test' in settings and settings['test']):
|
||||
# TODO replace wiht a call to network.mod_hostname instead
|
||||
_write_file_network(hostname, _DEB_HOSTNAME_FILE)
|
||||
|
||||
new_domain = False
|
||||
if len(sline) > 1:
|
||||
new_domainname = sline[1]
|
||||
|
|
|
@ -234,6 +234,7 @@ except ImportError:
|
|||
# pylint: enable=import-error
|
||||
|
||||
HAS_NSENTER = bool(salt.utils.path.which('nsenter'))
|
||||
HUB_PREFIX = 'docker.io/'
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -1489,6 +1490,43 @@ def list_tags():
|
|||
return sorted(ret)
|
||||
|
||||
|
||||
def resolve_tag(name, tags=None):
    '''
    .. versionadded:: 2017.7.2,Oxygen

    Given an image tag, check the locally-pulled tags (using
    :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`) and return
    the matching tag. This helps disambiguate differences on some platforms
    where images from the Docker Hub are prefixed with ``docker.io/``. If an
    image name with no tag is passed, a tag of ``latest`` is assumed.

    If the specified image is not pulled locally, this function will return
    ``False``.

    tags
        An optional Python list of tags to check against. If passed, then
        :py:func:`docker.list_tags <salt.modules.dockermod.list_tags>` will not
        be run to get a list of tags. This is useful when resolving a number of
        tags at the same time.

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.resolve_tag busybox
        salt myminion docker.resolve_tag busybox:latest
    '''
    # Normalize "name[:tag]" into "repo:tag" (tag defaults to "latest").
    candidate = ':'.join(salt.utils.docker.get_repo_tag(name))
    known_tags = list_tags() if tags is None else tags
    if candidate in known_tags:
        return candidate
    # Some platforms store Hub images under a docker.io/ prefix; retry with
    # it unless the caller already supplied one.
    if not name.startswith(HUB_PREFIX):
        prefixed = HUB_PREFIX + candidate
        if prefixed in known_tags:
            return prefixed
    return False
|
||||
|
||||
|
||||
def logs(name):
|
||||
'''
|
||||
Returns the logs for the container. Equivalent to running the ``docker
|
||||
|
@ -5420,7 +5458,7 @@ def sls(name, mods=None, saltenv='base', **kwargs):
|
|||
)
|
||||
if not isinstance(ret, dict):
|
||||
__context__['retcode'] = 1
|
||||
elif not salt.utils.check_state_result(ret):
|
||||
elif not __utils__['state.check_result'](ret):
|
||||
__context__['retcode'] = 2
|
||||
else:
|
||||
__context__['retcode'] = 0
|
||||
|
@ -5494,7 +5532,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
|
|||
# Now execute the state into the container
|
||||
ret = sls(id_, mods, saltenv, **kwargs)
|
||||
# fail if the state was not successful
|
||||
if not dryrun and not salt.utils.check_state_result(ret):
|
||||
if not dryrun and not __utils__['state.check_result'](ret):
|
||||
raise CommandExecutionError(ret)
|
||||
if dryrun is False:
|
||||
ret = commit(id_, name)
|
||||
|
|
|
@ -75,9 +75,20 @@ def _porttree():
|
|||
|
||||
|
||||
def _p_to_cp(p):
|
||||
ret = _porttree().dbapi.xmatch("match-all", p)
|
||||
if ret:
|
||||
return portage.cpv_getkey(ret[0])
|
||||
try:
|
||||
ret = portage.dep_getkey(p)
|
||||
if ret:
|
||||
return ret
|
||||
except portage.exception.InvalidAtom:
|
||||
pass
|
||||
|
||||
try:
|
||||
ret = _porttree().dbapi.xmatch('bestmatch-visible', p)
|
||||
if ret:
|
||||
return portage.dep_getkey(ret)
|
||||
except portage.exception.InvalidAtom:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
@ -91,11 +102,14 @@ def _allnodes():
|
|||
|
||||
|
||||
def _cpv_to_cp(cpv):
|
||||
ret = portage.cpv_getkey(cpv)
|
||||
if ret:
|
||||
return ret
|
||||
else:
|
||||
return cpv
|
||||
try:
|
||||
ret = portage.dep_getkey(cpv)
|
||||
if ret:
|
||||
return ret
|
||||
except portage.exception.InvalidAtom:
|
||||
pass
|
||||
|
||||
return cpv
|
||||
|
||||
|
||||
def _cpv_to_version(cpv):
|
||||
|
|
29
salt/modules/esxcluster.py
Normal file
29
salt/modules/esxcluster.py
Normal file
|
@ -0,0 +1,29 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Module used to access the esxcluster proxy connection methods
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import logging
|
||||
import salt.utils.platform
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__proxyenabled__ = ['esxcluster']
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'esxcluster'
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only work on proxy
|
||||
'''
|
||||
if salt.utils.platform.is_proxy():
|
||||
return __virtualname__
|
||||
return (False, 'Must be run on a proxy minion')
|
||||
|
||||
|
||||
def get_details():
|
||||
return __proxy__['esxcluster.get_details']()
|
|
@ -126,8 +126,8 @@ def _binary_replace(old, new):
|
|||
This function should only be run AFTER it has been determined that the
|
||||
files differ.
|
||||
'''
|
||||
old_isbin = not salt.utils.istextfile(old)
|
||||
new_isbin = not salt.utils.istextfile(new)
|
||||
old_isbin = not __utils__['files.is_text_file'](old)
|
||||
new_isbin = not __utils__['files.is_text_file'](new)
|
||||
if any((old_isbin, new_isbin)):
|
||||
if all((old_isbin, new_isbin)):
|
||||
return u'Replace binary file'
|
||||
|
@ -1436,7 +1436,7 @@ def comment_line(path,
|
|||
raise SaltInvocationError('File not found: {0}'.format(path))
|
||||
|
||||
# Make sure it is a text file
|
||||
if not salt.utils.istextfile(path):
|
||||
if not __utils__['files.is_text_file'](path):
|
||||
raise SaltInvocationError(
|
||||
'Cannot perform string replacements on a binary file: {0}'.format(path))
|
||||
|
||||
|
@ -2180,7 +2180,7 @@ def replace(path,
|
|||
else:
|
||||
raise SaltInvocationError('File not found: {0}'.format(path))
|
||||
|
||||
if not salt.utils.istextfile(path):
|
||||
if not __utils__['files.is_text_file'](path):
|
||||
raise SaltInvocationError(
|
||||
'Cannot perform string replacements on a binary file: {0}'
|
||||
.format(path)
|
||||
|
@ -2497,7 +2497,7 @@ def blockreplace(path,
|
|||
'Only one of append and prepend_if_not_found is permitted'
|
||||
)
|
||||
|
||||
if not salt.utils.istextfile(path):
|
||||
if not __utils__['files.is_text_file'](path):
|
||||
raise SaltInvocationError(
|
||||
'Cannot perform string replacements on a binary file: {0}'
|
||||
.format(path)
|
||||
|
|
|
@ -17,11 +17,12 @@ except ImportError:
|
|||
from pipes import quote as _cmd_quote
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.path
|
||||
import salt.utils.yast
|
||||
import salt.utils.preseed
|
||||
import salt.utils.kickstart
|
||||
import salt.syspaths
|
||||
import salt.utils.kickstart
|
||||
import salt.utils.path
|
||||
import salt.utils.preseed
|
||||
import salt.utils.validate.path
|
||||
import salt.utils.yast
|
||||
from salt.exceptions import SaltInvocationError
|
||||
|
||||
# Import 3rd-party libs
|
||||
|
@ -399,10 +400,15 @@ def _bootstrap_deb(
|
|||
if repo_url is None:
|
||||
repo_url = 'http://ftp.debian.org/debian/'
|
||||
|
||||
if not salt.utils.which('debootstrap'):
|
||||
if not salt.utils.path.which('debootstrap'):
|
||||
log.error('Required tool debootstrap is not installed.')
|
||||
return False
|
||||
|
||||
if static_qemu and not salt.utils.validate.path.is_executable(static_qemu):
|
||||
log.error('Required tool qemu not '
|
||||
'present/readable at: {0}'.format(static_qemu))
|
||||
return False
|
||||
|
||||
if isinstance(pkgs, (list, tuple)):
|
||||
pkgs = ','.join(pkgs)
|
||||
if isinstance(exclude_pkgs, (list, tuple)):
|
||||
|
@ -427,11 +433,13 @@ def _bootstrap_deb(
|
|||
|
||||
__salt__['cmd.run'](deb_args, python_shell=False)
|
||||
|
||||
__salt__['cmd.run'](
|
||||
'cp {qemu} {root}/usr/bin/'.format(
|
||||
qemu=_cmd_quote(static_qemu), root=_cmd_quote(root)
|
||||
if static_qemu:
|
||||
__salt__['cmd.run'](
|
||||
'cp {qemu} {root}/usr/bin/'.format(
|
||||
qemu=_cmd_quote(static_qemu), root=_cmd_quote(root)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
env = {'DEBIAN_FRONTEND': 'noninteractive',
|
||||
'DEBCONF_NONINTERACTIVE_SEEN': 'true',
|
||||
'LC_ALL': 'C',
|
||||
|
|
|
@ -31,7 +31,7 @@ def __virtual__():
|
|||
if __grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'):
|
||||
return __virtualname__
|
||||
return (False, 'The groupadd execution module cannot be loaded: '
|
||||
' only available on Linux, OpenBSD and NetBSD')
|
||||
' only available on Linux, OpenBSD and NetBSD')
|
||||
|
||||
|
||||
def add(name, gid=None, system=False, root=None):
|
||||
|
@ -44,12 +44,12 @@ def add(name, gid=None, system=False, root=None):
|
|||
|
||||
salt '*' group.add foo 3456
|
||||
'''
|
||||
cmd = 'groupadd '
|
||||
cmd = ['groupadd']
|
||||
if gid:
|
||||
cmd += '-g {0} '.format(gid)
|
||||
cmd.append('-g {0}'.format(gid))
|
||||
if system and __grains__['kernel'] != 'OpenBSD':
|
||||
cmd += '-r '
|
||||
cmd += name
|
||||
cmd.append('-r')
|
||||
cmd.append(name)
|
||||
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
|
@ -69,7 +69,7 @@ def delete(name, root=None):
|
|||
|
||||
salt '*' group.delete foo
|
||||
'''
|
||||
cmd = ('groupdel', name)
|
||||
cmd = ['groupdel', name]
|
||||
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
|
@ -140,7 +140,7 @@ def chgid(name, gid, root=None):
|
|||
pre_gid = __salt__['file.group_to_gid'](name)
|
||||
if gid == pre_gid:
|
||||
return True
|
||||
cmd = ('groupmod', '-g', gid, name)
|
||||
cmd = ['groupmod', '-g', gid, name]
|
||||
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
|
@ -170,15 +170,15 @@ def adduser(name, username, root=None):
|
|||
|
||||
if __grains__['kernel'] == 'Linux':
|
||||
if on_redhat_5:
|
||||
cmd = ('gpasswd', '-a', username, name)
|
||||
cmd = ['gpasswd', '-a', username, name]
|
||||
elif on_suse_11:
|
||||
cmd = ('usermod', '-A', name, username)
|
||||
cmd = ['usermod', '-A', name, username]
|
||||
else:
|
||||
cmd = ('gpasswd', '--add', username, name)
|
||||
cmd = ['gpasswd', '--add', username, name]
|
||||
if root is not None:
|
||||
cmd.extend(('-Q', root))
|
||||
else:
|
||||
cmd = ('usermod', '-G', name, username)
|
||||
cmd = ['usermod', '-G', name, username]
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
|
||||
|
@ -208,20 +208,20 @@ def deluser(name, username, root=None):
|
|||
if username in grp_info['members']:
|
||||
if __grains__['kernel'] == 'Linux':
|
||||
if on_redhat_5:
|
||||
cmd = ('gpasswd', '-d', username, name)
|
||||
cmd = ['gpasswd', '-d', username, name]
|
||||
elif on_suse_11:
|
||||
cmd = ('usermod', '-R', name, username)
|
||||
cmd = ['usermod', '-R', name, username]
|
||||
else:
|
||||
cmd = ('gpasswd', '--del', username, name)
|
||||
cmd = ['gpasswd', '--del', username, name]
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||
elif __grains__['kernel'] == 'OpenBSD':
|
||||
out = __salt__['cmd.run_stdout']('id -Gn {0}'.format(username),
|
||||
python_shell=False)
|
||||
cmd = 'usermod -S '
|
||||
cmd += ','.join([g for g in out.split() if g != str(name)])
|
||||
cmd += ' {0}'.format(username)
|
||||
cmd = ['usermod', '-S']
|
||||
cmd.append(','.join([g for g in out.split() if g != str(name)]))
|
||||
cmd.append('{0}'.format(username))
|
||||
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||
else:
|
||||
log.error('group.deluser is not yet supported on this platform')
|
||||
|
@ -249,13 +249,13 @@ def members(name, members_list, root=None):
|
|||
|
||||
if __grains__['kernel'] == 'Linux':
|
||||
if on_redhat_5:
|
||||
cmd = ('gpasswd', '-M', members_list, name)
|
||||
cmd = ['gpasswd', '-M', members_list, name]
|
||||
elif on_suse_11:
|
||||
for old_member in __salt__['group.info'](name).get('members'):
|
||||
__salt__['cmd.run']('groupmod -R {0} {1}'.format(old_member, name), python_shell=False)
|
||||
cmd = ('groupmod', '-A', members_list, name)
|
||||
cmd = ['groupmod', '-A', members_list, name]
|
||||
else:
|
||||
cmd = ('gpasswd', '--members', members_list, name)
|
||||
cmd = ['gpasswd', '--members', members_list, name]
|
||||
if root is not None:
|
||||
cmd.extend(('-R', root))
|
||||
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
|
||||
|
@ -270,7 +270,7 @@ def members(name, members_list, root=None):
|
|||
for user in members_list.split(","):
|
||||
if user:
|
||||
retcode = __salt__['cmd.retcode'](
|
||||
'usermod -G {0} {1}'.format(name, user),
|
||||
['usermod', '-G', name, user],
|
||||
python_shell=False)
|
||||
if not retcode == 0:
|
||||
break
|
||||
|
|
|
@ -318,17 +318,18 @@ class _Section(OrderedDict):
|
|||
yield '{0}[{1}]{0}'.format(os.linesep, self.name)
|
||||
sections_dict = OrderedDict()
|
||||
for name, value in six.iteritems(self):
|
||||
# Handle Comment Lines
|
||||
if com_regx.match(name):
|
||||
yield '{0}{1}'.format(value, os.linesep)
|
||||
# Handle Sections
|
||||
elif isinstance(value, _Section):
|
||||
sections_dict.update({name: value})
|
||||
# Key / Value pairs
|
||||
# Adds spaces between the separator
|
||||
else:
|
||||
yield '{0}{1}{2}{3}'.format(
|
||||
name,
|
||||
(
|
||||
' {0} '.format(self.sep) if self.sep != ' '
|
||||
else self.sep
|
||||
),
|
||||
' {0} '.format(self.sep) if self.sep != ' ' else self.sep,
|
||||
value,
|
||||
os.linesep
|
||||
)
|
||||
|
|
|
@ -1463,6 +1463,8 @@ def _parser():
|
|||
add_arg('--or-mark', dest='or-mark', action='append')
|
||||
add_arg('--xor-mark', dest='xor-mark', action='append')
|
||||
add_arg('--set-mark', dest='set-mark', action='append')
|
||||
add_arg('--nfmask', dest='nfmask', action='append')
|
||||
add_arg('--ctmask', dest='ctmask', action='append')
|
||||
## CONNSECMARK
|
||||
add_arg('--save', dest='save', action='append')
|
||||
add_arg('--restore', dest='restore', action='append')
|
||||
|
|
|
@ -159,7 +159,7 @@ def vgdisplay(vgname=''):
|
|||
return ret
|
||||
|
||||
|
||||
def lvdisplay(lvname=''):
|
||||
def lvdisplay(lvname='', quiet=False):
|
||||
'''
|
||||
Return information about the logical volume(s)
|
||||
|
||||
|
@ -174,7 +174,10 @@ def lvdisplay(lvname=''):
|
|||
cmd = ['lvdisplay', '-c']
|
||||
if lvname:
|
||||
cmd.append(lvname)
|
||||
cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
if quiet:
|
||||
cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet')
|
||||
else:
|
||||
cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False)
|
||||
|
||||
if cmd_ret['retcode'] != 0:
|
||||
return {}
|
||||
|
|
|
@ -204,7 +204,7 @@ def send(func, *args, **kwargs):
|
|||
if mine_func not in __salt__:
|
||||
return False
|
||||
data = {}
|
||||
arg_data = salt.utils.arg_lookup(__salt__[mine_func])
|
||||
arg_data = salt.utils.args.arg_lookup(__salt__[mine_func])
|
||||
func_data = copy.deepcopy(kwargs)
|
||||
for ind, _ in enumerate(arg_data.get('args', [])):
|
||||
try:
|
||||
|
|
|
@ -1282,6 +1282,7 @@ def load_template(template_name,
|
|||
template_user='root',
|
||||
template_group='root',
|
||||
template_mode='755',
|
||||
template_attrs='--------------e----',
|
||||
saltenv=None,
|
||||
template_engine='jinja',
|
||||
skip_verify=False,
|
||||
|
@ -1368,11 +1369,16 @@ def load_template(template_name,
|
|||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
template_user: 755
|
||||
template_mode: 755
|
||||
Permissions of file.
|
||||
|
||||
.. versionadded:: 2016.11.2
|
||||
|
||||
template_attrs: "--------------e----"
|
||||
attributes of file. (see `man lsattr`)
|
||||
|
||||
.. versionadded:: oxygen
|
||||
|
||||
saltenv: base
|
||||
Specifies the template environment.
|
||||
This will influence the relative imports inside the templates.
|
||||
|
@ -1586,6 +1592,7 @@ def load_template(template_name,
|
|||
user=template_user,
|
||||
group=template_group,
|
||||
mode=template_mode,
|
||||
attrs=template_attrs,
|
||||
template=template_engine,
|
||||
context=template_vars,
|
||||
defaults=defaults,
|
||||
|
|
|
@ -145,6 +145,8 @@ def reload_exports():
|
|||
output = __salt__['cmd.run_all'](command)
|
||||
ret['stdout'] = output['stdout']
|
||||
ret['stderr'] = output['stderr']
|
||||
ret['result'] = not output['retcode']
|
||||
# exportfs always returns 0, so retcode is useless
|
||||
# We will consider it an error if stderr is nonempty
|
||||
ret['result'] = output['stderr'] == ''
|
||||
|
||||
return ret
|
||||
|
|
|
@ -183,10 +183,10 @@ def _get_service_info(service):
|
|||
except Exception as exc:
|
||||
log.warning('Unable to get IPv6 {0} for service {1}\n'.format(info, service))
|
||||
|
||||
domains = []
|
||||
for x in service_info.get_property('Domains'):
|
||||
domains.append(str(x))
|
||||
data['ipv4']['dns'] = domains
|
||||
nameservers = []
|
||||
for x in service_info.get_property('Nameservers'):
|
||||
nameservers.append(str(x))
|
||||
data['ipv4']['dns'] = nameservers
|
||||
else:
|
||||
data['up'] = False
|
||||
|
||||
|
@ -351,13 +351,13 @@ def set_dhcp_linklocal_all(interface):
|
|||
ipv4['Gateway'] = dbus.String('', variant_level=1)
|
||||
try:
|
||||
service.set_property('IPv4.Configuration', ipv4)
|
||||
service.set_property('Domains.Configuration', ['']) # reset domains list
|
||||
service.set_property('Nameservers.Configuration', ['']) # reset nameservers list
|
||||
except Exception as exc:
|
||||
raise salt.exceptions.CommandExecutionError('Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc))
|
||||
return True
|
||||
|
||||
|
||||
def set_static_all(interface, address, netmask, gateway, domains):
|
||||
def set_static_all(interface, address, netmask, gateway, nameservers):
|
||||
'''
|
||||
Configure specified adapter to use ipv4 manual settings
|
||||
|
||||
|
@ -365,7 +365,7 @@ def set_static_all(interface, address, netmask, gateway, domains):
|
|||
:param str address: ipv4 address
|
||||
:param str netmask: ipv4 netmask
|
||||
:param str gateway: ipv4 gateway
|
||||
:param str domains: list of domains servers separated by spaces
|
||||
:param str nameservers: list of nameservers servers separated by spaces
|
||||
:return: True if the settings were applied, otherwise an exception will be thrown.
|
||||
:rtype: bool
|
||||
|
||||
|
@ -373,7 +373,7 @@ def set_static_all(interface, address, netmask, gateway, domains):
|
|||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' ip.set_static_all interface-label address netmask gateway domains
|
||||
salt '*' ip.set_static_all interface-label address netmask gateway nameservers
|
||||
'''
|
||||
service = _interface_to_service(interface)
|
||||
if not service:
|
||||
|
@ -381,9 +381,15 @@ def set_static_all(interface, address, netmask, gateway, domains):
|
|||
validate, msg = _validate_ipv4([address, netmask, gateway])
|
||||
if not validate:
|
||||
raise salt.exceptions.CommandExecutionError(msg)
|
||||
validate, msg = _space_delimited_list(domains)
|
||||
if not validate:
|
||||
raise salt.exceptions.CommandExecutionError(msg)
|
||||
if nameservers:
|
||||
validate, msg = _space_delimited_list(nameservers)
|
||||
if not validate:
|
||||
raise salt.exceptions.CommandExecutionError(msg)
|
||||
if not isinstance(nameservers, list):
|
||||
nameservers = nameservers.split(' ')
|
||||
service = _interface_to_service(interface)
|
||||
if not service:
|
||||
raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface))
|
||||
service = pyconnman.ConnService(_add_path(service))
|
||||
ipv4 = service.get_property('IPv4.Configuration')
|
||||
ipv4['Method'] = dbus.String('manual', variant_level=1)
|
||||
|
@ -392,10 +398,8 @@ def set_static_all(interface, address, netmask, gateway, domains):
|
|||
ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1)
|
||||
try:
|
||||
service.set_property('IPv4.Configuration', ipv4)
|
||||
if not isinstance(domains, list):
|
||||
dns = domains.split(' ')
|
||||
domains = dns
|
||||
service.set_property('Domains.Configuration', [dbus.String('{0}'.format(d)) for d in domains])
|
||||
if nameservers:
|
||||
service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers])
|
||||
except Exception as exc:
|
||||
raise salt.exceptions.CommandExecutionError('Couldn\'t set manual settings for service: {0}\nError: {1}\n'.format(service, exc))
|
||||
return True
|
||||
|
|
|
@ -499,6 +499,76 @@ def get_ha_config():
|
|||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_ha_link():
|
||||
'''
|
||||
Show high-availability link-monitoring state.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_ha_link
|
||||
|
||||
'''
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><high-availability><link-monitoring></link-monitoring></high-availability></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_ha_path():
|
||||
'''
|
||||
Show high-availability path-monitoring state.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_ha_path
|
||||
|
||||
'''
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><high-availability><path-monitoring></path-monitoring></high-availability></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_ha_state():
|
||||
'''
|
||||
Show high-availability state information.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_ha_state
|
||||
|
||||
'''
|
||||
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><high-availability><state></state></high-availability></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_ha_transitions():
|
||||
'''
|
||||
Show high-availability transition statistic information.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_ha_transitions
|
||||
|
||||
'''
|
||||
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><high-availability><transitions></transitions></high-availability></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_hostname():
|
||||
'''
|
||||
Get the hostname of the device.
|
||||
|
@ -856,6 +926,23 @@ def get_platform():
|
|||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_session_info():
|
||||
'''
|
||||
Show device session statistics.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_session_info
|
||||
|
||||
'''
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><session><info></info></session></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_snmp_config():
|
||||
'''
|
||||
Get the SNMP configuration from the device.
|
||||
|
@ -979,6 +1066,23 @@ def get_system_state(filter=None):
|
|||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_uncommitted_changes():
|
||||
'''
|
||||
Retrieve a list of all uncommitted changes on the device.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_uncommitted_changes
|
||||
|
||||
'''
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><config><list><changes></changes></list></config></show>'}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_users_config():
|
||||
'''
|
||||
Get the local administrative user account configuration.
|
||||
|
@ -1261,7 +1365,7 @@ def set_authentication_profile(profile=None, deploy=False):
|
|||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/'
|
||||
'authentication-profile/',
|
||||
'authentication-profile',
|
||||
'element': '<authentication-profile>{0}</authentication-profile>'.format(profile)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1297,7 +1401,7 @@ def set_hostname(hostname=None, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system',
|
||||
'element': '<hostname>{0}</hostname>'.format(hostname)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1337,7 +1441,7 @@ def set_management_icmp(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1377,7 +1481,7 @@ def set_management_http(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-http>{0}</disable-http>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1417,7 +1521,7 @@ def set_management_https(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-https>{0}</disable-https>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1457,7 +1561,7 @@ def set_management_ocsp(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-http-ocsp>{0}</disable-http-ocsp>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1497,7 +1601,7 @@ def set_management_snmp(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-snmp>{0}</disable-snmp>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1537,7 +1641,7 @@ def set_management_ssh(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-ssh>{0}</disable-ssh>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1577,7 +1681,7 @@ def set_management_telnet(enabled=True, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
|
||||
'element': '<disable-telnet>{0}</disable-telnet>'.format(value)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
@ -1770,8 +1874,8 @@ def set_permitted_ip(address=None, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip/',
|
||||
'element': '<entry name={0}></entry>'.format(address)}
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip',
|
||||
'element': '<entry name=\'{0}\'></entry>'.format(address)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
||||
|
@ -1806,7 +1910,7 @@ def set_timezone(tz=None, deploy=False):
|
|||
|
||||
query = {'type': 'config',
|
||||
'action': 'set',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone/',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone',
|
||||
'element': '<timezone>{0}</timezone>'.format(tz)}
|
||||
|
||||
ret.update(__proxy__['panos.call'](query))
|
||||
|
|
|
@ -106,12 +106,7 @@ def parse_targets(name=None,
|
|||
salt '*' pkg_resource.parse_targets
|
||||
'''
|
||||
if '__env__' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'__env__\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('__env__')
|
||||
|
||||
if __grains__['os'] == 'MacOS' and sources:
|
||||
|
@ -316,7 +311,8 @@ def format_pkg_list(packages, versions_as_list, attr):
|
|||
'''
|
||||
ret = copy.deepcopy(packages)
|
||||
if attr:
|
||||
requested_attr = set(['version', 'arch', 'install_date', 'install_date_time_t'])
|
||||
requested_attr = set(['epoch', 'version', 'release', 'arch',
|
||||
'install_date', 'install_date_time_t'])
|
||||
|
||||
if attr != 'all':
|
||||
requested_attr &= set(attr + ['version'])
|
||||
|
@ -326,13 +322,25 @@ def format_pkg_list(packages, versions_as_list, attr):
|
|||
for all_attr in ret[name]:
|
||||
filtered_attr = {}
|
||||
for key in requested_attr:
|
||||
filtered_attr[key] = all_attr[key]
|
||||
if all_attr[key]:
|
||||
filtered_attr[key] = all_attr[key]
|
||||
versions.append(filtered_attr)
|
||||
ret[name] = versions
|
||||
return ret
|
||||
|
||||
for name in ret:
|
||||
ret[name] = [d['version'] for d in ret[name]]
|
||||
ret[name] = [format_version(d['epoch'], d['version'], d['release'])
|
||||
for d in ret[name]]
|
||||
if not versions_as_list:
|
||||
stringify(ret)
|
||||
return ret
|
||||
|
||||
|
||||
def format_version(epoch, version, release):
|
||||
'''
|
||||
Formats a version string for list_pkgs.
|
||||
'''
|
||||
full_version = '{0}:{1}'.format(epoch, version) if epoch else version
|
||||
if release:
|
||||
full_version += '-{0}'.format(release)
|
||||
return full_version
|
||||
|
|
|
@ -75,6 +75,8 @@ def _get_config_file(conf, atom):
|
|||
if parts.cp == '*/*':
|
||||
# parts.repo will be empty if there is no repo part
|
||||
relative_path = parts.repo or "gentoo"
|
||||
elif str(parts.cp).endswith('/*'):
|
||||
relative_path = str(parts.cp).split("/")[0] + "_"
|
||||
else:
|
||||
relative_path = os.path.join(*[x for x in os.path.split(parts.cp) if x != '*'])
|
||||
else:
|
||||
|
@ -92,9 +94,20 @@ def _p_to_cp(p):
|
|||
Convert a package name or a DEPEND atom to category/package format.
|
||||
Raises an exception if program name is ambiguous.
|
||||
'''
|
||||
ret = _porttree().dbapi.xmatch("match-all", p)
|
||||
if ret:
|
||||
return portage.cpv_getkey(ret[0])
|
||||
try:
|
||||
ret = portage.dep_getkey(p)
|
||||
if ret:
|
||||
return ret
|
||||
except portage.exception.InvalidAtom:
|
||||
pass
|
||||
|
||||
try:
|
||||
ret = _porttree().dbapi.xmatch('bestmatch-visible', p)
|
||||
if ret:
|
||||
return portage.dep_getkey(ret)
|
||||
except portage.exception.InvalidAtom:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
@ -188,12 +201,7 @@ def _package_conf_file_to_dir(file_name):
|
|||
else:
|
||||
os.rename(path, path + '.tmpbak')
|
||||
os.mkdir(path, 0o755)
|
||||
with salt.utils.files.fopen(path + '.tmpbak') as fh_:
|
||||
for line in fh_:
|
||||
line = line.strip()
|
||||
if line and not line.startswith('#'):
|
||||
append_to_package_conf(file_name, string=line)
|
||||
os.remove(path + '.tmpbak')
|
||||
os.rename(path + '.tmpbak', os.path.join(path, 'tmp'))
|
||||
return True
|
||||
else:
|
||||
os.mkdir(path, 0o755)
|
||||
|
@ -218,7 +226,7 @@ def _package_conf_ordering(conf, clean=True, keep_backup=False):
|
|||
shutil.copy(file_path, file_path + '.bak')
|
||||
backup_files.append(file_path + '.bak')
|
||||
|
||||
if cp[0] == '/' or cp.split('/') > 2:
|
||||
if cp[0] == '/' or len(cp.split('/')) > 2:
|
||||
with salt.utils.files.fopen(file_path) as fp_:
|
||||
rearrange.extend(fp_.readlines())
|
||||
os.remove(file_path)
|
||||
|
|
1239
salt/modules/purefa.py
Normal file
1239
salt/modules/purefa.py
Normal file
File diff suppressed because it is too large
Load diff
|
@ -18,6 +18,8 @@ Module to provide redis functionality to Salt
|
|||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
from salt.ext.six.moves import zip
|
||||
from salt.ext import six
|
||||
from datetime import datetime
|
||||
|
||||
# Import third party libs
|
||||
try:
|
||||
|
@ -513,8 +515,14 @@ def lastsave(host=None, port=None, db=None, password=None):
|
|||
|
||||
salt '*' redis.lastsave
|
||||
'''
|
||||
# Use of %s to get the timestamp is not supported by Python. The reason it
|
||||
# works is because it's passed to the system strftime which may not support
|
||||
# it. See: https://stackoverflow.com/a/11743262
|
||||
server = _connect(host, port, db, password)
|
||||
return int(server.lastsave().strftime("%s"))
|
||||
if six.PY2:
|
||||
return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds())
|
||||
else:
|
||||
return int(server.lastsave().timestamp())
|
||||
|
||||
|
||||
def llen(key, host=None, port=None, db=None, password=None):
|
||||
|
|
|
@ -1013,7 +1013,10 @@ def build_interface(iface, iface_type, enabled, **settings):
|
|||
salt '*' ip.build_interface eth0 eth <settings>
|
||||
'''
|
||||
if __grains__['os'] == 'Fedora':
|
||||
rh_major = '6'
|
||||
if __grains__['osmajorrelease'] >= 18:
|
||||
rh_major = '7'
|
||||
else:
|
||||
rh_major = '6'
|
||||
else:
|
||||
rh_major = __grains__['osrelease'][:1]
|
||||
|
||||
|
|
|
@ -769,10 +769,13 @@ def set_auth_key(
|
|||
with salt.utils.files.fopen(fconfig, 'ab+') as _fh:
|
||||
if new_file is False:
|
||||
# Let's make sure we have a new line at the end of the file
|
||||
_fh.seek(1024, 2)
|
||||
if not _fh.read(1024).rstrip(six.b(' ')).endswith(six.b('\n')):
|
||||
_fh.seek(0, 2)
|
||||
_fh.write(six.b('\n'))
|
||||
_fh.seek(0, 2)
|
||||
if _fh.tell() > 0:
|
||||
# File isn't empty, check if last byte is a newline
|
||||
# If not, add one
|
||||
_fh.seek(-1, 2)
|
||||
if _fh.read(1) != six.b('\n'):
|
||||
_fh.write(six.b('\n'))
|
||||
if six.PY3:
|
||||
auth_line = auth_line.encode(__salt_system_encoding__)
|
||||
_fh.write(auth_line)
|
||||
|
|
|
@ -98,8 +98,7 @@ def _set_retcode(ret, highstate=None):
|
|||
if isinstance(ret, list):
|
||||
__context__['retcode'] = 1
|
||||
return
|
||||
if not salt.utils.check_state_result(ret, highstate=highstate):
|
||||
|
||||
if not __utils__['state.check_result'](ret, highstate=highstate):
|
||||
__context__['retcode'] = 2
|
||||
|
||||
|
||||
|
@ -121,7 +120,7 @@ def _wait(jid):
|
|||
Wait for all previously started state jobs to finish running
|
||||
'''
|
||||
if jid is None:
|
||||
jid = salt.utils.jid.gen_jid()
|
||||
jid = salt.utils.jid.gen_jid(__opts__)
|
||||
states = _prior_running_states(jid)
|
||||
while states:
|
||||
time.sleep(1)
|
||||
|
@ -316,7 +315,7 @@ def low(data, queue=False, **kwargs):
|
|||
ret = st_.call(data)
|
||||
if isinstance(ret, list):
|
||||
__context__['retcode'] = 1
|
||||
if salt.utils.check_state_result(ret):
|
||||
if __utils__['state.check_result'](ret):
|
||||
__context__['retcode'] = 2
|
||||
return ret
|
||||
|
||||
|
@ -397,12 +396,7 @@ def template(tem, queue=False, **kwargs):
|
|||
salt '*' state.template '<Path to template on the minion>'
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
|
@ -839,12 +833,7 @@ def highstate(test=None, queue=False, **kwargs):
|
|||
opts['test'] = _get_test_value(test, **kwargs)
|
||||
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
if 'saltenv' in kwargs:
|
||||
|
@ -1006,12 +995,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
|
|||
'''
|
||||
concurrent = kwargs.get('concurrent', False)
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
# Modification to __opts__ lost after this if-else
|
||||
|
@ -1489,12 +1473,7 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
|
|||
salt '*' state.show_low_sls foo saltenv=dev
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
|
@ -1580,12 +1559,7 @@ def show_sls(mods, test=None, queue=False, **kwargs):
|
|||
salt '*' state.show_sls core,edit.vim dev
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
|
@ -1656,12 +1630,7 @@ def show_top(queue=False, **kwargs):
|
|||
salt '*' state.show_top
|
||||
'''
|
||||
if 'env' in kwargs:
|
||||
salt.utils.versions.warn_until(
|
||||
'Oxygen',
|
||||
'Parameter \'env\' has been detected in the argument list. This '
|
||||
'parameter is no longer used and has been replaced by \'saltenv\' '
|
||||
'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
|
||||
)
|
||||
# "env" is not supported; Use "saltenv".
|
||||
kwargs.pop('env')
|
||||
|
||||
conflict = _check_queue(queue, kwargs)
|
||||
|
|
|
@ -12,8 +12,8 @@ import logging
|
|||
import salt.loader
|
||||
import salt.runner
|
||||
import salt.state
|
||||
import salt.utils
|
||||
import salt.utils.schema as S
|
||||
import salt.utils.args
|
||||
import salt.utils.schema
|
||||
from salt.utils.doc import strip_rst as _strip_rst
|
||||
from salt.ext.six.moves import zip
|
||||
|
||||
|
@ -450,7 +450,7 @@ def argspec(module=''):
|
|||
salt '*' sys.argspec 'pkg.*'
|
||||
|
||||
'''
|
||||
return salt.utils.argspec_report(__salt__, module)
|
||||
return salt.utils.args.argspec_report(__salt__, module)
|
||||
|
||||
|
||||
def state_argspec(module=''):
|
||||
|
@ -476,7 +476,7 @@ def state_argspec(module=''):
|
|||
|
||||
'''
|
||||
st_ = salt.state.State(__opts__)
|
||||
return salt.utils.argspec_report(st_.states, module)
|
||||
return salt.utils.args.argspec_report(st_.states, module)
|
||||
|
||||
|
||||
def returner_argspec(module=''):
|
||||
|
@ -502,7 +502,7 @@ def returner_argspec(module=''):
|
|||
|
||||
'''
|
||||
returners_ = salt.loader.returners(__opts__, [])
|
||||
return salt.utils.argspec_report(returners_, module)
|
||||
return salt.utils.args.argspec_report(returners_, module)
|
||||
|
||||
|
||||
def runner_argspec(module=''):
|
||||
|
@ -527,7 +527,7 @@ def runner_argspec(module=''):
|
|||
salt '*' sys.runner_argspec 'winrepo.*'
|
||||
'''
|
||||
run_ = salt.runner.Runner(__opts__)
|
||||
return salt.utils.argspec_report(run_.functions, module)
|
||||
return salt.utils.args.argspec_report(run_.functions, module)
|
||||
|
||||
|
||||
def list_state_functions(*args, **kwargs): # pylint: disable=unused-argument
|
||||
|
@ -844,28 +844,28 @@ def _argspec_to_schema(mod, spec):
|
|||
}
|
||||
|
||||
for i in args_req:
|
||||
types[i] = S.OneOfItem(items=(
|
||||
S.BooleanItem(title=i, description=i, required=True),
|
||||
S.IntegerItem(title=i, description=i, required=True),
|
||||
S.NumberItem(title=i, description=i, required=True),
|
||||
S.StringItem(title=i, description=i, required=True),
|
||||
types[i] = salt.utils.schema.OneOfItem(items=(
|
||||
salt.utils.schema.BooleanItem(title=i, description=i, required=True),
|
||||
salt.utils.schema.IntegerItem(title=i, description=i, required=True),
|
||||
salt.utils.schema.NumberItem(title=i, description=i, required=True),
|
||||
salt.utils.schema.StringItem(title=i, description=i, required=True),
|
||||
|
||||
# S.ArrayItem(title=i, description=i, required=True),
|
||||
# S.DictItem(title=i, description=i, required=True),
|
||||
))
|
||||
|
||||
for i, j in args_defaults:
|
||||
types[i] = S.OneOfItem(items=(
|
||||
S.BooleanItem(title=i, description=i, default=j),
|
||||
S.IntegerItem(title=i, description=i, default=j),
|
||||
S.NumberItem(title=i, description=i, default=j),
|
||||
S.StringItem(title=i, description=i, default=j),
|
||||
types[i] = salt.utils.schema.OneOfItem(items=(
|
||||
salt.utils.schema.BooleanItem(title=i, description=i, default=j),
|
||||
salt.utils.schema.IntegerItem(title=i, description=i, default=j),
|
||||
salt.utils.schema.NumberItem(title=i, description=i, default=j),
|
||||
salt.utils.schema.StringItem(title=i, description=i, default=j),
|
||||
|
||||
# S.ArrayItem(title=i, description=i, default=j),
|
||||
# S.DictItem(title=i, description=i, default=j),
|
||||
))
|
||||
|
||||
return type(mod, (S.Schema,), types).serialize()
|
||||
return type(mod, (salt.utils.schema.Schema,), types).serialize()
|
||||
|
||||
|
||||
def state_schema(module=''):
|
||||
|
|
|
@ -181,7 +181,10 @@ def has_settable_hwclock():
|
|||
salt '*' system.has_settable_hwclock
|
||||
'''
|
||||
if salt.utils.path.which_bin(['hwclock']) is not None:
|
||||
res = __salt__['cmd.run_all'](['hwclock', '--test', '--systohc'], python_shell=False)
|
||||
res = __salt__['cmd.run_all'](
|
||||
['hwclock', '--test', '--systohc'], python_shell=False,
|
||||
output_loglevel='quiet', ignore_retcode=True
|
||||
)
|
||||
return res['retcode'] == 0
|
||||
return False
|
||||
|
||||
|
|
459
salt/modules/textfsm_mod.py
Normal file
459
salt/modules/textfsm_mod.py
Normal file
|
@ -0,0 +1,459 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
TextFSM
|
||||
=======
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Execution module that processes plain text and extracts data
|
||||
using TextFSM templates. The output is presented in JSON serializable
|
||||
data, and can be easily re-used in other modules, or directly
|
||||
inside the renderer (Jinja, Mako, Genshi, etc.).
|
||||
|
||||
:depends: - textfsm Python library
|
||||
|
||||
.. note::
|
||||
|
||||
For Python 2/3 compatibility, it is more recommended to
|
||||
install the ``jtextfsm`` library: ``pip install jtextfsm``.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import python libs
|
||||
import os
|
||||
import logging
|
||||
|
||||
# Import third party modules
|
||||
try:
|
||||
import textfsm
|
||||
HAS_TEXTFSM = True
|
||||
except ImportError:
|
||||
HAS_TEXTFSM = False
|
||||
|
||||
try:
|
||||
import clitable
|
||||
HAS_CLITABLE = True
|
||||
except ImportError:
|
||||
HAS_CLITABLE = False
|
||||
|
||||
try:
|
||||
from salt.utils.files import fopen
|
||||
except ImportError:
|
||||
from salt.utils import fopen
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'textfsm'
|
||||
__proxyenabled__ = ['*']
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load this execution module if TextFSM is installed.
|
||||
'''
|
||||
if HAS_TEXTFSM:
|
||||
return __virtualname__
|
||||
return (False, 'The textfsm execution module failed to load: requires the textfsm library.')
|
||||
|
||||
|
||||
def _clitable_to_dict(objects, fsm_handler):
|
||||
'''
|
||||
Converts TextFSM cli_table object to list of dictionaries.
|
||||
'''
|
||||
objs = []
|
||||
log.debug('Cli Table:')
|
||||
log.debug(objects)
|
||||
log.debug('FSM handler:')
|
||||
log.debug(fsm_handler)
|
||||
for row in objects:
|
||||
temp_dict = {}
|
||||
for index, element in enumerate(row):
|
||||
temp_dict[fsm_handler.header[index].lower()] = element
|
||||
objs.append(temp_dict)
|
||||
log.debug('Extraction result:')
|
||||
log.debug(objs)
|
||||
return objs
|
||||
|
||||
|
||||
def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'):
|
||||
r'''
|
||||
Extracts the data entities from the unstructured
|
||||
raw text sent as input and returns the data
|
||||
mapping, processing using the TextFSM template.
|
||||
|
||||
template_path
|
||||
The path to the TextFSM template.
|
||||
This can be specified using the absolute path
|
||||
to the file, or using one of the following URL schemes:
|
||||
|
||||
- ``salt://``, to fetch the template from the Salt fileserver.
|
||||
- ``http://`` or ``https://``
|
||||
- ``ftp://``
|
||||
- ``s3://``
|
||||
- ``swift://``
|
||||
|
||||
raw_text: ``None``
|
||||
The unstructured text to be parsed.
|
||||
|
||||
raw_text_file: ``None``
|
||||
Text file to read, having the raw text to be parsed using the TextFSM template.
|
||||
Supports the same URL schemes as the ``template_path`` argument.
|
||||
|
||||
saltenv: ``base``
|
||||
Salt fileserver envrionment from which to retrieve the file.
|
||||
Ignored if ``template_path`` is not a ``salt://`` URL.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' textfsm.extract salt://textfsm/juniper_version_template raw_text_file=s3://junos_ver.txt
|
||||
salt '*' textfsm.extract http://some-server/textfsm/juniper_version_template raw_text='Hostname: router.abc ... snip ...'
|
||||
|
||||
Jinja template example:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- set raw_text = 'Hostname: router.abc ... snip ...' -%}
|
||||
{%- set textfsm_extract = salt.textfsm.extract('https://some-server/textfsm/juniper_version_template', raw_text) -%}
|
||||
|
||||
Raw text example:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Hostname: router.abc
|
||||
Model: mx960
|
||||
JUNOS Base OS boot [9.1S3.5]
|
||||
JUNOS Base OS Software Suite [9.1S3.5]
|
||||
JUNOS Kernel Software Suite [9.1S3.5]
|
||||
JUNOS Crypto Software Suite [9.1S3.5]
|
||||
JUNOS Packet Forwarding Engine Support (M/T Common) [9.1S3.5]
|
||||
JUNOS Packet Forwarding Engine Support (MX Common) [9.1S3.5]
|
||||
JUNOS Online Documentation [9.1S3.5]
|
||||
JUNOS Routing Software Suite [9.1S3.5]
|
||||
|
||||
TextFSM Example:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Value Chassis (\S+)
|
||||
Value Required Model (\S+)
|
||||
Value Boot (.*)
|
||||
Value Base (.*)
|
||||
Value Kernel (.*)
|
||||
Value Crypto (.*)
|
||||
Value Documentation (.*)
|
||||
Value Routing (.*)
|
||||
|
||||
Start
|
||||
# Support multiple chassis systems.
|
||||
^\S+:$$ -> Continue.Record
|
||||
^${Chassis}:$$
|
||||
^Model: ${Model}
|
||||
^JUNOS Base OS boot \[${Boot}\]
|
||||
^JUNOS Software Release \[${Base}\]
|
||||
^JUNOS Base OS Software Suite \[${Base}\]
|
||||
^JUNOS Kernel Software Suite \[${Kernel}\]
|
||||
^JUNOS Crypto Software Suite \[${Crypto}\]
|
||||
^JUNOS Online Documentation \[${Documentation}\]
|
||||
^JUNOS Routing Software Suite \[${Routing}\]
|
||||
|
||||
Output example:
|
||||
|
||||
.. code-block:: json
|
||||
|
||||
{
|
||||
"comment": "",
|
||||
"result": true,
|
||||
"out": [
|
||||
{
|
||||
"kernel": "9.1S3.5",
|
||||
"documentation": "9.1S3.5",
|
||||
"boot": "9.1S3.5",
|
||||
"crypto": "9.1S3.5",
|
||||
"chassis": "",
|
||||
"routing": "9.1S3.5",
|
||||
"base": "9.1S3.5",
|
||||
"model": "mx960"
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
ret = {
|
||||
'result': False,
|
||||
'comment': '',
|
||||
'out': None
|
||||
}
|
||||
log.debug('Using the saltenv: {}'.format(saltenv))
|
||||
log.debug('Caching {} using the Salt fileserver'.format(template_path))
|
||||
tpl_cached_path = __salt__['cp.cache_file'](template_path, saltenv=saltenv)
|
||||
if tpl_cached_path is False:
|
||||
ret['comment'] = 'Unable to read the TextFSM template from {}'.format(template_path)
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
try:
|
||||
log.debug('Reading TextFSM template from cache path: {}'.format(tpl_cached_path))
|
||||
# Disabling pylint W8470 to nto complain about fopen.
|
||||
# Unfortunately textFSM needs the file handle rather than the content...
|
||||
# pylint: disable=W8470
|
||||
tpl_file_handle = fopen(tpl_cached_path, 'r')
|
||||
# pylint: disable=W8470
|
||||
log.debug(tpl_file_handle.read())
|
||||
tpl_file_handle.seek(0) # move the object position back at the top of the file
|
||||
fsm_handler = textfsm.TextFSM(tpl_file_handle)
|
||||
except textfsm.TextFSMTemplateError as tfte:
|
||||
log.error('Unable to parse the TextFSM template', exc_info=True)
|
||||
ret['comment'] = 'Unable to parse the TextFSM template from {}: {}. Please check the logs.'.format(
|
||||
template_path, tfte)
|
||||
return ret
|
||||
if not raw_text and raw_text_file:
|
||||
log.debug('Trying to read the raw input from {}'.format(raw_text_file))
|
||||
raw_text = __salt__['cp.get_file_str'](raw_text_file, saltenv=saltenv)
|
||||
if raw_text is False:
|
||||
ret['comment'] = 'Unable to read from {}. Please specify a valid input file or text.'.format(raw_text_file)
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
if not raw_text:
|
||||
ret['comment'] = 'Please specify a valid input file or text.'
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
log.debug('Processing the raw text:')
|
||||
log.debug(raw_text)
|
||||
objects = fsm_handler.ParseText(raw_text)
|
||||
ret['out'] = _clitable_to_dict(objects, fsm_handler)
|
||||
ret['result'] = True
|
||||
return ret
|
||||
|
||||
|
||||
def index(command,
|
||||
platform=None,
|
||||
platform_grain_name=None,
|
||||
platform_column_name=None,
|
||||
output=None,
|
||||
output_file=None,
|
||||
textfsm_path=None,
|
||||
index_file=None,
|
||||
saltenv='base',
|
||||
include_empty=False,
|
||||
include_pat=None,
|
||||
exclude_pat=None):
|
||||
'''
|
||||
Dynamically identify the template required to extract the
|
||||
information from the unstructured raw text.
|
||||
|
||||
The output has the same structure as the ``extract`` execution
|
||||
function, the difference being that ``index`` is capable
|
||||
to identify what template to use, based on the platform
|
||||
details and the ``command``.
|
||||
|
||||
command
|
||||
The command executed on the device, to get the output.
|
||||
|
||||
platform
|
||||
The platform name, as defined in the TextFSM index file.
|
||||
|
||||
.. note::
|
||||
For ease of use, it is recommended to define the TextFSM
|
||||
indexfile with values that can be matches using the grains.
|
||||
|
||||
platform_grain_name
|
||||
The name of the grain used to identify the platform name
|
||||
in the TextFSM index file.
|
||||
|
||||
.. note::
|
||||
This option can be also specified in the minion configuration
|
||||
file or pillar as ``textfsm_platform_grain``.
|
||||
|
||||
.. note::
|
||||
This option is ignored when ``platform`` is specified.
|
||||
|
||||
platform_column_name: ``Platform``
|
||||
The column name used to identify the platform,
|
||||
exactly as specified in the TextFSM index file.
|
||||
Default: ``Platform``.
|
||||
|
||||
.. note::
|
||||
This is field is case sensitive, make sure
|
||||
to assign the correct value to this option,
|
||||
exactly as defined in the index file.
|
||||
|
||||
.. note::
|
||||
This option can be also specified in the minion configuration
|
||||
file or pillar as ``textfsm_platform_column_name``.
|
||||
|
||||
output
|
||||
The raw output from the device, to be parsed
|
||||
and extract the structured data.
|
||||
|
||||
output_file
|
||||
The path to a file that contains the raw output from the device,
|
||||
used to extract the structured data.
|
||||
This option supports the usual Salt-specific schemes: ``file://``,
|
||||
``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://``.
|
||||
|
||||
textfsm_path
|
||||
The path where the TextFSM templates can be found. This can be either
|
||||
absolute path on the server, either specified using the following URL
|
||||
schemes: ``file://``, ``salt://``, ``http://``, ``https://``, ``ftp://``,
|
||||
``s3://``, ``swift://``.
|
||||
|
||||
.. note::
|
||||
This needs to be a directory with a flat structure, having an
|
||||
index file (whose name can be specified using the ``index_file`` option)
|
||||
and a number of TextFSM templates.
|
||||
|
||||
.. note::
|
||||
This option can be also specified in the minion configuration
|
||||
file or pillar as ``textfsm_path``.
|
||||
|
||||
index_file: ``index``
|
||||
The name of the TextFSM index file, under the ``textfsm_path``. Default: ``index``.
|
||||
|
||||
.. note::
|
||||
This option can be also specified in the minion configuration
|
||||
file or pillar as ``textfsm_index_file``.
|
||||
|
||||
saltenv: ``base``
|
||||
Salt fileserver envrionment from which to retrieve the file.
|
||||
Ignored if ``textfsm_path`` is not a ``salt://`` URL.
|
||||
|
||||
include_empty: ``False``
|
||||
Include empty files under the ``textfsm_path``.
|
||||
|
||||
include_pat
|
||||
Glob or regex to narrow down the files cached from the given path.
|
||||
If matching with a regex, the regex must be prefixed with ``E@``,
|
||||
otherwise the expression will be interpreted as a glob.
|
||||
|
||||
exclude_pat
|
||||
Glob or regex to exclude certain files from being cached from the given path.
|
||||
If matching with a regex, the regex must be prefixed with ``E@``,
|
||||
otherwise the expression will be interpreted as a glob.
|
||||
|
||||
.. note::
|
||||
If used with ``include_pat``, files matching this pattern will be
|
||||
excluded from the subset of files defined by ``include_pat``.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' textfsm.index 'sh ver' platform=Juniper output_file=salt://textfsm/juniper_version_example textfsm_path=salt://textfsm/
|
||||
salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=ftp://textfsm/ platform_column_name=Vendor
|
||||
salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=https://some-server/textfsm/ platform_column_name=Vendor platform_grain_name=vendor
|
||||
|
||||
TextFSM index file example:
|
||||
|
||||
``salt://textfsm/index``
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Template, Hostname, Vendor, Command
|
||||
juniper_version_template, .*, Juniper, sh[[ow]] ve[[rsion]]
|
||||
|
||||
The usage can be simplified,
|
||||
by defining (some of) the following options: ``textfsm_platform_grain``,
|
||||
``textfsm_path``, ``textfsm_platform_column_name``, or ``textfsm_index_file``,
|
||||
in the (proxy) minion configuration file or pillar.
|
||||
|
||||
Configuration example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
textfsm_platform_grain: vendor
|
||||
textfsm_path: salt://textfsm/
|
||||
textfsm_platform_column_name: Vendor
|
||||
|
||||
And the CLI usage becomes as simple as:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example
|
||||
|
||||
Usgae inside a Jinja template:
|
||||
|
||||
.. code-block:: jinja
|
||||
|
||||
{%- set command = 'sh ver' -%}
|
||||
{%- set output = salt.net.cli(command) -%}
|
||||
{%- set textfsm_extract = salt.textfsm.index(command, output=output) -%}
|
||||
'''
|
||||
ret = {
|
||||
'out': None,
|
||||
'result': False,
|
||||
'comment': ''
|
||||
}
|
||||
if not HAS_CLITABLE:
|
||||
ret['comment'] = 'TextFSM doesnt seem that has clitable embedded.'
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
if not platform:
|
||||
platform_grain_name = __opts__.get('textfsm_platform_grain') or\
|
||||
__pillar__.get('textfsm_platform_grain', platform_grain_name)
|
||||
if platform_grain_name:
|
||||
log.debug('Using the {} grain to identify the platform name'.format(platform_grain_name))
|
||||
platform = __grains__.get(platform_grain_name)
|
||||
if not platform:
|
||||
ret['comment'] = 'Unable to identify the platform name using the {} grain.'.format(platform_grain_name)
|
||||
return ret
|
||||
log.info('Using platform: {}'.format(platform))
|
||||
else:
|
||||
ret['comment'] = 'No platform specified, no platform grain identifier configured.'
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
if not textfsm_path:
|
||||
log.debug('No TextFSM templates path specified, trying to look into the opts and pillar')
|
||||
textfsm_path = __opts__.get('textfsm_path') or __pillar__.get('textfsm_path')
|
||||
if not textfsm_path:
|
||||
ret['comment'] = 'No TextFSM templates path specified. Please configure in opts/pillar/function args.'
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
log.debug('Using the saltenv: {}'.format(saltenv))
|
||||
log.debug('Caching {} using the Salt fileserver'.format(textfsm_path))
|
||||
textfsm_cachedir_ret = __salt__['cp.cache_dir'](textfsm_path,
|
||||
saltenv=saltenv,
|
||||
include_empty=include_empty,
|
||||
include_pat=include_pat,
|
||||
exclude_pat=exclude_pat)
|
||||
log.debug('Cache fun return:')
|
||||
log.debug(textfsm_cachedir_ret)
|
||||
if not textfsm_cachedir_ret:
|
||||
ret['comment'] = 'Unable to fetch from {}. Is the TextFSM path correctly specified?'.format(textfsm_path)
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
textfsm_cachedir = os.path.dirname(textfsm_cachedir_ret[0]) # first item
|
||||
index_file = __opts__.get('textfsm_index_file') or __pillar__.get('textfsm_index_file', 'index')
|
||||
index_file_path = os.path.join(textfsm_cachedir, index_file)
|
||||
log.debug('Using the cached index file: {}'.format(index_file_path))
|
||||
log.debug('TextFSM templates cached under: {}'.format(textfsm_cachedir))
|
||||
textfsm_obj = clitable.CliTable(index_file_path, textfsm_cachedir)
|
||||
attrs = {
|
||||
'Command': command
|
||||
}
|
||||
platform_column_name = __opts__.get('textfsm_platform_column_name') or\
|
||||
__pillar__.get('textfsm_platform_column_name', 'Platform')
|
||||
log.info('Using the TextFSM platform idenfiticator: {}'.format(platform_column_name))
|
||||
attrs[platform_column_name] = platform
|
||||
log.debug('Processing the TextFSM index file using the attributes: {}'.format(attrs))
|
||||
if not output and output_file:
|
||||
log.debug('Processing the output from {}'.format(output_file))
|
||||
output = __salt__['cp.get_file_str'](output_file, saltenv=saltenv)
|
||||
if output is False:
|
||||
ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(output_file)
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
if not output:
|
||||
ret['comment'] = 'Please specify a valid output text or file'
|
||||
log.error(ret['comment'])
|
||||
return ret
|
||||
log.debug('Processing the raw text:')
|
||||
log.debug(output)
|
||||
try:
|
||||
# Parse output through template
|
||||
textfsm_obj.ParseCmd(output, attrs)
|
||||
ret['out'] = _clitable_to_dict(textfsm_obj, textfsm_obj)
|
||||
ret['result'] = True
|
||||
except clitable.CliTableError as cterr:
|
||||
log.error('Unable to proces the CliTable', exc_info=True)
|
||||
ret['comment'] = 'Unable to process the output: {}'.format(cterr)
|
||||
return ret
|
|
@ -200,12 +200,10 @@ def create(path,
|
|||
for entry in extra_search_dir:
|
||||
cmd.append('--extra-search-dir={0}'.format(entry))
|
||||
if never_download is True:
|
||||
if virtualenv_version_info >= (1, 10):
|
||||
if virtualenv_version_info >= (1, 10) and virtualenv_version_info < (14, 0, 0):
|
||||
log.info(
|
||||
'The virtualenv \'--never-download\' option has been '
|
||||
'deprecated in virtualenv(>=1.10), as such, the '
|
||||
'\'never_download\' option to `virtualenv.create()` has '
|
||||
'also been deprecated and it\'s not necessary anymore.'
|
||||
'--never-download was deprecated in 1.10.0, but reimplemented in 14.0.0. '
|
||||
'If this feature is needed, please install a supported virtualenv version.'
|
||||
)
|
||||
else:
|
||||
cmd.append('--never-download')
|
||||
|
|
|
@ -195,7 +195,7 @@ else:
|
|||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'vsphere'
|
||||
__proxyenabled__ = ['esxi', 'esxdatacenter']
|
||||
__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter']
|
||||
|
||||
|
||||
def __virtual__():
|
||||
|
@ -227,6 +227,8 @@ def _get_proxy_connection_details():
|
|||
proxytype = get_proxy_type()
|
||||
if proxytype == 'esxi':
|
||||
details = __salt__['esxi.get_details']()
|
||||
elif proxytype == 'esxcluster':
|
||||
details = __salt__['esxcluster.get_details']()
|
||||
elif proxytype == 'esxdatacenter':
|
||||
details = __salt__['esxdatacenter.get_details']()
|
||||
else:
|
||||
|
@ -267,7 +269,7 @@ def gets_service_instance_via_proxy(fn):
|
|||
proxy details and passes the connection (vim.ServiceInstance) to
|
||||
the decorated function.
|
||||
|
||||
Supported proxies: esxi, esxdatacenter.
|
||||
Supported proxies: esxi, esxcluster, esxdatacenter.
|
||||
|
||||
Notes:
|
||||
1. The decorated function must have a ``service_instance`` parameter
|
||||
|
@ -354,7 +356,7 @@ def gets_service_instance_via_proxy(fn):
|
|||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxdatacenter')
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
|
||||
def get_service_instance_via_proxy(service_instance=None):
|
||||
'''
|
||||
Returns a service instance to the proxied endpoint (vCenter/ESXi host).
|
||||
|
@ -374,7 +376,7 @@ def get_service_instance_via_proxy(service_instance=None):
|
|||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxdatacenter')
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
|
||||
def disconnect(service_instance):
|
||||
'''
|
||||
Disconnects from a vCenter or ESXi host
|
||||
|
@ -1909,7 +1911,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None,
|
|||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxi', 'esxdatacenter')
|
||||
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
|
||||
@gets_service_instance_via_proxy
|
||||
def test_vcenter_connection(service_instance=None):
|
||||
'''
|
||||
|
@ -3598,7 +3600,7 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N
|
|||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxdatacenter')
|
||||
@supports_proxies('esxdatacenter', 'esxcluster')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_datacenters_via_proxy(datacenter_names=None, service_instance=None):
|
||||
'''
|
||||
|
@ -4294,3 +4296,14 @@ def _get_esxdatacenter_proxy_details():
|
|||
return det.get('vcenter'), det.get('username'), det.get('password'), \
|
||||
det.get('protocol'), det.get('port'), det.get('mechanism'), \
|
||||
det.get('principal'), det.get('domain'), det.get('datacenter')
|
||||
|
||||
|
||||
def _get_esxcluster_proxy_details():
|
||||
'''
|
||||
Returns the running esxcluster's proxy details
|
||||
'''
|
||||
det = __salt__['esxcluster.get_details']()
|
||||
return det.get('vcenter'), det.get('username'), det.get('password'), \
|
||||
det.get('protocol'), det.get('port'), det.get('mechanism'), \
|
||||
det.get('principal'), det.get('domain'), det.get('datacenter'), \
|
||||
det.get('cluster')
|
||||
|
|
|
@ -837,6 +837,9 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
|
|||
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
|
||||
# Modify IP spec to IIS 7.5 format
|
||||
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
|
||||
# win 2008 uses the following format: ip!port and not ip!port!
|
||||
if iis7path.endswith("!"):
|
||||
iis7path = iis7path[:-1]
|
||||
|
||||
ps_cmd = ['New-Item',
|
||||
'-Path', "'{0}'".format(iis7path),
|
||||
|
@ -1255,6 +1258,9 @@ def set_container_setting(name, container, settings):
|
|||
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
|
||||
settings="{'managedPipeLineMode': 'Integrated'}"
|
||||
'''
|
||||
|
||||
identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
|
||||
identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
|
||||
ps_cmd = list()
|
||||
container_path = r"IIS:\{0}\{1}".format(container, name)
|
||||
|
||||
|
@ -1281,6 +1287,10 @@ def set_container_setting(name, container, settings):
|
|||
except ValueError:
|
||||
value = "'{0}'".format(settings[setting])
|
||||
|
||||
# Map to numeric to support server 2008
|
||||
if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
|
||||
value = identityType_map2numeric[settings[setting]]
|
||||
|
||||
ps_cmd.extend(['Set-ItemProperty',
|
||||
'-Path', "'{0}'".format(container_path),
|
||||
'-Name', "'{0}'".format(setting),
|
||||
|
@ -1300,6 +1310,10 @@ def set_container_setting(name, container, settings):
|
|||
failed_settings = dict()
|
||||
|
||||
for setting in settings:
|
||||
# map identity type from numeric to string for comparing
|
||||
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
|
||||
settings[setting] = identityType_map2string[settings[setting]]
|
||||
|
||||
if str(settings[setting]) != str(new_settings[setting]):
|
||||
failed_settings[setting] = settings[setting]
|
||||
|
||||
|
|
|
@ -987,18 +987,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
|||
# Version 1.2.3 will apply to packages foo and bar
|
||||
salt '*' pkg.install foo,bar version=1.2.3
|
||||
|
||||
cache_file (str):
|
||||
A single file to copy down for use with the installer. Copied to the
|
||||
same location as the installer. Use this over ``cache_dir`` if there
|
||||
are many files in the directory and you only need a specific file
|
||||
and don't want to cache additional files that may reside in the
|
||||
installer directory. Only applies to files on ``salt://``
|
||||
|
||||
cache_dir (bool):
|
||||
True will copy the contents of the installer directory. This is
|
||||
useful for installations that are not a single file. Only applies to
|
||||
directories on ``salt://``
|
||||
|
||||
extra_install_flags (str):
|
||||
Additional install flags that will be appended to the
|
||||
``install_flags`` defined in the software definition file. Only
|
||||
|
@ -1290,7 +1278,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
|
|||
if use_msiexec:
|
||||
cmd = msiexec
|
||||
arguments = ['/i', cached_pkg]
|
||||
if pkginfo['version_num'].get('allusers', True):
|
||||
if pkginfo[version_num].get('allusers', True):
|
||||
arguments.append('ALLUSERS="1"')
|
||||
arguments.extend(salt.utils.shlex_split(install_flags))
|
||||
else:
|
||||
|
|
|
@ -170,7 +170,11 @@ def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
|
|||
if key not in blacklist_keys:
|
||||
cert_info[key.lower()] = item[key]
|
||||
|
||||
cert_info['dnsnames'] = [name.get('Unicode') for name in item.get('DnsNameList', {})]
|
||||
names = item.get('DnsNameList', None)
|
||||
if isinstance(names, list):
|
||||
cert_info['dnsnames'] = [name.get('Unicode') for name in names]
|
||||
else:
|
||||
cert_info['dnsnames'] = []
|
||||
ret[item['Thumbprint']] = cert_info
|
||||
return ret
|
||||
|
||||
|
|
|
@ -616,7 +616,8 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|||
|
||||
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
|
||||
|
||||
Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``.
|
||||
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
|
||||
``install_date``, ``install_date_time_t``.
|
||||
|
||||
If ``all`` is specified, all valid attributes will be returned.
|
||||
|
||||
|
@ -652,7 +653,16 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|||
osarch=__grains__['osarch']
|
||||
)
|
||||
if pkginfo is not None:
|
||||
all_attr = {'version': pkginfo.version, 'arch': pkginfo.arch, 'install_date': pkginfo.install_date,
|
||||
# see rpm version string rules available at https://goo.gl/UGKPNd
|
||||
pkgver = pkginfo.version
|
||||
epoch = ''
|
||||
release = ''
|
||||
if ':' in pkgver:
|
||||
epoch, pkgver = pkgver.split(":", 1)
|
||||
if '-' in pkgver:
|
||||
pkgver, release = pkgver.split("-", 1)
|
||||
all_attr = {'epoch': epoch, 'version': pkgver, 'release': release,
|
||||
'arch': pkginfo.arch, 'install_date': pkginfo.install_date,
|
||||
'install_date_time_t': pkginfo.install_date_time_t}
|
||||
__salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr)
|
||||
|
||||
|
@ -879,8 +889,8 @@ def list_repo_pkgs(*args, **kwargs):
|
|||
_parse_output(out['stdout'], strict=True)
|
||||
else:
|
||||
for repo in repos:
|
||||
cmd = [_yum(), '--quiet', 'repository-packages', repo,
|
||||
'list', '--showduplicates']
|
||||
cmd = [_yum(), '--quiet', '--showduplicates',
|
||||
'repository-packages', repo, 'list']
|
||||
if cacheonly:
|
||||
cmd.append('-C')
|
||||
# Can't concatenate because args is a tuple, using list.extend()
|
||||
|
@ -1284,7 +1294,8 @@ def install(name=None,
|
|||
'version': '<new-version>',
|
||||
'arch': '<new-arch>'}}}
|
||||
|
||||
Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``.
|
||||
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
|
||||
``install_date``, ``install_date_time_t``.
|
||||
|
||||
If ``all`` is specified, all valid attributes will be returned.
|
||||
|
||||
|
@ -2829,7 +2840,7 @@ def _parse_repo_file(filename):
|
|||
|
||||
for section in parsed._sections:
|
||||
section_dict = dict(parsed._sections[section])
|
||||
section_dict.pop('__name__')
|
||||
section_dict.pop('__name__', None)
|
||||
config[section] = section_dict
|
||||
|
||||
# Try to extract leading comments
|
||||
|
|
|
@ -666,7 +666,8 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|||
|
||||
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
|
||||
|
||||
Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``.
|
||||
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
|
||||
``install_date``, ``install_date_time_t``.
|
||||
|
||||
If ``all`` is specified, all valid attributes will be returned.
|
||||
|
||||
|
@ -702,15 +703,11 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
|||
ret = {}
|
||||
for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines():
|
||||
name, pkgver, rel, arch, epoch, install_time = line.split('_|-')
|
||||
if epoch:
|
||||
pkgver = '{0}:{1}'.format(epoch, pkgver)
|
||||
if rel:
|
||||
pkgver += '-{0}'.format(rel)
|
||||
install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z"
|
||||
install_date_time_t = int(install_time)
|
||||
|
||||
all_attr = {'version': pkgver, 'arch': arch, 'install_date': install_date,
|
||||
'install_date_time_t': install_date_time_t}
|
||||
all_attr = {'epoch': epoch, 'version': pkgver, 'release': rel, 'arch': arch,
|
||||
'install_date': install_date, 'install_date_time_t': install_date_time_t}
|
||||
__salt__['pkg_resource.add_pkg'](ret, name, all_attr)
|
||||
|
||||
for pkgname in ret:
|
||||
|
@ -1097,7 +1094,8 @@ def install(name=None,
|
|||
'version': '<new-version>',
|
||||
'arch': '<new-arch>'}}}
|
||||
|
||||
Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``.
|
||||
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
|
||||
``install_date``, ``install_date_time_t``.
|
||||
|
||||
If ``all`` is specified, all valid attributes will be returned.
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ try:
|
|||
except ImportError as exc:
|
||||
cpy_error = exc
|
||||
|
||||
__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy'
|
||||
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_cherrypy'
|
||||
|
||||
logger = logging.getLogger(__virtualname__)
|
||||
cpy_min = '3.2.2'
|
||||
|
|
|
@ -10,7 +10,7 @@ import os
|
|||
import salt.auth
|
||||
from salt.utils.versions import StrictVersion as _StrictVersion
|
||||
|
||||
__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado'
|
||||
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_tornado'
|
||||
|
||||
logger = logging.getLogger(__virtualname__)
|
||||
|
||||
|
|
|
@ -242,8 +242,15 @@ def _format_host(host, data):
|
|||
if ret['result'] is None:
|
||||
hcolor = colors['LIGHT_YELLOW']
|
||||
tcolor = colors['LIGHT_YELLOW']
|
||||
|
||||
state_output = __opts__.get('state_output', 'full').lower()
|
||||
comps = [sdecode(comp) for comp in tname.split('_|-')]
|
||||
if __opts__.get('state_output', 'full').lower() == 'filter':
|
||||
|
||||
if state_output == 'mixed_id':
|
||||
# Swap in the ID for the name. Refs #35137
|
||||
comps[2] = comps[1]
|
||||
|
||||
if state_output.startswith('filter'):
|
||||
# By default, full data is shown for all types. However, return
|
||||
# data may be excluded by setting state_output_exclude to a
|
||||
# comma-separated list of True, False or None, or including the
|
||||
|
@ -276,28 +283,17 @@ def _format_host(host, data):
|
|||
continue
|
||||
if str(ret['result']) in exclude:
|
||||
continue
|
||||
elif __opts__.get('state_output', 'full').lower() == 'terse':
|
||||
# Print this chunk in a terse way and continue in the
|
||||
# loop
|
||||
|
||||
elif any((
|
||||
state_output.startswith('terse'),
|
||||
state_output.startswith('mixed') and ret['result'] is not False, # only non-error'd
|
||||
state_output.startswith('changes') and ret['result'] and not schanged # non-error'd non-changed
|
||||
)):
|
||||
# Print this chunk in a terse way and continue in the loop
|
||||
msg = _format_terse(tcolor, comps, ret, colors, tabular)
|
||||
hstrs.append(msg)
|
||||
continue
|
||||
elif __opts__.get('state_output', 'full').lower().startswith('mixed'):
|
||||
if __opts__['state_output'] == 'mixed_id':
|
||||
# Swap in the ID for the name. Refs #35137
|
||||
comps[2] = comps[1]
|
||||
# Print terse unless it failed
|
||||
if ret['result'] is not False:
|
||||
msg = _format_terse(tcolor, comps, ret, colors, tabular)
|
||||
hstrs.append(msg)
|
||||
continue
|
||||
elif __opts__.get('state_output', 'full').lower() == 'changes':
|
||||
# Print terse if no error and no changes, otherwise, be
|
||||
# verbose
|
||||
if ret['result'] and not schanged:
|
||||
msg = _format_terse(tcolor, comps, ret, colors, tabular)
|
||||
hstrs.append(msg)
|
||||
continue
|
||||
|
||||
state_lines = [
|
||||
u'{tcolor}----------{colors[ENDC]}',
|
||||
u' {tcolor} ID: {comps[1]}{colors[ENDC]}',
|
||||
|
|
|
@ -13,6 +13,7 @@ import logging
|
|||
import tornado.gen
|
||||
import sys
|
||||
import traceback
|
||||
import inspect
|
||||
|
||||
# Import salt libs
|
||||
import salt.loader
|
||||
|
@ -23,6 +24,8 @@ import salt.transport
|
|||
import salt.utils.url
|
||||
import salt.utils.cache
|
||||
import salt.utils.crypt
|
||||
import salt.utils.dictupdate
|
||||
import salt.utils.args
|
||||
from salt.exceptions import SaltClientError
|
||||
from salt.template import compile_template
|
||||
from salt.utils.dictupdate import merge
|
||||
|
@ -36,7 +39,7 @@ log = logging.getLogger(__name__)
|
|||
|
||||
|
||||
def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None, extra_minion_data=None):
|
||||
'''
|
||||
Return the correct pillar driver based on the file_client option
|
||||
'''
|
||||
|
@ -55,12 +58,14 @@ def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
|
|||
return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs,
|
||||
pillar_override=pillar_override, pillarenv=pillarenv)
|
||||
return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,
|
||||
pillar_override=pillar_override, pillarenv=pillarenv)
|
||||
pillar_override=pillar_override, pillarenv=pillarenv,
|
||||
extra_minion_data=extra_minion_data)
|
||||
|
||||
|
||||
# TODO: migrate everyone to this one!
|
||||
def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None,
|
||||
extra_minion_data=None):
|
||||
'''
|
||||
Return the correct pillar driver based on the file_client option
|
||||
'''
|
||||
|
@ -72,15 +77,62 @@ def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None
|
|||
'local': AsyncPillar,
|
||||
}.get(file_client, AsyncPillar)
|
||||
return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,
|
||||
pillar_override=pillar_override, pillarenv=pillarenv)
|
||||
pillar_override=pillar_override, pillarenv=pillarenv,
|
||||
extra_minion_data=extra_minion_data)
|
||||
|
||||
|
||||
class AsyncRemotePillar(object):
|
||||
class RemotePillarMixin(object):
|
||||
'''
|
||||
Common remote pillar functionality
|
||||
'''
|
||||
def get_ext_pillar_extra_minion_data(self, opts):
|
||||
'''
|
||||
Returns the extra data from the minion's opts dict (the config file).
|
||||
|
||||
This data will be passed to external pillar functions.
|
||||
'''
|
||||
def get_subconfig(opts_key):
|
||||
'''
|
||||
Returns a dict containing the opts key subtree, while maintaining
|
||||
the opts structure
|
||||
'''
|
||||
ret_dict = aux_dict = {}
|
||||
config_val = opts
|
||||
subkeys = opts_key.split(':')
|
||||
# Build an empty dict with the opts path
|
||||
for subkey in subkeys[:-1]:
|
||||
aux_dict[subkey] = {}
|
||||
aux_dict = aux_dict[subkey]
|
||||
if not config_val.get(subkey):
|
||||
# The subkey is not in the config
|
||||
return {}
|
||||
config_val = config_val[subkey]
|
||||
if subkeys[-1] not in config_val:
|
||||
return {}
|
||||
aux_dict[subkeys[-1]] = config_val[subkeys[-1]]
|
||||
return ret_dict
|
||||
|
||||
extra_data = {}
|
||||
if 'pass_to_ext_pillars' in opts:
|
||||
if not isinstance(opts['pass_to_ext_pillars'], list):
|
||||
log.exception('\'pass_to_ext_pillars\' config is malformed.')
|
||||
raise SaltClientError('\'pass_to_ext_pillars\' config is '
|
||||
'malformed.')
|
||||
for key in opts['pass_to_ext_pillars']:
|
||||
salt.utils.dictupdate.update(extra_data,
|
||||
get_subconfig(key),
|
||||
recursive_update=True,
|
||||
merge_lists=True)
|
||||
log.trace('ext_pillar_extra_data = {0}'.format(extra_data))
|
||||
return extra_data
|
||||
|
||||
|
||||
class AsyncRemotePillar(RemotePillarMixin):
|
||||
'''
|
||||
Get the pillar from the master
|
||||
'''
|
||||
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None, extra_minion_data=None):
|
||||
self.opts = opts
|
||||
self.opts['environment'] = saltenv
|
||||
self.ext = ext
|
||||
|
@ -93,6 +145,14 @@ class AsyncRemotePillar(object):
|
|||
if not isinstance(self.pillar_override, dict):
|
||||
self.pillar_override = {}
|
||||
log.error('Pillar data must be a dictionary')
|
||||
self.extra_minion_data = extra_minion_data or {}
|
||||
if not isinstance(self.extra_minion_data, dict):
|
||||
self.extra_minion_data = {}
|
||||
log.error('Extra minion data must be a dictionary')
|
||||
salt.utils.dictupdate.update(self.extra_minion_data,
|
||||
self.get_ext_pillar_extra_minion_data(opts),
|
||||
recursive_update=True,
|
||||
merge_lists=True)
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def compile_pillar(self):
|
||||
|
@ -104,6 +164,7 @@ class AsyncRemotePillar(object):
|
|||
'saltenv': self.opts['environment'],
|
||||
'pillarenv': self.opts['pillarenv'],
|
||||
'pillar_override': self.pillar_override,
|
||||
'extra_minion_data': self.extra_minion_data,
|
||||
'ver': '2',
|
||||
'cmd': '_pillar'}
|
||||
if self.ext:
|
||||
|
@ -126,12 +187,12 @@ class AsyncRemotePillar(object):
|
|||
raise tornado.gen.Return(ret_pillar)
|
||||
|
||||
|
||||
class RemotePillar(object):
|
||||
class RemotePillar(RemotePillarMixin):
|
||||
'''
|
||||
Get the pillar from the master
|
||||
'''
|
||||
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None, extra_minion_data=None):
|
||||
self.opts = opts
|
||||
self.opts['environment'] = saltenv
|
||||
self.ext = ext
|
||||
|
@ -144,6 +205,14 @@ class RemotePillar(object):
|
|||
if not isinstance(self.pillar_override, dict):
|
||||
self.pillar_override = {}
|
||||
log.error('Pillar data must be a dictionary')
|
||||
self.extra_minion_data = extra_minion_data or {}
|
||||
if not isinstance(self.extra_minion_data, dict):
|
||||
self.extra_minion_data = {}
|
||||
log.error('Extra minion data must be a dictionary')
|
||||
salt.utils.dictupdate.update(self.extra_minion_data,
|
||||
self.get_ext_pillar_extra_minion_data(opts),
|
||||
recursive_update=True,
|
||||
merge_lists=True)
|
||||
|
||||
def compile_pillar(self):
|
||||
'''
|
||||
|
@ -154,6 +223,7 @@ class RemotePillar(object):
|
|||
'saltenv': self.opts['environment'],
|
||||
'pillarenv': self.opts['pillarenv'],
|
||||
'pillar_override': self.pillar_override,
|
||||
'extra_minion_data': self.extra_minion_data,
|
||||
'ver': '2',
|
||||
'cmd': '_pillar'}
|
||||
if self.ext:
|
||||
|
@ -187,7 +257,7 @@ class PillarCache(object):
|
|||
'''
|
||||
# TODO ABC?
|
||||
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None, extra_minion_data=None):
|
||||
# Yes, we need all of these because we need to route to the Pillar object
|
||||
# if we have no cache. This is another refactor target.
|
||||
|
||||
|
@ -265,7 +335,7 @@ class Pillar(object):
|
|||
Read over the pillar top files and render the pillar data
|
||||
'''
|
||||
def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None,
|
||||
pillar_override=None, pillarenv=None):
|
||||
pillar_override=None, pillarenv=None, extra_minion_data=None):
|
||||
self.minion_id = minion_id
|
||||
self.ext = ext
|
||||
if pillarenv is None:
|
||||
|
@ -311,6 +381,10 @@ class Pillar(object):
|
|||
if not isinstance(self.pillar_override, dict):
|
||||
self.pillar_override = {}
|
||||
log.error('Pillar data must be a dictionary')
|
||||
self.extra_minion_data = extra_minion_data or {}
|
||||
if not isinstance(self.extra_minion_data, dict):
|
||||
self.extra_minion_data = {}
|
||||
log.error('Extra minion data must be a dictionary')
|
||||
|
||||
def __valid_on_demand_ext_pillar(self, opts):
|
||||
'''
|
||||
|
@ -416,20 +490,19 @@ class Pillar(object):
|
|||
self.opts['pillarenv'], ', '.join(self.opts['file_roots'])
|
||||
)
|
||||
else:
|
||||
tops[self.opts['pillarenv']] = [
|
||||
compile_template(
|
||||
self.client.cache_file(
|
||||
self.opts['state_top'],
|
||||
self.opts['pillarenv']
|
||||
),
|
||||
self.rend,
|
||||
self.opts['renderer'],
|
||||
self.opts['renderer_blacklist'],
|
||||
self.opts['renderer_whitelist'],
|
||||
self.opts['pillarenv'],
|
||||
_pillar_rend=True,
|
||||
)
|
||||
]
|
||||
top = self.client.cache_file(self.opts['state_top'], self.opts['pillarenv'])
|
||||
if top:
|
||||
tops[self.opts['pillarenv']] = [
|
||||
compile_template(
|
||||
top,
|
||||
self.rend,
|
||||
self.opts['renderer'],
|
||||
self.opts['renderer_blacklist'],
|
||||
self.opts['renderer_whitelist'],
|
||||
self.opts['pillarenv'],
|
||||
_pillar_rend=True,
|
||||
)
|
||||
]
|
||||
else:
|
||||
for saltenv in self._get_envs():
|
||||
if self.opts.get('pillar_source_merging_strategy', None) == "none":
|
||||
|
@ -768,17 +841,35 @@ class Pillar(object):
|
|||
Builds actual pillar data structure and updates the ``pillar`` variable
|
||||
'''
|
||||
ext = None
|
||||
args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args
|
||||
|
||||
if isinstance(val, dict):
|
||||
ext = self.ext_pillars[key](self.minion_id, pillar, **val)
|
||||
if ('extra_minion_data' in args) and self.extra_minion_data:
|
||||
ext = self.ext_pillars[key](
|
||||
self.minion_id, pillar,
|
||||
extra_minion_data=self.extra_minion_data, **val)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id, pillar, **val)
|
||||
elif isinstance(val, list):
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
*val)
|
||||
if ('extra_minion_data' in args) and self.extra_minion_data:
|
||||
ext = self.ext_pillars[key](
|
||||
self.minion_id, pillar, *val,
|
||||
extra_minion_data=self.extra_minion_data)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
*val)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
val)
|
||||
if ('extra_minion_data' in args) and self.extra_minion_data:
|
||||
ext = self.ext_pillars[key](
|
||||
self.minion_id,
|
||||
pillar,
|
||||
val,
|
||||
extra_minion_data=self.extra_minion_data)
|
||||
else:
|
||||
ext = self.ext_pillars[key](self.minion_id,
|
||||
pillar,
|
||||
val)
|
||||
return ext
|
||||
|
||||
def ext_pillar(self, pillar, errors=None):
|
||||
|
|
|
@ -167,7 +167,8 @@ def ext_pillar(minion_id,
|
|||
opts['target'] = match.group(1)
|
||||
temp = temp.replace(match.group(0), '')
|
||||
checker = salt.utils.minions.CkMinions(__opts__)
|
||||
minions = checker.check_minions(opts['target'], 'compound')
|
||||
_res = checker.check_minions(opts['target'], 'compound')
|
||||
minions = _res['minions']
|
||||
if minion_id not in minions:
|
||||
return {}
|
||||
|
||||
|
|
|
@ -336,9 +336,10 @@ def ext_pillar(minion_id,
|
|||
if (os.path.isdir(nodegroups_dir) and
|
||||
nodegroup in master_ngroups):
|
||||
ckminions = salt.utils.minions.CkMinions(__opts__)
|
||||
match = ckminions.check_minions(
|
||||
_res = ckminions.check_minions(
|
||||
master_ngroups[nodegroup],
|
||||
'compound')
|
||||
match = _res['minions']
|
||||
if minion_id in match:
|
||||
ngroup_dir = os.path.join(
|
||||
nodegroups_dir, str(nodegroup))
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue