Merge branch 'oxygen' into py3-loader

This commit is contained in:
Nicole Thomas 2017-12-27 13:42:44 -05:00 committed by GitHub
commit 42185b79b8
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
46 changed files with 1306 additions and 767 deletions

View file

@ -24,7 +24,7 @@ transport:
name: sftp
<% end %>
sudo: false
sudo: true
provisioner:
name: salt_solo
salt_install: bootstrap
@ -184,10 +184,9 @@ suites:
verifier:
name: shell
remote_exec: true
sudo: false
live_stream: {}
<% if ENV['TESTOPTS'].nil? %>
command: '$(kitchen) /tmp/kitchen/testing/tests/runtests.py --run-destructive --sysinfo --transport=zeromq --output-columns=80 --ssh --coverage-xml=/tmp/coverage.xml --xml=/tmp/xml-unittests-output'
command: 'sudo -E $(kitchen) /tmp/kitchen/testing/tests/runtests.py -v --run-destructive --sysinfo --transport=zeromq --output-columns=80 --ssh --coverage-xml=/tmp/coverage.xml --xml=/tmp/xml-unittests-output'
<% else %>
command: '$(kitchen) /tmp/kitchen/testing/tests/runtests.py --run-destructive --output-columns 80 <%= ENV["TESTOPTS"] %>'
command: 'sudo -E $(kitchen) /tmp/kitchen/testing/tests/runtests.py -v --run-destructive --output-columns 80 <%= ENV["TESTOPTS"] %>'
<% end %>

View file

@ -22,3 +22,7 @@ group :windows do
gem 'winrm', '~>2.0'
gem 'winrm-fs', '~>1.0'
end
group :ec2 do
gem 'kitchen-ec2'
end

View file

@ -3,3 +3,4 @@
# user: fred
# password: saltybacon
# private_key: /root/joyent.pem
# keyname: saltstack

View file

@ -235,13 +235,13 @@
# cause sub minion process to restart.
#auth_safemode: False
# Ping Master to ensure connection is alive (seconds).
# Ping Master to ensure connection is alive (minutes).
#ping_interval: 0
# To auto recover minions if master changes IP address (DDNS)
# auth_tries: 10
# auth_safemode: False
# ping_interval: 90
# ping_interval: 2
#
# Minions won't know master is missing until a ping fails. After the ping fail,
# the minion will attempt authentication and likely fails out and cause a restart.

View file

@ -1039,7 +1039,7 @@ restart.
Default: ``0``
Instructs the minion to ping its master(s) every n number of seconds. Used
Instructs the minion to ping its master(s) every n number of minutes. Used
primarily as a mitigation technique against minion disconnects.
.. code-block:: yaml

View file

@ -11,6 +11,7 @@ This section contains a list of the Python modules that are used to extend the v
../ref/auth/all/index
../ref/beacons/all/index
../ref/cache/all/index
../ref/clouds/all/index
../ref/engines/all/index
../ref/executors/all/index
../ref/file_server/all/index

View file

@ -107,35 +107,44 @@ Minion silently:
========================= =====================================================
Option Description
========================= =====================================================
``/master=`` A string value to set the IP address or host name of
``/master=`` A string value to set the IP address or hostname of
the master. Default value is 'salt'. You can pass a
single master or a comma-separated list of masters.
Setting the master will replace existing config with
the default config. Cannot be used in conjunction
with ``/use-existing-config``
``/minion-name=`` A string value to set the minion name. Default is
'hostname'. Setting the minion name will replace
existing config with the default config. Cannot be
used in conjunction with ``/use-existing-config``
Setting the master will cause the installer to use
the default config or a custom config if defined.
``/minion-name=`` A string value to set the minion name. Default value
is 'hostname'. Setting the minion name causes the
installer to use the default config or a custom
config if defined.
``/start-minion=`` Either a 1 or 0. '1' will start the salt-minion
service, '0' will not. Default is to start the
service after installation
service after installation.
``/start-minion-delayed`` Set the minion start type to
``Automatic (Delayed Start)``
``/use-existing-config`` Either a 1 or 0. '1' will use the existing config if
present. '0' will replace existing config with the
default config. Default is '1'. If this is set to '1'
values passed in ``/master`` and ``/minion-name``
will be ignored
``Automatic (Delayed Start)``.
``/default-config`` Overwrite the existing config if present with the
default config for salt. Default is to use the
existing config if present. If ``/master`` and/or
``/minion-name`` is passed, those values will be used
to update the new default config.
``/custom-config=`` A string value specifying the name of a custom config
file in the same path as the installer of the full
path to a custom config file. If ``/master`` and/or
``/minion-name`` is passed, those values will be used
to update the new custom config.
``/S`` Runs the installation silently. Uses the above
settings or the defaults
``/?`` Displays command line help
settings or the defaults.
``/?`` Displays command line help.
========================= =====================================================
.. note::
``/start-service`` has been deprecated but will continue to function as
expected for the time being.
.. note::
``/default-config`` and ``/custom-config=`` will backup an existing config
if found. A timestamp and a ``.bak`` extension will be added. That includes
the ``minion`` file and the ``minion.d`` directory.
Here are some examples of using the silent installer:
.. code-block:: bat
@ -152,6 +161,13 @@ Here are some examples of using the silent installer:
Salt-Minion-2017.7.1-Py3-AMD64-Setup.exe /S /master=yoursaltmaster /minion-name=yourminionname /start-minion=0
.. code-block:: bat
# Install the Salt Minion
# Configure the minion using a custom config and configuring multimaster
Salt-Minion-2017.7.1-Py3-AMD64-Setup.exe /S /custom-config=windows_minion /master=prod_master1,prod_master2
Running the Salt Minion on Windows as an Unprivileged User
==========================================================

View file

@ -109,13 +109,19 @@ Var ConfigMinionName
Var MinionName
Var MinionName_State
Var ExistingConfigFound
Var UseExistingConfig
Var UseExistingConfig_State
Var ConfigType
Var ConfigType_State
Var CustomConfig
Var CustomConfig_btn
Var CustomConfig_State
Var WarningCustomConfig
Var WarningExistingConfig
Var WarningDefaultConfig
Var StartMinion
Var StartMinionDelayed
Var DeleteInstallDir
Var ConfigWriteMinion
Var ConfigWriteMaster
###############################################################################
@ -146,10 +152,12 @@ Function pageMinionConfig
${NSD_CreateText} 0 43u 100% 12u $MinionName_State
Pop $MinionName
# Use Existing Config Checkbox
${NSD_CreateCheckBox} 0 65u 100% 12u "&Use Existing Config"
Pop $UseExistingConfig
${NSD_OnClick} $UseExistingConfig pageMinionConfig_OnClick
# Config Drop List
${NSD_CreateDropList} 0 65u 25% 36u ""
Pop $ConfigType
${NSD_CB_AddString} $ConfigType "Default Config"
${NSD_CB_AddString} $ConfigType "Custom Config"
${NSD_OnChange} $ConfigType pageMinionConfig_OnChange
# Add Existing Config Warning Label
${NSD_CreateLabel} 0 80u 100% 60u "The values above are taken from an \
@ -164,73 +172,174 @@ Function pageMinionConfig
SetCtlColors $WarningExistingConfig 0xBB0000 transparent
# Add Default Config Warning Label
${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will remove the \
the existing minion config file and remove the minion.d directories. \
The values above will be used in the new default config."
${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will backup the \
the existing minion config file and minion.d directories. The values \
above will be used in the new default config.$\r$\n\
$\r$\n\
NOTE: If Master IP is set to `salt` and Minion Name is set to \
`hostname` no changes will be made."
Pop $WarningDefaultConfig
CreateFont $0 "Arial" 10 500 /ITALIC
SendMessage $WarningDefaultConfig ${WM_SETFONT} $0 1
SetCtlColors $WarningDefaultConfig 0xBB0000 transparent
# If no existing config found, disable the checkbox and stuff
# Set UseExistingConfig_State to 0
# Add Custom Config File Selector and Warning Label
${NSD_CreateText} 26% 65u 64% 12u $CustomConfig_State
Pop $CustomConfig
${NSD_CreateButton} 91% 65u 9% 12u "..."
Pop $CustomConfig_btn
${NSD_OnClick} $CustomConfig_btn pageCustomConfigBtn_OnClick
${If} $ExistingConfigFound == 0
StrCpy $UseExistingConfig_State 0
ShowWindow $UseExistingConfig ${SW_HIDE}
ShowWindow $WarningExistingConfig ${SW_HIDE}
${NSD_CreateLabel} 0 80u 100% 60u "Values entered above will be used \
in the custom config.$\r$\n\
$\r$\n\
NOTE: If Master IP is set to `salt` and Minion Name is set to \
`hostname` no changes will be made."
${Else}
${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will backup the \
the existing minion config file and minion.d directories. The \
values above will be used in the custom config.$\r$\n\
$\r$\n\
NOTE: If Master IP is set to `salt` and Minion Name is set to \
`hostname` no changes will be made."
${Endif}
Pop $WarningCustomConfig
CreateFont $0 "Arial" 10 500 /ITALIC
SendMessage $WarningCustomConfig ${WM_SETFONT} $0 1
SetCtlColors $WarningCustomConfig 0xBB0000 transparent
# If existing config found, add the Existing Config option to the Drop List
# If not, hide the Default Warning
${If} $ExistingConfigFound == 1
${NSD_CB_AddString} $ConfigType "Existing Config"
${Else}
ShowWindow $WarningDefaultConfig ${SW_HIDE}
${Endif}
${NSD_SetState} $UseExistingConfig $UseExistingConfig_State
${NSD_CB_SelectString} $ConfigType $ConfigType_State
${NSD_SetText} $CustomConfig $CustomConfig_State
Call pageMinionConfig_OnClick
Call pageMinionConfig_OnChange
nsDialogs::Show
FunctionEnd
Function pageMinionConfig_OnClick
Function pageMinionConfig_OnChange
# You have to pop the top handle to keep the stack clean
Pop $R0
# Assign the current checkbox state to the variable
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
${NSD_GetText} $ConfigType $ConfigType_State
# Validate the checkboxes
${If} $UseExistingConfig_State == ${BST_CHECKED}
# Use Existing Config is checked, show warning
ShowWindow $WarningExistingConfig ${SW_SHOW}
EnableWindow $MasterHost 0
EnableWindow $MinionName 0
${NSD_SetText} $MasterHost $ConfigMasterHost
${NSD_SetText} $MinionName $ConfigMinionName
${If} $ExistingConfigFound == 1
# Update Dialog
${Switch} $ConfigType_State
${Case} "Existing Config"
# Enable Master/Minion and set values
EnableWindow $MasterHost 0
EnableWindow $MinionName 0
${NSD_SetText} $MasterHost $ConfigMasterHost
${NSD_SetText} $MinionName $ConfigMinionName
# Hide Custom File Picker
ShowWindow $CustomConfig ${SW_HIDE}
ShowWindow $CustomConfig_btn ${SW_HIDE}
# Hide Warnings
ShowWindow $WarningDefaultConfig ${SW_HIDE}
${Endif}
${Else}
# Use Existing Config is not checked, hide the warning
ShowWindow $WarningExistingConfig ${SW_HIDE}
EnableWindow $MasterHost 1
EnableWindow $MinionName 1
${NSD_SetText} $MasterHost $MasterHost_State
${NSD_SetText} $MinionName $MinionName_State
${If} $ExistingConfigFound == 1
ShowWindow $WarningDefaultConfig ${SW_SHOW}
${Endif}
ShowWindow $WarningCustomConfig ${SW_HIDE}
# Show Existing Warning
ShowWindow $WarningExistingConfig ${SW_SHOW}
${Break}
${Case} "Custom Config"
# Enable Master/Minion and set values
EnableWindow $MasterHost 1
EnableWindow $MinionName 1
${NSD_SetText} $MasterHost $MasterHost_State
${NSD_SetText} $MinionName $MinionName_State
# Show Custom File Picker
ShowWindow $CustomConfig ${SW_SHOW}
ShowWindow $CustomConfig_btn ${SW_SHOW}
# Hide Warnings
ShowWindow $WarningDefaultConfig ${SW_HIDE}
ShowWindow $WarningExistingConfig ${SW_HIDE}
# Show Custom Warning
ShowWindow $WarningCustomConfig ${SW_SHOW}
${Break}
${Case} "Default Config"
# Enable Master/Minion and set values
EnableWindow $MasterHost 1
EnableWindow $MinionName 1
${NSD_SetText} $MasterHost $MasterHost_State
${NSD_SetText} $MinionName $MinionName_State
# Hide Custom File Picker
ShowWindow $CustomConfig ${SW_HIDE}
ShowWindow $CustomConfig_btn ${SW_HIDE}
# Hide Warnings
ShowWindow $WarningExistingConfig ${SW_HIDE}
ShowWindow $WarningCustomConfig ${SW_HIDE}
# Show Default Warning, if there is an existing config
${If} $ExistingConfigFound == 1
ShowWindow $WarningDefaultConfig ${SW_SHOW}
${Endif}
${Break}
${EndSwitch}
FunctionEnd
# File Picker Definitions
!define OFN_FILEMUSTEXIST 0x00001000
!define OFN_DONTADDTOREC 0x02000000
!define OPENFILENAME_SIZE_VERSION_400 76
!define OPENFILENAME 'i,i,i,i,i,i,i,i,i,i,i,i,i,i,&i2,&i2,i,i,i,i'
Function pageCustomConfigBtn_OnClick
Pop $0
System::Call '*(&t${NSIS_MAX_STRLEN})i.s' # Allocate OPENFILENAME.lpstrFile buffer
System::Call '*(${OPENFILENAME})i.r0' # Allocate OPENFILENAME struct
System::Call '*$0(${OPENFILENAME})(${OPENFILENAME_SIZE_VERSION_400}, \
$hwndparent, , , , , , sr1, ${NSIS_MAX_STRLEN} , , , , \
t"Select Custom Config File", \
${OFN_FILEMUSTEXIST} | ${OFN_DONTADDTOREC})'
# Populate file name field
${NSD_GetText} $CustomConfig $2
System::Call "*$1(&t${NSIS_MAX_STRLEN}r2)" ; Set lpstrFile to the old path (if any)
# Open the dialog
System::Call 'COMDLG32::GetOpenFileName(ir0)i.r2'
# Get file name field
${If} $2 <> 0
System::Call "*$1(&t${NSIS_MAX_STRLEN}.r2)"
${NSD_SetText} $CustomConfig $2
${EndIf}
# Free resources
System::Free $1
System::Free $0
FunctionEnd
Function pageMinionConfig_Leave
# Save the State
${NSD_GetText} $MasterHost $MasterHost_State
${NSD_GetText} $MinionName $MinionName_State
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
${NSD_GetText} $ConfigType $ConfigType_State
${NSD_GetText} $CustomConfig $CustomConfig_State
Call RemoveExistingConfig
# Abort if config file not found
${If} $ConfigType_State == "Custom Config"
IfFileExists "$CustomConfig_State" continue 0
MessageBox MB_OK "File not found: $CustomConfig_State" /SD IDOK
Abort
${EndIf}
continue:
Call BackupExistingConfig
FunctionEnd
@ -362,7 +471,7 @@ Section -Prerequisites
# /qb! used by 2008 installer
# It just ignores the unrecognized switches...
ClearErrors
ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0
ExecWait '"$INSTDIR\vcredist.exe" /qb! /quiet /norestart' $0
IfErrors 0 CheckVcRedistErrorCode
MessageBox MB_OK \
"$VcRedistName failed to install. Try installing the package manually." \
@ -408,6 +517,14 @@ Function .onInit
Call parseCommandLineSwitches
# If custom config passed, verify its existence before continuing so we
# don't uninstall an existing installation and then fail
${If} $ConfigType_State == "Custom Config"
IfFileExists "$CustomConfig_State" customConfigExists 0
Abort
${EndIf}
customConfigExists:
# Check for existing installation
ReadRegStr $R0 HKLM \
"Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \
@ -457,21 +574,52 @@ Function .onInit
skipUninstall:
Call getMinionConfig
Call getExistingMinionConfig
${If} $ExistingConfigFound == 0
${AndIf} $ConfigType_State == "Existing Config"
StrCpy $ConfigType_State "Default Config"
${EndIf}
IfSilent 0 +2
Call RemoveExistingConfig
Call BackupExistingConfig
FunctionEnd
Function RemoveExistingConfig
# Time Stamp Definition
!define /date TIME_STAMP "%Y-%m-%d-%H-%M-%S"
Function BackupExistingConfig
${If} $ExistingConfigFound == 1 # If existing config found
${AndIfNot} $ConfigType_State == "Existing Config" # If not using Existing Config
# Backup the minion config
Rename "$INSTDIR\conf\minion" "$INSTDIR\conf\minion-${TIME_STAMP}.bak"
IfFileExists "$INSTDIR\conf\minion.d" 0 +2
Rename "$INSTDIR\conf\minion.d" "$INSTDIR\conf\minion.d-{TIME_STAMP}$.bak"
${EndIf}
# By this point there should be no existing config
# It was either backed up or wasn't there to begin with
${If} $ConfigType_State == "Custom Config" # If we're using Custom Config
${AndIfNot} $CustomConfig_State == "" # If a custom config is passed
# Check for a file name
# Named file should be in the same directory as the installer
CreateDirectory "$INSTDIR\conf"
IfFileExists "$EXEDIR\$CustomConfig_State" 0 checkFullPath
CopyFiles /SILENT /FILESONLY "$EXEDIR\$CustomConfig_State" "$INSTDIR\conf\minion"
goto finished
# Maybe it was a full path to a file
checkFullPath:
IfFileExists "$CustomConfig_State" 0 finished
CopyFiles /SILENT /FILESONLY "$CustomConfig_State" "$INSTDIR\conf\minion"
finished:
${If} $ExistingConfigFound == 1
${AndIf} $UseExistingConfig_State == 0
# Wipe out the Existing Config
Delete "$INSTDIR\conf\minion"
RMDir /r "$INSTDIR\conf\minion.d"
${EndIf}
FunctionEnd
@ -517,7 +665,7 @@ Section -Post
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"
${If} $UseExistingConfig_State == 0
${IfNot} $ConfigType_State == "Existing Config" # If not using Existing Config
Call updateMinionConfig
${EndIf}
@ -803,7 +951,7 @@ FunctionEnd
# Push "this is some string"
# Push "some"
# Call StrStr
# Pop $0 ; "some string"
# Pop $0 # "some string"
#------------------------------------------------------------------------------
!macro StrStr un
Function ${un}StrStr
@ -1029,7 +1177,7 @@ FunctionEnd
###############################################################################
# Specialty Functions
###############################################################################
Function getMinionConfig
Function getExistingMinionConfig
# Set Config Found Default Value
StrCpy $ExistingConfigFound 0
@ -1050,30 +1198,30 @@ Function getMinionConfig
FileOpen $0 "$INSTDIR\conf\minion" r
confLoop:
ClearErrors # Clear Errors
FileRead $0 $1 # Read the next line
IfErrors EndOfFile # Error is probably EOF
${StrLoc} $2 $1 "master:" ">" # Find `master:` starting at the beginning
${If} $2 == 0 # If it found it in the first position, then it is defined
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" # Read everything after `master: `
${Trim} $2 $2 # Trim white space
${If} $2 == "" # If it's empty, it's probably a list
ClearErrors # clear Errors
FileRead $0 $1 # read the next line
IfErrors EndOfFile # error is probably EOF
${StrLoc} $2 $1 "master:" ">" # find `master:` starting at the beginning
${If} $2 == 0 # if it found it in the first position, then it is defined
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" # read everything after `master: `
${Trim} $2 $2 # trim white space
${If} $2 == "" # if it's empty, it's probably a list of masters
masterLoop:
ClearErrors # Clear Errors
FileRead $0 $1 # Read the next line
IfErrors EndOfFile # Error is probably EOF
${StrStrAdv} $2 $1 "- " ">" ">" "0" "0" "0" # Read everything after `- `
${Trim} $2 $2 # Trim white space
${IfNot} $2 == "" # If it's not empty, we found something
${If} $ConfigMasterHost == "" # Is the default `salt` there
StrCpy $ConfigMasterHost $2 # If so, make the first item the new entry
ClearErrors # clear Errors
FileRead $0 $1 # read the next line
IfErrors EndOfFile # error is probably EOF
${StrStrAdv} $2 $1 "- " ">" ">" "0" "0" "0" # read everything after `- `
${Trim} $2 $2 # trim white space
${IfNot} $2 == "" # if the line is not empty, we found something
${If} $ConfigMasterHost == "" # if the config setting is empty
StrCpy $ConfigMasterHost $2 # make the first item the new entry
${Else}
StrCpy $ConfigMasterHost "$ConfigMasterHost,$2" # Append the new master, comma separated
${EndIf}
Goto masterLoop # Check the next one
Goto masterLoop # check the next one
${EndIf}
${Else}
StrCpy $ConfigMasterHost $2 # A single master entry
StrCpy $ConfigMasterHost $2 # a single master entry
${EndIf}
${EndIf}
@ -1102,59 +1250,126 @@ Function getMinionConfig
FunctionEnd
Var cfg_line
Var chk_line
Var lst_check
Function updateMinionConfig
ClearErrors
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
GetTempFileName $R0 # get new temp file name
FileOpen $1 $R0 "w" # open temp file for writing
FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading
GetTempFileName $R0 # get new temp file name
FileOpen $1 $R0 "w" # open temp file for writing
loop: # loop through each line
FileRead $0 $2 # read line from target file
IfErrors done # end if errors are encountered (end of line)
StrCpy $ConfigWriteMaster 1 # write the master config value
StrCpy $ConfigWriteMinion 1 # write the minion config value
${If} $MasterHost_State != "" # if master is empty
${AndIf} $MasterHost_State != "salt" # and if master is not 'salt'
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
${If} $3 == 0 # is it in the first...
${OrIf} $3 == 1 # or second position (account for comments)
loop: # loop through each line
FileRead $0 $cfg_line # read line from target file
IfErrors done # end if errors are encountered (end of line)
${Explode} $9 "," $MasterHost_state # Split the hostname on commas, $9 is the number of items found
${If} $9 == 1 # 1 means only a single master was passed
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
${Else} # Make a multi-master entry
StrCpy $2 "master:" # Make the first line "master:"
loop_after_read:
StrCpy $lst_check 0 # list check not performed
loop_explode: # Start a loop to go through the list in the config
pop $8 # Pop the next item off the stack
${Trim} $8 $8 # Trim any whitespace
StrCpy $2 "$2$\r$\n - $8" # Add it to the master variable ($2)
IntOp $9 $9 - 1 # Decrement the list count
${If} $9 >= 1 # If it's not 0
Goto loop_explode # Do it again
${EndIf} # close if statement
${EndIf} # close if statement
${EndIf} # close if statement
${EndIf} # close if statement
${If} $MasterHost_State == "" # if master is empty
${OrIf} $MasterHost_State == "salt" # or if master is 'salt'
StrCpy $ConfigWriteMaster 0 # no need to write master config
${EndIf} # close if statement
${If} $MinionName_State == "" # if minion is empty
${OrIf} $MinionName_State == "hostname" # and if minion is not 'hostname'
StrCpy $ConfigWriteMinion 0 # no need to write minion config
${EndIf} # close if statement
${If} $MinionName_State != "" # if minion is empty
${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname'
${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line
${If} $3 == 0 # is it in the first...
${OrIf} $3 == 1 # or the second position (account for comments)
StrCpy $2 "id: $MinionName_State$\r$\n" # change line
${EndIf} # close if statement
${EndIf} # close if statement
${If} $ConfigWriteMaster == 1 # if we need to write master config
FileWrite $1 $2 # write changed or unchanged line to temp file
Goto loop
${StrLoc} $3 $cfg_line "master:" ">" # where is 'master:' in this line
${If} $3 == 0 # is it in the first...
${OrIf} $3 == 1 # or second position (account for comments)
${Explode} $9 "," $MasterHost_state # Split the hostname on commas, $9 is the number of items found
${If} $9 == 1 # 1 means only a single master was passed
StrCpy $cfg_line "master: $MasterHost_State$\r$\n" # write the master
${Else} # make a multi-master entry
StrCpy $cfg_line "master:" # make the first line "master:"
loop_explode: # start a loop to go through the list in the config
pop $8 # pop the next item off the stack
${Trim} $8 $8 # trim any whitespace
StrCpy $cfg_line "$cfg_line$\r$\n - $8" # add it to the master variable ($2)
IntOp $9 $9 - 1 # decrement the list count
${If} $9 >= 1 # if it's not 0
Goto loop_explode # do it again
${EndIf} # close if statement
StrCpy $cfg_line "$cfg_line$\r$\n" # Make sure there's a new line at the end
# Remove remaining items in list
${While} $lst_check == 0 # while list item found
FileRead $0 $chk_line # read line from target file
IfErrors done # end if errors are encountered (end of line)
${StrLoc} $3 $chk_line " - " ">" # where is 'master:' in this line
${If} $3 == "" # is it in the first...
StrCpy $lst_check 1 # list check performed and finished
${EndIf}
${EndWhile}
${EndIf} # close if statement
StrCpy $ConfigWriteMaster 0 # master value written to config
${EndIf} # close if statement
${EndIf} # close if statement
${If} $ConfigWriteMinion == 1 # if we need to write minion config
${StrLoc} $3 $cfg_line "id:" ">" # where is 'id:' in this line
${If} $3 == 0 # is it in the first...
${OrIf} $3 == 1 # or the second position (account for comments)
StrCpy $cfg_line "id: $MinionName_State$\r$\n" # write the minion config setting
StrCpy $ConfigWriteMinion 0 # minion value written to config
${EndIf} # close if statement
${EndIf} # close if statement
FileWrite $1 $cfg_line # write changed or unchanged line to temp file
${If} $lst_check == 1 # master not written to the config
StrCpy $cfg_line $chk_line
Goto loop_after_read # A loop was performed, skip the next read
${EndIf} # close if statement
Goto loop # check the next line in the config file
done:
FileClose $0 # close target file
FileClose $1 # close temp file
Delete "$INSTDIR\conf\minion" # delete target file
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
Delete $R0 # delete temp file
ClearErrors
# Does master config still need to be written
${If} $ConfigWriteMaster == 1 # master not written to the config
${Explode} $9 "," $MasterHost_state # split the hostname on commas, $9 is the number of items found
${If} $9 == 1 # 1 means only a single master was passed
StrCpy $cfg_line "master: $MasterHost_State" # write the master
${Else} # make a multi-master entry
StrCpy $cfg_line "master:" # make the first line "master:"
loop_explode_2: # start a loop to go through the list in the config
pop $8 # pop the next item off the stack
${Trim} $8 $8 # trim any whitespace
StrCpy $cfg_line "$cfg_line$\r$\n - $8" # add it to the master variable ($2)
IntOp $9 $9 - 1 # decrement the list count
${If} $9 >= 1 # if it's not 0
Goto loop_explode_2 # do it again
${EndIf} # close if statement
${EndIf} # close if statement
FileWrite $1 $cfg_line # write changed or unchanged line to temp file
${EndIf} # close if statement
${If} $ConfigWriteMinion == 1 # minion ID not written to the config
StrCpy $cfg_line "$\r$\nid: $MinionName_State" # write the minion config setting
FileWrite $1 $cfg_line # write changed or unchanged line to temp file
${EndIf} # close if statement
FileClose $0 # close target file
FileClose $1 # close temp file
Delete "$INSTDIR\conf\minion" # delete target file
CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file
Delete $R0 # delete temp file
FunctionEnd
@ -1181,26 +1396,40 @@ Function parseCommandLineSwitches
FileWrite $0 "Help for Salt Minion installation$\n"
FileWrite $0 "===============================================================================$\n"
FileWrite $0 "$\n"
FileWrite $0 "/minion-name=$\t$\tA string value to set the minion name. Default is$\n"
FileWrite $0 "$\t$\t$\t'hostname'. Setting the minion name will replace$\n"
FileWrite $0 "$\t$\t$\texisting config with a default config. Cannot be$\n"
FileWrite $0 "$\t$\t$\tused in conjunction with /use-existing-config=1$\n"
FileWrite $0 "/minion-name=$\t$\tA string value to set the minion name. Default value is$\n"
FileWrite $0 "$\t$\t$\t'hostname'. Setting the minion name causes the installer$\n"
FileWrite $0 "$\t$\t$\tto use the default config or a custom config if defined$\n"
FileWrite $0 "$\n"
FileWrite $0 "/master=$\t$\tA string value to set the IP address or hostname of$\n"
FileWrite $0 "$\t$\t$\tthe master. Default value is 'salt'. You may pass a$\n"
FileWrite $0 "$\t$\t$\tsingle master, or a comma separated list of masters.$\n"
FileWrite $0 "$\t$\t$\tSetting the master will replace existing config with$\n"
FileWrite $0 "$\t$\t$\ta default config. Cannot be used in conjunction with$\n"
FileWrite $0 "$\t$\t$\t/use-existing-config=1$\n"
FileWrite $0 "/master=$\t$\tA string value to set the IP address or hostname of the$\n"
FileWrite $0 "$\t$\t$\tmaster. Default value is 'salt'. You may pass a single$\n"
FileWrite $0 "$\t$\t$\tmaster or a comma-separated list of masters. Setting$\n"
FileWrite $0 "$\t$\t$\tthe master will cause the installer to use the default$\n"
FileWrite $0 "$\t$\t$\tconfig or a custom config if defined$\n"
FileWrite $0 "$\n"
FileWrite $0 "/start-minion=$\t$\t1 will start the service, 0 will not. Default is 1$\n"
FileWrite $0 "/start-minion=$\t$\t1 will start the minion service, 0 will not.$\n"
FileWrite $0 "$\t$\t$\tDefault is 1$\n"
FileWrite $0 "$\n"
FileWrite $0 "/start-minion-delayed$\tSet the minion start type to 'Automatic (Delayed Start)'$\n"
FileWrite $0 "$\n"
FileWrite $0 "/use-existing-config=$\t1 will use the existing config if present, 0 will$\n"
FileWrite $0 "$\t$\t$\treplace existing config with a default config. Default$\n"
FileWrite $0 "$\t$\t$\tis 1. If this is set to 1, values passed in$\n"
FileWrite $0 "$\t$\t$\t/minion-name and /master will be ignored$\n"
FileWrite $0 "/default-config$\t$\tOverwrite the existing config if present with the$\n"
FileWrite $0 "$\t$\t$\tdefault config for salt. Default is to use the existing$\n"
FileWrite $0 "$\t$\t$\tconfig if present. If /master and/or /minion-name is$\n"
FileWrite $0 "$\t$\t$\tpassed, those values will be used to update the new$\n"
FileWrite $0 "$\t$\t$\tdefault config$\n"
FileWrite $0 "$\n"
FileWrite $0 "$\t$\t$\tAny existing config will be backed up by appending$\n"
FileWrite $0 "$\t$\t$\ta timestamp and a .bak$ extension. That includes\n"
FileWrite $0 "$\t$\t$\tthe minion file and the minion.d directory$\n"
FileWrite $0 "$\n"
FileWrite $0 "/custom-config=$\t$\tA string value specifying the name of a custom config$\n"
FileWrite $0 "$\t$\t$\tfile in the same path as the installer or the full path$\n"
FileWrite $0 "$\t$\t$\tto a custom config file. If /master and/or /minion-name$\n"
FileWrite $0 "$\t$\t$\tis passed, those values will be used to update the new$\n"
FileWrite $0 "$\t$\t$\tcustom config$\n"
FileWrite $0 "$\n"
FileWrite $0 "$\t$\t$\tAny existing config will be backed up by appending$\n"
FileWrite $0 "$\t$\t$\ta timestamp and a .bak$ extension. That includes\n"
FileWrite $0 "$\t$\t$\tthe minion file and the minion.d directory$\n"
FileWrite $0 "$\n"
FileWrite $0 "/S$\t$\t$\tInstall Salt silently$\n"
FileWrite $0 "$\n"
@ -1215,15 +1444,23 @@ Function parseCommandLineSwitches
FileWrite $0 "${OutFile} /S /minion-name=myminion /master=master.mydomain.com /start-minion-delayed$\n"
FileWrite $0 "$\n"
FileWrite $0 "===============================================================================$\n"
FileWrite $0 "Press Enter to continue..."
FileWrite $0 "$\n"
System::Free $0
System::Free $1
System::Call 'kernel32::FreeConsole()'
# Give the user back the prompt
!define VK_RETURN 0x0D ; Enter Key
!define KEYEVENTF_EXTENDEDKEY 0x0001
!define KEYEVENTF_KEYUP 0x0002
System::Call "user32::keybd_event(i${VK_RETURN}, i0x45, i${KEYEVENTF_EXTENDEDKEY}|0, i0)"
System::Call "user32::keybd_event(i${VK_RETURN}, i0x45, i${KEYEVENTF_EXTENDEDKEY}|${KEYEVENTF_KEYUP}, i0)"
Abort
display_help_not_found:
# Set default value for Use Existing Config
StrCpy $UseExistingConfig_State 1
StrCpy $ConfigType_State "Existing Config"
# Check for start-minion switches
# /start-service is to be deprecated, so we must check for both
@ -1254,7 +1491,7 @@ Function parseCommandLineSwitches
${GetOptions} $R0 "/master=" $R1
${IfNot} $R1 == ""
StrCpy $MasterHost_State $R1
StrCpy $UseExistingConfig_State 0
StrCpy $ConfigType_State "Default Config"
${ElseIf} $MasterHost_State == ""
StrCpy $MasterHost_State "salt"
${EndIf}
@ -1264,17 +1501,26 @@ Function parseCommandLineSwitches
${GetOptions} $R0 "/minion-name=" $R1
${IfNot} $R1 == ""
StrCpy $MinionName_State $R1
StrCpy $UseExistingConfig_State 0
StrCpy $ConfigType_State "Default Config"
${ElseIf} $MinionName_State == ""
StrCpy $MinionName_State "hostname"
${EndIf}
# Use Existing Config
# Overrides above settings with user passed settings
${GetOptions} $R0 "/use-existing-config=" $R1
# Use Default Config
${GetOptions} $R0 "/default-config" $R1
IfErrors default_config_not_found
StrCpy $ConfigType_State "Default Config"
default_config_not_found:
# Use Custom Config
# Set default value for Use Custom Config
StrCpy $CustomConfig_State ""
# Existing config will get a `.bak` extension
${GetOptions} $R0 "/custom-config=" $R1
${IfNot} $R1 == ""
# Use Existing Config was passed something, set it
StrCpy $UseExistingConfig_State $R1
# Custom Config was passed something, set it
StrCpy $CustomConfig_State $R1
StrCpy $ConfigType_State "Custom Config"
${EndIf}
FunctionEnd

View file

@ -35,7 +35,7 @@ Function Get-Settings {
# Prerequisite software
$Prerequisites = @{
"NSIS" = "nsis-3.0b1-setup.exe"
"NSIS" = "nsis-3.02.1-setup.exe"
"VCforPython" = "VCForPython27.msi"
"VCppBuildTools" = "visualcppbuildtools_full.exe"
}

View file

@ -25,7 +25,7 @@ pyOpenSSL==17.5.0
python-dateutil==2.6.1
python-gnupg==0.4.1
pyyaml==3.12
pyzmq==17.0.0b3
pyzmq==16.0.3
requests==2.18.4
singledispatch==3.4.0.3
six==1.11.0

View file

@ -18,7 +18,7 @@
#======================================================================================================================
set -o nounset # Treat unset variables as an error
__ScriptVersion="2017.08.17"
__ScriptVersion="2017.12.13"
__ScriptName="bootstrap-salt.sh"
__ScriptFullName="$0"
@ -710,10 +710,24 @@ else
SETUP_PY_INSTALL_ARGS=""
fi
# Handle the insecure flags
if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then
_CURL_ARGS="${_CURL_ARGS} --insecure"
_FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer"
_GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert"
_WGET_ARGS="${_WGET_ARGS} --no-check-certificate"
else
_GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt"
fi
# Export the http_proxy configuration to our current environment
if [ "${_HTTP_PROXY}" != "" ]; then
export http_proxy="$_HTTP_PROXY"
export https_proxy="$_HTTP_PROXY"
export http_proxy="${_HTTP_PROXY}"
export https_proxy="${_HTTP_PROXY}"
# Using "deprecated" option here, but that appears the only way to make it work.
# See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=818802
# and https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1625848
_GPG_ARGS="${_GPG_ARGS},http-proxy=${_HTTP_PROXY}"
fi
# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394
@ -737,15 +751,6 @@ if [ -d "${_VIRTUALENV_DIR}" ]; then
exit 1
fi
# Handle the insecure flags
if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then
_CURL_ARGS="${_CURL_ARGS} --insecure"
_FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer"
_GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert"
_WGET_ARGS="${_WGET_ARGS} --no-check-certificate"
else
_GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt"
fi
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __fetch_url
@ -915,7 +920,7 @@ __sort_release_files() {
done
# Now let's sort by know files importance, max important goes last in the max_prio list
max_prio="redhat-release centos-release oracle-release"
max_prio="redhat-release centos-release oracle-release fedora-release"
for entry in $max_prio; do
if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then
primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\(.*\)\($entry\)\(.*\):\2 \1 \3:g")
@ -1001,7 +1006,6 @@ __gather_linux_system_info() {
echo redhat-release lsb-release
)"); do
[ -L "/etc/${rsource}" ] && continue # Don't follow symlinks
[ ! -f "/etc/${rsource}" ] && continue # Does not exist
n=$(echo "${rsource}" | sed -e 's/[_-]release$//' -e 's/[_-]version$//')
@ -1397,13 +1401,16 @@ __debian_derivatives_translation() {
# If the file does not exist, return
[ ! -f /etc/os-release ] && return
DEBIAN_DERIVATIVES="(cumulus_.+|kali|linuxmint|raspbian)"
DEBIAN_DERIVATIVES="(cumulus_.+|devuan|kali|linuxmint|raspbian)"
# Mappings
cumulus_2_debian_base="7.0"
cumulus_3_debian_base="8.0"
devuan_1_debian_base="8.0"
devuan_2_debian_base="9.0"
kali_1_debian_base="7.0"
linuxmint_1_debian_base="8.0"
raspbian_8_debian_base="8.0"
raspbian_9_debian_base="9.0"
# Translate Debian derivatives to their base Debian version
match=$(echo "$DISTRO_NAME_L" | egrep ${DEBIAN_DERIVATIVES})
@ -1414,6 +1421,10 @@ __debian_derivatives_translation() {
_major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
_debian_derivative="cumulus"
;;
devuan)
_major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
_debian_derivative="devuan"
;;
kali)
_major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
_debian_derivative="kali"
@ -1428,12 +1439,13 @@ __debian_derivatives_translation() {
;;
esac
_debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base")
_debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base" 2>/dev/null)
if [ "$_debian_version" != "" ]; then
echodebug "Detected Debian $_debian_version derivative"
DISTRO_NAME_L="debian"
DISTRO_VERSION="$_debian_version"
DISTRO_MAJOR_VERSION="$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')"
fi
fi
}
@ -1489,8 +1501,8 @@ __check_end_of_life_versions() {
# = 14.10
# = 15.04, 15.10
if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
[ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
[ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://wiki.ubuntu.com/Releases"
@ -1501,8 +1513,10 @@ __check_end_of_life_versions() {
opensuse)
# openSUSE versions not supported
#
# <= 12.1
if ([ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 1 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 12 ]; then
# <= 13.X
# <= 42.1
if [ "$DISTRO_MAJOR_VERSION" -le 13 ] || \
([ "$DISTRO_MAJOR_VERSION" -eq 42 ] && [ "$DISTRO_MINOR_VERSION" -le 1 ]); then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " http://en.opensuse.org/Lifetime"
@ -1513,21 +1527,25 @@ __check_end_of_life_versions() {
suse)
# SuSE versions not supported
#
# < 11 SP2
# < 11 SP4
# < 12 SP2
SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release )
if [ "${SUSE_PATCHLEVEL}" = "" ]; then
SUSE_PATCHLEVEL="00"
fi
if ([ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then
echoerror "Versions lower than SuSE 11 SP2 are not supported."
if [ "$DISTRO_MAJOR_VERSION" -lt 11 ] || \
([ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 04 ]) || \
([ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]); then
echoerror "Versions lower than SuSE 11 SP4 or 12 SP2 are not supported."
echoerror "Please consider upgrading to the next stable"
echoerror " https://www.suse.com/lifecycle/"
exit 1
fi
;;
fedora)
# Fedora lower than 24 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 24 ]; then
# Fedora lower than 25 are no longer supported
if [ "$DISTRO_MAJOR_VERSION" -lt 25 ]; then
echoerror "End of life distributions are not supported."
echoerror "Please consider upgrading to the next stable. See:"
echoerror " https://fedoraproject.org/wiki/Releases"
@ -1729,18 +1747,6 @@ if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]); then
exit 1
fi
# Starting from Debian 9 and Ubuntu 16.10, gnupg-curl has been renamed to gnupg1-curl.
GNUPG_CURL="gnupg-curl"
if [ "$DISTRO_NAME_L" = "debian" ]; then
if [ "$DISTRO_MAJOR_VERSION" -gt 8 ]; then
GNUPG_CURL="gnupg1-curl"
fi
elif [ "$DISTRO_NAME_L" = "ubuntu" ]; then
if [ "${DISTRO_VERSION}" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
GNUPG_CURL="gnupg1-curl"
fi
fi
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: __function_defined
@ -1786,8 +1792,6 @@ __apt_get_upgrade_noinput() {
__apt_key_fetch() {
url=$1
__apt_get_install_noinput ${GNUPG_CURL} || return 1
# shellcheck disable=SC2086
apt-key adv ${_GPG_ARGS} --fetch-keys "$url"; return $?
} # ---------- end of function __apt_key_fetch ----------
@ -2539,7 +2543,6 @@ __enable_universe_repository() {
}
__install_saltstack_ubuntu_repository() {
# Workaround for latest non-LTS ubuntu
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems."
@ -2550,15 +2553,27 @@ __install_saltstack_ubuntu_repository() {
UBUNTU_CODENAME=$DISTRO_CODENAME
fi
# SaltStack's stable Ubuntu repository:
SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/apt/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}"
echo "deb $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list
__PACKAGES=''
# Install downloader backend for GPG keys fetching
if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then
__PACKAGES="${__PACKAGES} gnupg2 dirmngr"
else
__PACKAGES="${__PACKAGES} gnupg-curl"
fi
# Make sure https transport is available
if [ "$HTTP_VAL" = "https" ] ; then
__apt_get_install_noinput apt-transport-https ca-certificates || return 1
__PACKAGES="${__PACKAGES} apt-transport-https ca-certificates"
fi
# shellcheck disable=SC2086,SC2090
__apt_get_install_noinput ${__PACKAGES} || return 1
# SaltStack's stable Ubuntu repository:
SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/apt/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}"
echo "deb $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list
__apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1
apt-get update
@ -2640,9 +2655,8 @@ install_ubuntu_stable_deps() {
__apt_get_upgrade_noinput || return 1
fi
__check_dpkg_architecture || return 1
if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then
__check_dpkg_architecture || return 1
__install_saltstack_ubuntu_repository || return 1
fi
@ -2936,18 +2950,30 @@ __install_saltstack_debian_repository() {
DEBIAN_CODENAME="$DISTRO_CODENAME"
fi
__PACKAGES=''
# Install downloader backend for GPG keys fetching
if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then
__PACKAGES="${__PACKAGES} gnupg2 dirmngr"
else
__PACKAGES="${__PACKAGES} gnupg-curl"
fi
# Make sure https transport is available
if [ "$HTTP_VAL" = "https" ] ; then
__PACKAGES="${__PACKAGES} apt-transport-https ca-certificates"
fi
# shellcheck disable=SC2086,SC2090
__apt_get_install_noinput ${__PACKAGES} || return 1
# amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location
SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/apt/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}"
echo "deb $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/saltstack.list"
if [ "$HTTP_VAL" = "https" ] ; then
__apt_get_install_noinput apt-transport-https ca-certificates || return 1
fi
__apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1
apt-get update
}
install_debian_deps() {
@ -2970,23 +2996,17 @@ install_debian_deps() {
__apt_get_upgrade_noinput || return 1
fi
__check_dpkg_architecture || return 1
# Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813
__PACKAGES='procps pciutils'
# YAML module is used for generating custom master/minion configs
__PACKAGES="${__PACKAGES} python-yaml"
# Debian 9 needs the dirmgr package in order to import the GPG key later
if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then
__PACKAGES="${__PACKAGES} dirmngr"
fi
# shellcheck disable=SC2086
__apt_get_install_noinput ${__PACKAGES} || return 1
if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then
__check_dpkg_architecture || return 1
__install_saltstack_debian_repository || return 1
fi
@ -3318,7 +3338,13 @@ install_fedora_deps() {
__install_saltstack_copr_salt_repository || return 1
fi
__PACKAGES="yum-utils PyYAML libyaml python-crypto python-jinja2 python-zmq python2-msgpack python2-requests"
__PACKAGES="PyYAML libyaml python-crypto python-jinja2 python-zmq python2-msgpack python2-requests"
if [ "$DISTRO_MAJOR_VERSION" -lt 26 ]; then
__PACKAGES="${__PACKAGES} yum-utils"
else
__PACKAGES="${__PACKAGES} dnf-utils"
fi
# shellcheck disable=SC2086
dnf install -y ${__PACKAGES} || return 1
@ -4404,7 +4430,7 @@ install_amazon_linux_ami_deps() {
repo_rev="$(echo "${STABLE_REV}" | sed 's|.*\/||g')"
if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$' || \
( echo "$repo_rev" | egrep -q '^[0-9]+$' && [ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ] ); then
[ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ]; then
_USEAWS=$BS_TRUE
pkg_append="python27"
fi
@ -5075,7 +5101,7 @@ install_freebsd_restart_daemons() {
__choose_openbsd_mirror() {
OPENBSD_REPO=''
MINTIME=''
MIRROR_LIST=$(ftp -w 15 -Vao - 'http://ftp.openbsd.org/cgi-bin/ftplist.cgi?dbversion=1' | awk '/^http/ {print $1}')
MIRROR_LIST=$(ftp -w 15 -Vao - 'https://ftp.openbsd.org/cgi-bin/ftplist.cgi?dbversion=1' | awk '/^http/ {print $1}')
for MIRROR in $MIRROR_LIST; do
MIRROR_HOST=$(echo "$MIRROR" | sed -e 's|.*//||' -e 's|+*/.*$||')
@ -5096,10 +5122,12 @@ __choose_openbsd_mirror() {
}
install_openbsd_deps() {
__choose_openbsd_mirror || return 1
echoinfo "setting package repository to $OPENBSD_REPO with ping time of $MINTIME"
[ -n "$OPENBSD_REPO" ] || return 1
echo "${OPENBSD_REPO}" >>/etc/installurl || return 1
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
__choose_openbsd_mirror || return 1
echoinfo "setting package repository to $OPENBSD_REPO with ping time of $MINTIME"
[ -n "$OPENBSD_REPO" ] || return 1
echo "${OPENBSD_REPO}" >>/etc/installurl || return 1
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
@ -5383,9 +5411,30 @@ install_smartos_restart_daemons() {
#
__ZYPPER_REQUIRES_REPLACE_FILES=-1
__set_suse_pkg_repo() {
# Set distro repo variable
if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then
DISTRO_REPO="openSUSE_Tumbleweed"
elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ]; then
DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}"
elif [ "${DISTRO_MAJOR_VERSION}" -lt 42 ]; then
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}"
fi
if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then
suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack"
suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo"
else
suse_pkg_url_base="${HTTP_VAL}://repo.saltstack.com/opensuse"
suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo"
fi
SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path"
}
__check_and_refresh_suse_pkg_repo() {
# Check to see if systemsmanagement_saltstack exists
__zypper repos | grep systemsmanagement_saltstack >/dev/null 2>&1
__zypper repos | grep -q systemsmanagement_saltstack
if [ $? -eq 1 ]; then
# zypper does not yet know anything about systemsmanagement_saltstack
@ -5393,16 +5442,6 @@ __check_and_refresh_suse_pkg_repo() {
fi
}
__set_suse_pkg_repo() {
suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo"
if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then
suse_pkg_url_base="http://download.opensuse.org/repositories/systemsmanagement:/saltstack"
else
suse_pkg_url_base="${HTTP_VAL}://repo.saltstack.com/opensuse"
fi
SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path"
}
__version_lte() {
if ! __check_command_exists python; then
zypper zypper --non-interactive install --replacefiles --auto-agree-with-licenses python || \
@ -5435,14 +5474,6 @@ __zypper_install() {
}
install_opensuse_stable_deps() {
if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then
DISTRO_REPO="openSUSE_Tumbleweed"
elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ]; then
DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}"
elif [ "${DISTRO_MAJOR_VERSION}" -lt 42 ]; then
DISTRO_REPO="openSUSE_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}"
fi
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
# Is the repository already known
__set_suse_pkg_repo
@ -5466,25 +5497,14 @@ install_opensuse_stable_deps() {
__zypper --gpg-auto-import-keys update || return 1
fi
# YAML module is used for generating custom master/minion configs
# requests is still used by many salt modules
# Salt needs python-zypp installed in order to use the zypper module
__PACKAGES="python-zypp"
__PACKAGES="${__PACKAGES} python python-Jinja2 python-M2Crypto python-PyYAML python-requests"
__PACKAGES="${__PACKAGES} python-msgpack-python python-pycrypto python-pyzmq python-xml"
if [ "$DISTRO_MAJOR_VERSION" -lt 13 ]; then
__PACKAGES="${__PACKAGES} libzmq3"
elif [ "$DISTRO_MAJOR_VERSION" -eq 13 ]; then
__PACKAGES="${__PACKAGES} libzmq3"
elif [ "$DISTRO_MAJOR_VERSION" -gt 13 ]; then
__PACKAGES="${__PACKAGES} libzmq5"
fi
__PACKAGES="python-PyYAML python-requests python-zypp"
# shellcheck disable=SC2086
__zypper_install ${__PACKAGES} || return 1
# Fix for OpenSUSE 13.2 and 2015.8 - gcc should not be required. Work around until package is fixed by SuSE
_EXTRA_PACKAGES="${_EXTRA_PACKAGES} gcc python-devel libgit2-devel"
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5509,7 +5529,7 @@ install_opensuse_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES=""
__PACKAGES="libzmq5 python-Jinja2 python-msgpack-python python-pycrypto python-pyzmq python-xml"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5678,18 +5698,6 @@ install_opensuse_check_services() {
#
install_suse_12_stable_deps() {
SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release )
if [ "${SUSE_PATCHLEVEL}" != "" ]; then
DISTRO_PATCHLEVEL="_SP${SUSE_PATCHLEVEL}"
fi
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}${DISTRO_PATCHLEVEL}"
# SLES 12 repo name does not use a patch level so PATCHLEVEL will need to be updated with SP1
#DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}${DISTRO_PATCHLEVEL}"
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}"
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
# Is the repository already known
__set_suse_pkg_repo
@ -5703,18 +5711,10 @@ install_suse_12_stable_deps() {
__zypper --gpg-auto-import-keys update || return 1
fi
# YAML module is used for generating custom master/minion configs
# requests is still used by many salt modules
# Salt needs python-zypp installed in order to use the zypper module
__PACKAGES="python-zypp"
# shellcheck disable=SC2089
__PACKAGES="${__PACKAGES} libzmq5 python python-Jinja2 python-msgpack-python"
__PACKAGES="${__PACKAGES} python-pycrypto python-pyzmq python-pip python-xml python-requests"
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
__check_pip_allowed
echowarn "PyYaml will be installed using pip"
else
__PACKAGES="${__PACKAGES} python-PyYAML"
fi
__PACKAGES="python-PyYAML python-requests python-zypp"
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
__PACKAGES="${__PACKAGES} python-apache-libcloud"
@ -5723,41 +5723,6 @@ install_suse_12_stable_deps() {
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
# There's no python-PyYaml in SP1, let's install it using pip
pip install PyYaml || return 1
fi
# PIP based installs need to copy configuration files "by hand".
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
# Let's trigger config_salt()
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
# Let's set the configuration directory to /tmp
_TEMP_CONFIG_DIR="/tmp"
CONFIG_SALT_FUNC="config_salt"
for fname in api master minion syndic; do
# Skip salt-api since there is no example config for it in the Salt git repo
[ $fname = "api" ] && continue
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# Syndic uses the same configuration file as the master
[ $fname = "syndic" ] && fname=master
# Let's download, since they were not provided, the default configuration files
if [ ! -f "$_SALT_ETC_DIR/$fname" ] && [ ! -f "$_TEMP_CONFIG_DIR/$fname" ]; then
# shellcheck disable=SC2086
curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/$fname" -L \
"https://raw.githubusercontent.com/saltstack/salt/develop/conf/$fname" || return 1
fi
done
fi
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5777,6 +5742,9 @@ install_suse_12_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES=""
# shellcheck disable=SC2089
__PACKAGES="${__PACKAGES} libzmq3 python-Jinja2 python-msgpack-python python-pycrypto"
__PACKAGES="${__PACKAGES} python-pyzmq python-xml"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5803,14 +5771,7 @@ install_suse_12_git_deps() {
}
install_suse_12_stable() {
if [ "$SUSE_PATCHLEVEL" -gt 1 ]; then
install_opensuse_stable || return 1
else
# USE_SETUPTOOLS=1 To work around
# error: option --single-version-externally-managed not recognized
USE_SETUPTOOLS=1 pip install salt || return 1
fi
install_opensuse_stable || return 1
return 0
}
@ -5820,34 +5781,7 @@ install_suse_12_git() {
}
install_suse_12_stable_post() {
if [ "$SUSE_PATCHLEVEL" -gt 1 ]; then
install_opensuse_stable_post || return 1
else
for fname in api master minion syndic; do
# Skip if not meant to be installed
[ $fname = "api" ] && \
([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
if [ -f /bin/systemctl ]; then
# shellcheck disable=SC2086
curl $_CURL_ARGS -L "https://github.com/saltstack/salt/raw/develop/pkg/salt-$fname.service" \
-o "/usr/lib/systemd/system/salt-$fname.service" || return 1
fi
# Skip salt-api since the service should be opt-in and not necessarily started on boot
[ $fname = "api" ] && continue
if [ -f /bin/systemctl ]; then
systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service)
sleep 0.1
systemctl daemon-reload
fi
done
fi
install_opensuse_stable_post || return 1
return 0
}
@ -5872,16 +5806,6 @@ install_suse_12_restart_daemons() {
#
install_suse_11_stable_deps() {
SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release )
if [ "${SUSE_PATCHLEVEL}" != "" ]; then
if [ "${SUSE_PATCHLEVEL}" != "4" ]; then
echowarn "Salt packages for SLE 11 are only build for SP4."
echowarn "Attempting to install SP4 packages on SP${SUSE_PATCHLEVEL}."
fi
DISTRO_PATCHLEVEL="_SP4"
fi
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}${DISTRO_PATCHLEVEL}"
if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
# Is the repository already known
__set_suse_pkg_repo
@ -5895,57 +5819,12 @@ install_suse_11_stable_deps() {
__zypper --gpg-auto-import-keys update || return 1
fi
# Salt needs python-zypp installed in order to use the zypper module
__PACKAGES="python-zypp"
# shellcheck disable=SC2089
__PACKAGES="${__PACKAGES} libzmq5 python python-Jinja2 python-msgpack-python"
__PACKAGES="${__PACKAGES} python-pycrypto python-pyzmq python-pip python-xml python-requests"
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
__check_pip_allowed
echowarn "PyYaml will be installed using pip"
else
__PACKAGES="${__PACKAGES} python-PyYAML"
fi
# YAML module is used for generating custom master/minion configs
__PACKAGES="python-PyYAML"
# shellcheck disable=SC2086,SC2090
__zypper_install ${__PACKAGES} || return 1
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
# There's no python-PyYaml in SP1, let's install it using pip
pip install PyYaml || return 1
fi
# PIP based installs need to copy configuration files "by hand".
if [ "$SUSE_PATCHLEVEL" -eq 1 ]; then
# Let's trigger config_salt()
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
# Let's set the configuration directory to /tmp
_TEMP_CONFIG_DIR="/tmp"
CONFIG_SALT_FUNC="config_salt"
for fname in api master minion syndic; do
# Skip salt-api since there is no example config for it in the Salt git repo
[ $fname = "api" ] && continue
# Skip if not meant to be installed
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
# Syndic uses the same configuration file as the master
[ $fname = "syndic" ] && fname=master
# Let's download, since they were not provided, the default configuration files
if [ ! -f "$_SALT_ETC_DIR/$fname" ] && [ ! -f "$_TEMP_CONFIG_DIR/$fname" ]; then
# shellcheck disable=SC2086
curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/$fname" -L \
"https://raw.githubusercontent.com/saltstack/salt/develop/conf/$fname" || return 1
fi
done
fi
fi
if [ "${_EXTRA_PACKAGES}" != "" ]; then
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
# shellcheck disable=SC2086
@ -5965,6 +5844,9 @@ install_suse_11_git_deps() {
__git_clone_and_checkout || return 1
__PACKAGES=""
# shellcheck disable=SC2089
__PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto"
__PACKAGES="${__PACKAGES} python-pyzmq python-xml python-zypp"
if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
# We're on the develop branch, install whichever tornado is on the requirements file
@ -5991,13 +5873,7 @@ install_suse_11_git_deps() {
}
install_suse_11_stable() {
if [ "$SUSE_PATCHLEVEL" -gt 1 ]; then
install_opensuse_stable || return 1
else
# USE_SETUPTOOLS=1 To work around
# error: option --single-version-externally-managed not recognized
USE_SETUPTOOLS=1 pip install salt || return 1
fi
install_opensuse_stable || return 1
return 0
}
@ -6007,32 +5883,7 @@ install_suse_11_git() {
}
install_suse_11_stable_post() {
if [ "$SUSE_PATCHLEVEL" -gt 1 ]; then
install_opensuse_stable_post || return 1
else
for fname in api master minion syndic; do
# Skip if not meant to be installed
[ $fname = "api" ] && \
([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue
[ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
[ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
if [ -f /bin/systemctl ]; then
# shellcheck disable=SC2086
curl $_CURL_ARGS -L "https://github.com/saltstack/salt/raw/develop/pkg/salt-$fname.service" \
-o "/lib/systemd/system/salt-$fname.service" || return 1
continue
fi
# shellcheck disable=SC2086
curl $_CURL_ARGS -L "https://github.com/saltstack/salt/raw/develop/pkg/rpm/salt-$fname" \
-o "/etc/init.d/salt-$fname" || return 1
chmod +x "/etc/init.d/salt-$fname"
done
fi
install_opensuse_stable_post || return 1
return 0
}
@ -6050,6 +5901,8 @@ install_suse_11_restart_daemons() {
#
# End of SUSE Enterprise 11
#
#######################################################################################################################
#######################################################################################################################
#
# SUSE Enterprise General Functions
@ -6078,7 +5931,7 @@ install_suse_check_services() {
}
#
# SUSE Enterprise General Functions
# End of SUSE Enterprise General Functions
#
#######################################################################################################################
@ -6698,7 +6551,7 @@ for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do
done
echodebug "CHECK_SERVICES_FUNC=${CHECK_SERVICES_FUNC}"
if [ "$DEPS_INSTALL_FUNC" = "null" ]; then
if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ "$DEPS_INSTALL_FUNC" = "null" ]; then
echoerror "No dependencies installation function found. Exiting..."
exit 1
fi
@ -6709,7 +6562,7 @@ if [ "$INSTALL_FUNC" = "null" ]; then
fi
# Install dependencies
if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_FALSE ]; then
# Only execute function is not in config mode only
echoinfo "Running ${DEPS_INSTALL_FUNC}()"
$DEPS_INSTALL_FUNC
@ -6725,7 +6578,7 @@ if [ "$_CUSTOM_MASTER_CONFIG" != "null" ] || [ "$_CUSTOM_MINION_CONFIG" != "null
_TEMP_CONFIG_DIR="$_SALT_ETC_DIR"
fi
if [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then
if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_TRUE ]; then
# Execute function to satisfy dependencies for configuration step
echoinfo "Running ${DEPS_INSTALL_FUNC}()"
$DEPS_INSTALL_FUNC

View file

@ -1005,7 +1005,7 @@ VALID_OPTS = {
'queue_dirs': list,
# Instructs the minion to ping its master(s) every n number of seconds. Used
# Instructs the minion to ping its master(s) every n number of minutes. Used
# primarily as a mitigation technique against minion disconnects.
'ping_interval': int,
@ -1180,6 +1180,9 @@ VALID_OPTS = {
# SSDP discovery pause between the attempts
'pause': int,
# Scheduler should be a dictionary
'schedule': dict,
}
# default configurations
@ -1471,6 +1474,7 @@ DEFAULT_MINION_OPTS = {
'match': 'any',
'mapping': {},
},
'schedule': {},
}
DEFAULT_MASTER_OPTS = {
@ -1793,6 +1797,7 @@ DEFAULT_MASTER_OPTS = {
'port': 4520,
'mapping': {},
},
'schedule': {},
}
@ -3754,10 +3759,6 @@ def apply_minion_config(overrides=None,
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
# if there is no schedule option yet, add an empty scheduler
if 'schedule' not in opts:
opts['schedule'] = {}
# Make sure hash_type is lowercase
opts['hash_type'] = opts['hash_type'].lower()

View file

@ -810,12 +810,14 @@ def _virtual(osdata):
pass
if os.path.isfile('/proc/1/cgroup'):
try:
with salt.utils.files.fopen('/proc/1/cgroup', 'r') as fhr:
if ':/lxc/' in fhr.read():
grains['virtual_subtype'] = 'LXC'
with salt.utils.files.fopen('/proc/1/cgroup', 'r') as fhr:
fhr_contents = fhr.read()
if ':/docker/' in fhr_contents or ':/system.slice/docker' in fhr_contents:
if ':/lxc/' in fhr_contents:
grains['virtual_subtype'] = 'LXC'
else:
if any(x in fhr_contents
for x in (':/system.slice/docker', ':/docker/',
':/docker-ce/')):
grains['virtual_subtype'] = 'Docker'
except IOError:
pass
@ -2550,7 +2552,7 @@ def _windows_wwns():
'''
Return Fibre Channel port WWNs from a Windows host.
'''
ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ps_cmd = r'Get-WmiObject -ErrorAction Stop -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'
ret = []

View file

@ -533,14 +533,14 @@ def tar(options, tarfile, sources=None, dest=None,
raise SaltInvocationError('Tar options can not be empty')
cmd = ['tar']
if dest:
cmd.extend(['-C', '{0}'.format(dest)])
if options:
cmd.extend(options.split())
cmd.extend(['{0}'.format(tarfile)])
cmd.extend(_expand_sources(sources))
if dest:
cmd.extend(['-C', '{0}'.format(dest)])
return __salt__['cmd.run'](cmd,
cwd=cwd,
template=template,

View file

@ -2,7 +2,7 @@
'''
Connection module for Amazon S3 using boto3
.. versionadded:: develop
.. versionadded:: Oxygen
:configuration: This module accepts explicit AWS credentials but can also
utilize IAM roles assigned to the instance through Instance Profiles or

View file

@ -3241,6 +3241,13 @@ def powershell(cmd,
else:
encoded_cmd = False
# Put the whole command inside a try / catch block
# Some errors in PowerShell are not "Terminating Errors" and will not be
# caught in a try/catch block. For example, the `Get-WmiObject` command will
# often return a "Non Terminating Error". To fix this, make sure
# `-ErrorAction Stop` is set in the powershell command
cmd = 'try {' + cmd + '} catch { "{}" | ConvertTo-JSON}'
# Retrieve the response, while overriding shell with 'powershell'
response = run(cmd,
cwd=cwd,

View file

@ -3128,12 +3128,13 @@ def _getDataFromRegPolData(search_string, policy_data, return_value_name=False):
'''
value = None
values = []
encoded_semicolon = ';'.encode('utf-16-le')
if return_value_name:
values = {}
if search_string:
registry = Registry()
if len(search_string.split('{0};'.format(chr(0)))) >= 3:
vtype = registry.vtype_reverse[ord(search_string.split('{0};'.format(chr(0)))[2])]
if len(search_string.split(encoded_semicolon)) >= 3:
vtype = registry.vtype_reverse[ord(search_string.split(encoded_semicolon)[2].decode('utf-32-le'))]
else:
vtype = None
search_string = re.escape(search_string)
@ -3141,29 +3142,28 @@ def _getDataFromRegPolData(search_string, policy_data, return_value_name=False):
matches = [m for m in matches]
if matches:
for match in matches:
pol_entry = policy_data[match.start():(policy_data.index(']',
pol_entry = policy_data[match.start():(policy_data.index(']'.encode('utf-16-le'),
match.end())
)
].split('{0};'.format(chr(0)))
].split(encoded_semicolon)
if len(pol_entry) >= 2:
valueName = pol_entry[1]
if len(pol_entry) >= 5:
value = pol_entry[4]
if vtype == 'REG_DWORD' or vtype == 'REG_QWORD':
if value:
vlist = list(ord(v) for v in value)
if vtype == 'REG_DWORD':
for v in struct.unpack('I', struct.pack('2H', *vlist)):
for v in struct.unpack('I', value):
value = v
elif vtype == 'REG_QWORD':
for v in struct.unpack('I', struct.pack('4H', *vlist)):
for v in struct.unpack('Q', value):
value = v
else:
value = 0
elif vtype == 'REG_MULTI_SZ':
value = value.rstrip(chr(0)).split(chr(0))
value = value.decode('utf-16-le').rstrip(chr(0)).split(chr(0))
else:
value = value.rstrip(chr(0))
value = value.decode('utf-16-le').rstrip(chr(0))
if return_value_name:
log.debug('we want value names and the value')
values[valueName] = value
@ -3274,35 +3274,52 @@ def _buildKnownDataSearchString(reg_key, reg_valueName, reg_vtype, reg_data,
'''
registry = Registry()
this_element_value = None
expected_string = ''
expected_string = b''
encoded_semicolon = ';'.encode('utf-16-le')
encoded_null = chr(0).encode('utf-16-le')
if reg_key:
reg_key = reg_key.encode('utf-16-le')
if reg_valueName:
reg_valueName = reg_valueName.encode('utf-16-le')
if reg_data and not check_deleted:
if reg_vtype == 'REG_DWORD':
this_element_value = ''
for v in struct.unpack('2H', struct.pack('I', int(reg_data))):
this_element_value = this_element_value + six.unichr(v)
elif reg_vtype == 'REG_QWORD':
this_element_value = ''
for v in struct.unpack('4H', struct.pack('I', int(reg_data))):
this_element_value = this_element_value + six.unichr(v)
this_element_value = struct.pack('I', int(reg_data))
elif reg_vtype == "REG_QWORD":
this_element_value = struct.pack('Q', int(reg_data))
elif reg_vtype == 'REG_SZ':
this_element_value = '{0}{1}'.format(reg_data, chr(0))
this_element_value = b''.join([reg_data.encode('utf-16-le'),
encoded_null])
if check_deleted:
reg_vtype = 'REG_SZ'
expected_string = u'[{1}{0};**del.{2}{0};{3}{0};{4}{0};{5}{0}]'.format(
chr(0),
reg_key,
reg_valueName,
chr(registry.vtype[reg_vtype]),
six.unichr(len(' {0}'.format(chr(0)).encode('utf-16-le'))),
' ')
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
'**del.'.encode('utf-16-le'),
reg_valueName,
encoded_null,
encoded_semicolon,
chr(registry.vtype[reg_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len(' {0}'.format(chr(0)).encode('utf-16-le'))).encode('utf-32-le'),
encoded_semicolon,
' '.encode('utf-16-le'),
encoded_null,
']'.encode('utf-16-le')])
else:
expected_string = u'[{1}{0};{2}{0};{3}{0};{4}{0};{5}]'.format(
chr(0),
reg_key,
reg_valueName,
chr(registry.vtype[reg_vtype]),
six.unichr(len(this_element_value.encode('utf-16-le'))),
this_element_value)
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
reg_valueName,
encoded_null,
encoded_semicolon,
chr(registry.vtype[reg_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len(this_element_value)).encode('utf-32-le'),
encoded_semicolon,
this_element_value,
']'.encode('utf-16-le')])
return expected_string
@ -3330,13 +3347,16 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
expected_string = None
# https://msdn.microsoft.com/en-us/library/dn606006(v=vs.85).aspx
this_vtype = 'REG_SZ'
standard_layout = u'[{1}{0};{2}{0};{3}{0};{4}{0};{5}]'
encoded_semicolon = ';'.encode('utf-16-le')
encoded_null = chr(0).encode('utf-16-le')
if reg_key:
reg_key = reg_key.encode('utf-16-le')
if reg_valuename:
reg_valuename = reg_valuename.encode('utf-16-le')
if etree.QName(element).localname == 'decimal' and etree.QName(parent_element).localname != 'elements':
this_vtype = 'REG_DWORD'
if 'value' in element.attrib:
this_element_value = ''
for val in struct.unpack('2H', struct.pack('I', int(element.attrib['value']))):
this_element_value = this_element_value + six.unichr(val)
this_element_value = struct.pack('I', int(element.attrib['value']))
else:
msg = ('The {2} child {1} element for the policy with attributes: '
'{0} does not have the required "value" attribute. The '
@ -3351,9 +3371,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
# server, so untested/assumed
this_vtype = 'REG_QWORD'
if 'value' in element.attrib:
this_element_value = ''
for val in struct.unpack('4H', struct.pack('I', int(element.attrib['value']))):
this_element_value = this_element_value + six.unichr(val)
this_element_value = struct.pack('Q', int(element.attrib['value']))
else:
msg = ('The {2} child {1} element for the policy with attributes: '
'{0} does not have the required "value" attribute. The '
@ -3365,7 +3383,8 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
return None
elif etree.QName(element).localname == 'string':
this_vtype = 'REG_SZ'
this_element_value = '{0}{1}'.format(element.text, chr(0))
this_element_value = b''.join([element.text.encode('utf-16-le'),
encoded_null])
elif etree.QName(parent_element).localname == 'elements':
standard_element_expected_string = True
if etree.QName(element).localname == 'boolean':
@ -3376,22 +3395,19 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
check_deleted = True
if not check_deleted:
this_vtype = 'REG_DWORD'
this_element_value = chr(1)
this_element_value = chr(1).encode('utf-16-le')
standard_element_expected_string = False
elif etree.QName(element).localname == 'decimal':
# https://msdn.microsoft.com/en-us/library/dn605987(v=vs.85).aspx
this_vtype = 'REG_DWORD'
requested_val = this_element_value
if this_element_value is not None:
temp_val = ''
for v in struct.unpack('2H', struct.pack('I', int(this_element_value))):
temp_val = temp_val + six.unichr(v)
this_element_value = temp_val
this_element_value = struct.pack('I', int(this_element_value))
if 'storeAsText' in element.attrib:
if element.attrib['storeAsText'].lower() == 'true':
this_vtype = 'REG_SZ'
if requested_val is not None:
this_element_value = str(requested_val)
this_element_value = str(requested_val).encode('utf-16-le')
if check_deleted:
this_vtype = 'REG_SZ'
elif etree.QName(element).localname == 'longDecimal':
@ -3399,15 +3415,12 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
this_vtype = 'REG_QWORD'
requested_val = this_element_value
if this_element_value is not None:
temp_val = ''
for v in struct.unpack('4H', struct.pack('I', int(this_element_value))):
temp_val = temp_val + six.unichr(v)
this_element_value = temp_val
this_element_value = struct.pack('Q', int(this_element_value))
if 'storeAsText' in element.attrib:
if element.attrib['storeAsText'].lower() == 'true':
this_vtype = 'REG_SZ'
if requested_val is not None:
this_element_value = str(requested_val)
this_element_value = str(requested_val).encode('utf-16-le')
elif etree.QName(element).localname == 'text':
# https://msdn.microsoft.com/en-us/library/dn605969(v=vs.85).aspx
this_vtype = 'REG_SZ'
@ -3415,14 +3428,15 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
if element.attrib['expandable'].lower() == 'true':
this_vtype = 'REG_EXPAND_SZ'
if this_element_value is not None:
this_element_value = '{0}{1}'.format(this_element_value, chr(0))
this_element_value = b''.join([this_element_value.encode('utf-16-le'),
encoded_null])
elif etree.QName(element).localname == 'multiText':
this_vtype = 'REG_MULTI_SZ'
if this_element_value is not None:
this_element_value = '{0}{1}{1}'.format(chr(0).join(this_element_value), chr(0))
elif etree.QName(element).localname == 'list':
standard_element_expected_string = False
del_keys = ''
del_keys = b''
element_valuenames = []
element_values = this_element_value
if this_element_value is not None:
@ -3431,12 +3445,20 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
if element.attrib['additive'].lower() == 'false':
# a delete values will be added before all the other
# value = data pairs
del_keys = u'[{1}{0};**delvals.{0};{2}{0};{3}{0};{4}{0}]'.format(
chr(0),
reg_key,
chr(registry.vtype[this_vtype]),
chr(len(' {0}'.format(chr(0)).encode('utf-16-le'))),
' ')
del_keys = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
'**delvals.'.encode('utf-16-le'),
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
chr(len(' {0}'.format(chr(0)).encode('utf-16-le'))).encode('utf-32-le'),
encoded_semicolon,
' '.encode('utf-16-le'),
encoded_null,
']'.encode('utf-16-le')])
if 'expandable' in element.attrib:
this_vtype = 'REG_EXPAND_SZ'
if 'explicitValue' in element.attrib and element.attrib['explicitValue'].lower() == 'true':
@ -3455,61 +3477,103 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
log.debug('element_valuenames == {0} and element_values == {1}'.format(element_valuenames,
element_values))
for i, item in enumerate(element_valuenames):
expected_string = expected_string + standard_layout.format(
chr(0),
reg_key,
element_valuenames[i],
chr(registry.vtype[this_vtype]),
six.unichr(len('{0}{1}'.format(element_values[i],
chr(0)).encode('utf-16-le'))),
'{0}{1}'.format(element_values[i], chr(0)))
expected_string = expected_string + b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
element_valuenames[i].encode('utf-16-le'),
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len('{0}{1}'.format(element_values[i],
chr(0)).encode('utf-16-le'))).encode('utf-32-le'),
encoded_semicolon,
b''.join([element_values[i].encode('utf-16-le'),
encoded_null]),
']'.encode('utf-16-le')])
else:
expected_string = del_keys + r'[{1}{0};'.format(chr(0),
reg_key)
expected_string = del_keys + b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon])
else:
expected_string = u'[{1}{0};**delvals.{0};{2}{0};{3}{0};{4}{0}]'.format(
chr(0),
reg_key,
chr(registry.vtype[this_vtype]),
chr(len(' {0}'.format(chr(0)).encode('utf-16-le'))),
' ')
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
'**delvals.'.encode('utf-16-le'),
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
chr(len(' {0}'.format(chr(0)))).encode('utf-32-le'),
encoded_semicolon,
' '.encode('utf-16-le'),
encoded_null,
']'.encode('utf-16-le')])
elif etree.QName(element).localname == 'enum':
if this_element_value is not None:
pass
if standard_element_expected_string and not check_deleted:
if this_element_value is not None:
expected_string = standard_layout.format(
chr(0),
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]),
six.unichr(len(this_element_value.encode('utf-16-le'))),
this_element_value)
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
reg_valuename,
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len(this_element_value)).encode('utf-32-le'),
encoded_semicolon,
this_element_value,
']'.encode('utf-16-le')])
else:
expected_string = u'[{1}{0};{2}{0};{3}{0};'.format(chr(0),
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]))
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
reg_valuename,
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon])
if not expected_string:
if etree.QName(element).localname == "delete" or check_deleted:
# delete value
expected_string = u'[{1}{0};**del.{2}{0};{3}{0};{4}{0};{5}{0}]'.format(
chr(0),
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]),
six.unichr(len(' {0}'.format(chr(0)).encode('utf-16-le'))),
' ')
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
'**del.'.encode('utf-16-le'),
reg_valuename,
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len(' {0}'.format(chr(0)).encode('utf-16-le'))).encode('utf-32-le'),
encoded_semicolon,
' '.encode('utf-16-le'),
encoded_null,
']'.encode('utf-16-le')])
else:
expected_string = standard_layout.format(
chr(0),
reg_key,
reg_valuename,
chr(registry.vtype[this_vtype]),
six.unichr(len(this_element_value.encode('utf-16-le', '' if six.PY2 else 'surrogatepass'))),
this_element_value)
expected_string = b''.join(['['.encode('utf-16-le'),
reg_key,
encoded_null,
encoded_semicolon,
reg_valuename,
encoded_null,
encoded_semicolon,
chr(registry.vtype[this_vtype]).encode('utf-32-le'),
encoded_semicolon,
six.unichr(len(this_element_value)).encode('utf-32-le'),
encoded_semicolon,
this_element_value,
']'.encode('utf-16-le')])
return expected_string
@ -3534,17 +3598,16 @@ def _checkAllAdmxPolicies(policy_class,
full_names = {}
if policy_filedata:
log.debug('POLICY CLASS {0} has file data'.format(policy_class))
policy_filedata_split = re.sub(r'\]$',
'',
re.sub(r'^\[',
'',
policy_filedata.replace(module_policy_data.reg_pol_header, ''))
).split('][')
policy_filedata_split = re.sub(salt.utils.to_bytes(r'\]{0}$'.format(chr(0))),
b'',
re.sub(salt.utils.to_bytes(r'^\[{0}'.format(chr(0))),
b'',
re.sub(re.escape(module_policy_data.reg_pol_header.encode('utf-16-le')), b'', policy_filedata))
).split(']['.encode('utf-16-le'))
for policy_item in policy_filedata_split:
policy_item_key = policy_item.split('{0};'.format(chr(0)))[0]
policy_item_key = policy_item.split('{0};'.format(chr(0)).encode('utf-16-le'))[0].decode('utf-16-le').lower()
if policy_item_key:
for admx_item in REGKEY_XPATH(admx_policy_definitions, keyvalue=policy_item_key.lower()):
for admx_item in REGKEY_XPATH(admx_policy_definitions, keyvalue=policy_item_key):
if etree.QName(admx_item).localname == 'policy':
if admx_item not in admx_policies:
admx_policies.append(admx_item)
@ -3607,8 +3670,11 @@ def _checkAllAdmxPolicies(policy_class,
break
this_policynamespace = admx_policy.nsmap[admx_policy.prefix]
if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
# some policies have a disabled list but not an enabled list
# added this to address those issues
if DISABLED_LIST_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
this_policyname,
this_key,
@ -3621,8 +3687,11 @@ def _checkAllAdmxPolicies(policy_class,
policy_vals[this_policynamespace] = {}
policy_vals[this_policynamespace][this_policyname] = this_policy_setting
if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured':
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
# some policies have a disabled list but not an enabled list
# added this to address those issues
if ENABLED_LIST_XPATH(admx_policy):
element_only_enabled_disabled = False
explicit_enable_disable_value_setting = True
if _checkValueItemParent(admx_policy,
this_policyname,
this_key,
@ -3847,7 +3916,7 @@ def _checkAllAdmxPolicies(policy_class,
admx_policy,
elements_item,
check_deleted=False)
) + r'(?!\*\*delvals\.)',
) + salt.utils.to_bytes(r'(?!\*\*delvals\.)'),
policy_filedata):
configured_value = _getDataFromRegPolData(_processValueItem(child_item,
child_key,
@ -4040,7 +4109,6 @@ def _read_regpol_file(reg_pol_path):
if os.path.exists(reg_pol_path):
with salt.utils.files.fopen(reg_pol_path, 'rb') as pol_file:
returndata = pol_file.read()
returndata = returndata.decode('utf-16-le')
return returndata
@ -4050,12 +4118,13 @@ def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey):
for a policy_regpath and policy_regkey combo
'''
if policy_data:
specialValueRegex = r'(\*\*Del\.|\*\*DelVals\.){0,1}'
_thisSearch = r'\[{1}{0};{3}{2}{0};'.format(
chr(0),
re.escape(policy_regpath),
re.escape(policy_regkey),
specialValueRegex)
specialValueRegex = salt.utils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}')
_thisSearch = b''.join([salt.utils.to_bytes(r'\['),
re.escape(policy_regpath),
b'\00;',
specialValueRegex,
re.escape(policy_regkey),
b'\00;'])
match = re.search(_thisSearch, policy_data, re.IGNORECASE)
if match:
return policy_data[match.start():(policy_data.index(']', match.end())) + 1]
@ -4086,9 +4155,9 @@ def _write_regpol_data(data_to_write,
if not os.path.exists(policy_file_path):
ret = __salt__['file.makedirs'](policy_file_path)
with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file:
if not data_to_write.startswith(reg_pol_header):
if not data_to_write.startswith(reg_pol_header.encode('utf-16-le')):
pol_file.write(reg_pol_header.encode('utf-16-le'))
pol_file.write(data_to_write.encode('utf-16-le'))
pol_file.write(data_to_write)
try:
gpt_ini_data = ''
if os.path.exists(gpt_ini_path):
@ -4164,13 +4233,14 @@ def _policyFileReplaceOrAppendList(string_list, policy_data):
update existing strings or append the strings
'''
if not policy_data:
policy_data = ''
policy_data = b''
# we are going to clean off the special pre-fixes, so we get only the valuename
specialValueRegex = r'(\*\*Del\.|\*\*DelVals\.){0,1}'
specialValueRegex = salt.utils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}')
for this_string in string_list:
list_item_key = this_string.split('{0};'.format(chr(0)))[0].lstrip('[')
list_item_key = this_string.split(b'\00;')[0].lstrip(b'[')
list_item_value_name = re.sub(specialValueRegex,
'', this_string.split('{0};'.format(chr(0)))[1],
b'',
this_string.split(b'\00;')[1],
flags=re.IGNORECASE)
log.debug('item value name is {0}'.format(list_item_value_name))
data_to_replace = _regexSearchKeyValueCombo(policy_data,
@ -4181,7 +4251,7 @@ def _policyFileReplaceOrAppendList(string_list, policy_data):
policy_data = policy_data.replace(data_to_replace, this_string)
else:
log.debug('appending {0}'.format([this_string]))
policy_data = ''.join([policy_data, this_string])
policy_data = b''.join([policy_data, this_string])
return policy_data
@ -4192,16 +4262,16 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False):
'''
# we are going to clean off the special pre-fixes, so we get only the valuename
if not policy_data:
policy_data = ''
specialValueRegex = r'(\*\*Del\.|\*\*DelVals\.){0,1}'
policy_data = b''
specialValueRegex = salt.utils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}')
item_key = None
item_value_name = None
data_to_replace = None
if not append_only:
item_key = this_string.split('{0};'.format(chr(0)))[0].lstrip('[')
item_key = this_string.split(b'\00;')[0].lstrip(b'[')
item_value_name = re.sub(specialValueRegex,
'',
this_string.split('{0};'.format(chr(0)))[1],
b'',
this_string.split(b'\00;')[1],
flags=re.IGNORECASE)
log.debug('item value name is {0}'.format(item_value_name))
data_to_replace = _regexSearchKeyValueCombo(policy_data, item_key, item_value_name)
@ -4210,7 +4280,7 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False):
policy_data = policy_data.replace(data_to_replace, this_string)
else:
log.debug('appending {0}'.format([this_string]))
policy_data = ''.join([policy_data, this_string])
policy_data = b''.join([policy_data, this_string])
return policy_data
@ -4228,9 +4298,10 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
REGISTRY_FILE_VERSION (u'\x01\00')
https://msdn.microsoft.com/en-us/library/aa374407(VS.85).aspx
[Registry Path<NULL>;Reg Value<NULL>;Reg Type<NULL>;SizeInBytes<NULL>;Data<NULL>]
+ https://msdn.microsoft.com/en-us/library/cc232696.aspx
[Registry Path<NULL>;Reg Value<NULL>;Reg Type;SizeInBytes;Data<NULL>]
'''
existing_data = ''
existing_data = b''
base_policy_settings = {}
policy_data = _policy_info()
policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]'
@ -4856,7 +4927,7 @@ def get_policy_info(policy_name,
policy_class,
', '.join(policy_data.policies.keys()))
return ret
if policy_name in policy_data.policies[policy_class]:
if policy_name in policy_data.policies[policy_class]['policies']:
ret['policy_aliases'].append(policy_data.policies[policy_class]['policies'][policy_name]['Policy'])
ret['policy_found'] = True
ret['message'] = ''

View file

@ -280,7 +280,7 @@ def list_available(*names, **kwargs):
refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
_refresh_db_conditional(saltenv, force=refresh)
return_dict_always = \
salt.utils.is_true(kwargs.get('return_dict_always', False))
salt.utils.data.is_true(kwargs.get('return_dict_always', False))
if len(names) == 1 and not return_dict_always:
pkginfo = _get_package_info(names[0], saltenv=saltenv)
if not pkginfo:

View file

@ -618,6 +618,7 @@ import salt.utils.event
import salt.utils.stringutils
import salt.utils.versions
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
@ -950,18 +951,6 @@ def urlencoded_processor(entity):
:param entity: raw POST data
'''
if six.PY3:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
entity.fp.read(fp_out=contents)
contents.seek(0)
body_str = contents.read()
body_bytes = salt.utils.stringutils.to_bytes(body_str)
body_bytes = six.BytesIO(body_bytes)
body_bytes.seek(0)
# Patch fp
entity.fp = body_bytes
del contents
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy._cpreqbody.process_urlencoded(entity)
@ -980,10 +969,10 @@ def json_processor(entity):
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
@ -1004,10 +993,10 @@ def yaml_processor(entity):
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
@ -1030,10 +1019,10 @@ def text_processor(entity):
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:

View file

@ -168,7 +168,7 @@ More complete example for MySQL (to also show configuration)
as_list: True
with_lists: [1,3]
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Please don't strip redundant parentheses from this file.
# I have added some for clarity.
@ -275,7 +275,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)):
# May set 'as_list' from qb[1][2].
else:
defaults.update(qb[1])
if defaults['with_lists']:
if defaults['with_lists'] and isinstance(defaults['with_lists'], six.string_types):
defaults['with_lists'] = [
int(i) for i in defaults['with_lists'].split(',')
]
@ -437,8 +437,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)):
cursor.execute(details['query'], (minion_id,))
# Extract the field names the db has returned and process them
self.process_fields([row[0] for row in cursor.description],
details['depth'])
self.process_fields([row[0] for row in cursor.description], details['depth'])
self.enter_root(root)
self.as_list = details['as_list']
if details['with_lists']:

View file

@ -310,20 +310,28 @@ def clear_git_lock(role, remote=None, **kwargs):
have their lock cleared. For example, a ``remote`` value of **github**
will remove the lock from all github.com remotes.
type : update,checkout
The types of lock to clear. Can be ``update``, ``checkout``, or both of
et (either comma-separated or as a Python list).
type : update,checkout,mountpoint
The types of lock to clear. Can be one or more of ``update``,
``checkout``, and ``mountpoint``, and can be passed either as a
comma-separated or Python list.
.. versionadded:: 2015.8.8
.. versionchanged:: Oxygen
``mountpoint`` lock type added
CLI Example:
CLI Examples:
.. code-block:: bash
salt-run cache.clear_git_lock gitfs
salt-run cache.clear_git_lock git_pillar
salt-run cache.clear_git_lock git_pillar type=update
salt-run cache.clear_git_lock git_pillar type=update,checkout
salt-run cache.clear_git_lock git_pillar type='["update", "mountpoint"]'
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
type_ = salt.utils.args.split_input(kwargs.pop('type', ['update', 'checkout']))
type_ = salt.utils.args.split_input(
kwargs.pop('type', ['update', 'checkout', 'mountpoint']))
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)

View file

@ -3,7 +3,7 @@
Manage S3 Resources
=================
.. versionadded:: 2016.3.0
.. versionadded:: Oxygen
Manage S3 resources. Be aware that this interacts with Amazon's services,
and so may incur charges.
@ -59,7 +59,7 @@ import yaml
# Import Salt libs
import salt.ext.six as six
import salt.utils
import salt.utils.hashutils
log = logging.getLogger(__name__)
@ -189,7 +189,7 @@ def object_present(
if not hash_type:
hash_type = __opts__['hash_type']
try:
digest = salt.utils.get_hash(source, form=hash_type)
digest = salt.utils.hashutils.get_hash(source, form=hash_type)
except IOError as e:
ret['result'] = False
ret['comment'] = "Could not read local file {0}: {1}".format(

View file

@ -369,7 +369,7 @@ def upgraded(name,
if version:
# If installed version and new version are the same
if salt.utils.compare_versions(
if salt.utils.versions.compare(
ver1=installed_version,
oper="==",
ver2=version):
@ -383,7 +383,7 @@ def upgraded(name,
''.format(name, installed_version)
else:
# If installed version is older than new version
if salt.utils.compare_versions(
if salt.utils.versions.compare(
ver1=installed_version, oper="<", ver2=version):
ret['pchanges'] = {
name: 'Version {0} will be upgraded to Version {1} '
@ -429,6 +429,6 @@ def upgraded(name,
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__['chocolatey.list'](local_only=True)
ret['changes'] = salt.utils.compare_dicts(pre_install, post_install)
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret

View file

@ -132,8 +132,18 @@ def wait_for_successful_query(name, wait_for=300, **kwargs):
Like query but, repeat and wait until match/match_type or status is fulfilled. State returns result from last
query state in case of success or if no successful query was made within wait_for timeout.
name
The name of the query.
wait_for
Total time to wait for requests that succeed.
request_interval
Optional interval to delay requests by N seconds to reduce the number of requests sent.
.. note::
All other arguements are passed to the http.query state.
'''
starttime = time.time()
@ -141,7 +151,7 @@ def wait_for_successful_query(name, wait_for=300, **kwargs):
caught_exception = None
ret = None
try:
ret = query(name, wait_for=wait_for, **kwargs)
ret = query(name, **kwargs)
if ret['result']:
return ret
except Exception as exc:

View file

@ -89,10 +89,10 @@ def zone_present(domain, type, profile):
type = 'master'
matching_zone = [z for z in zones if z['domain'] == domain]
if len(matching_zone) > 0:
return state_result(True, "Zone already exists", domain)
return state_result(True, 'Zone already exists', domain)
else:
result = __salt__['libcloud_dns.create_zone'](domain, profile, type)
return state_result(True, "Created new zone", domain, result)
return state_result(True, 'Created new zone', domain, result)
def zone_absent(domain, profile):
@ -108,10 +108,10 @@ def zone_absent(domain, profile):
zones = __salt__['libcloud_dns.list_zones'](profile)
matching_zone = [z for z in zones if z['domain'] == domain]
if len(matching_zone) == 0:
return state_result(True, "Zone already absent", domain)
return state_result(True, 'Zone already absent', domain)
else:
result = __salt__['libcloud_dns.delete_zone'](matching_zone[0]['id'], profile)
return state_result(result, "Deleted zone", domain)
return state_result(result, 'Deleted zone', domain)
def record_present(name, zone, type, data, profile):
@ -140,7 +140,7 @@ def record_present(name, zone, type, data, profile):
try:
matching_zone = [z for z in zones if z['domain'] == zone][0]
except IndexError:
return state_result(False, "Could not locate zone", name)
return state_result(False, 'Could not locate zone', name)
records = __salt__['libcloud_dns.list_records'](matching_zone['id'], profile)
matching_records = [record for record in records
if record['name'] == name and
@ -150,9 +150,9 @@ def record_present(name, zone, type, data, profile):
result = __salt__['libcloud_dns.create_record'](
name, matching_zone['id'],
type, data, profile)
return state_result(True, "Created new record", name, result)
return state_result(True, 'Created new record', name, result)
else:
return state_result(True, "Record already exists", name)
return state_result(True, 'Record already exists', name)
def record_absent(name, zone, type, data, profile):
@ -181,7 +181,7 @@ def record_absent(name, zone, type, data, profile):
try:
matching_zone = [z for z in zones if z['domain'] == zone][0]
except IndexError:
return state_result(False, "Zone could not be found", name)
return state_result(False, 'Zone could not be found', name)
records = __salt__['libcloud_dns.list_records'](matching_zone['id'], profile)
matching_records = [record for record in records
if record['name'] == name and
@ -194,6 +194,6 @@ def record_absent(name, zone, type, data, profile):
matching_zone['id'],
record['id'],
profile))
return state_result(all(result), "Removed {0} records".format(len(result)), name)
return state_result(all(result), 'Removed {0} records'.format(len(result)), name)
else:
return state_result(True, "Records already absent", name)
return state_result(True, 'Records already absent', name)

View file

@ -68,7 +68,7 @@ def _changes(name,
workphone='',
homephone='',
loginclass=None,
date=0,
date=None,
mindays=0,
maxdays=999999,
inactdays=0,
@ -135,7 +135,7 @@ def _changes(name,
change['passwd'] = password
if empty_password and lshad['passwd'] != '':
change['empty_password'] = True
if date and date is not 0 and lshad['lstchg'] != date:
if date is not None and lshad['lstchg'] != date:
change['date'] = date
if mindays and mindays is not 0 and lshad['min'] != mindays:
change['mindays'] = mindays
@ -687,7 +687,7 @@ def present(name,
'empty password'.format(name)
ret['result'] = False
ret['changes']['password'] = ''
if date:
if date is not None:
__salt__['shadow.set_date'](name, date)
spost = __salt__['shadow.info'](name)
if spost['lstchg'] != date:

View file

@ -15,6 +15,9 @@ import random
import shutil
from salt.ext import six
# Import salt libs
import salt.utils.win_dacl
CAN_RENAME_OPEN_FILE = False
if os.name == 'nt': # pragma: no cover
@ -120,8 +123,12 @@ class _AtomicWFile(object):
self._fh.close()
if os.path.isfile(self._filename):
shutil.copymode(self._filename, self._tmp_filename)
st = os.stat(self._filename)
os.chown(self._tmp_filename, st.st_uid, st.st_gid)
if salt.utils.win_dacl.HAS_WIN32:
owner = salt.utils.win_dacl.get_owner(self._filename)
salt.utils.win_dacl.set_owner(self._tmp_filename, owner)
else:
st = os.stat(self._filename)
os.chown(self._tmp_filename, st.st_uid, st.st_gid)
atomic_rename(self._tmp_filename, self._filename)
def __exit__(self, exc_type, exc_value, traceback):

View file

@ -407,11 +407,6 @@ class GitProvider(object):
self.linkdir = salt.utils.path.join(cache_root,
'links',
self.cachedir_basename)
try:
# Remove linkdir if it exists
salt.utils.files.rm_rf(self.linkdir)
except OSError:
pass
if not os.path.isdir(self.cachedir):
os.makedirs(self.cachedir)
@ -835,17 +830,55 @@ class GitProvider(object):
return success, failed
@contextlib.contextmanager
def gen_lock(self, lock_type='update'):
def gen_lock(self, lock_type='update', timeout=0, poll_interval=0.5):
'''
Set and automatically clear a lock
'''
if not isinstance(lock_type, six.string_types):
raise GitLockError(
errno.EINVAL,
'Invalid lock_type \'{0}\''.format(lock_type)
)
# Make sure that we have a positive integer timeout, otherwise just set
# it to zero.
try:
timeout = int(timeout)
except ValueError:
timeout = 0
else:
if timeout < 0:
timeout = 0
if not isinstance(poll_interval, (six.integer_types, float)) \
or poll_interval < 0:
poll_interval = 0.5
if poll_interval > timeout:
poll_interval = timeout
lock_set = False
try:
self._lock(lock_type=lock_type, failhard=True)
lock_set = True
yield
except (OSError, IOError, GitLockError) as exc:
raise GitLockError(exc.errno, exc.strerror)
time_start = time.time()
while True:
try:
self._lock(lock_type=lock_type, failhard=True)
lock_set = True
yield
# Break out of this loop once we've yielded the lock, to
# avoid continued attempts to iterate and establish lock
break
except (OSError, IOError, GitLockError) as exc:
if not timeout or time.time() - time_start > timeout:
raise GitLockError(exc.errno, exc.strerror)
else:
log.debug(
'A %s lock is already present for %s remote '
'\'%s\', sleeping %f second(s)',
lock_type, self.role, self.id, poll_interval
)
time.sleep(poll_interval)
continue
finally:
if lock_set:
self.clear_lock(lock_type=lock_type)
@ -961,6 +994,42 @@ class GitProvider(object):
else:
self.url = self.id
@property
def linkdir_walk(self):
    '''
    Return the expected result of an os.walk on the linkdir, based on the
    mountpoint value.

    The return value is a list of (dirpath, dirnames, filenames) tuples
    shaped like os.walk() output: one entry per parent directory of the
    mountpoint, each expected to contain exactly one subdirectory (the
    next path component) and no files. The result is computed once and
    cached on the instance as ``self._linkdir_walk``.
    '''
    try:
        # Use cached linkdir_walk if we've already run this
        return self._linkdir_walk
    except AttributeError:
        self._linkdir_walk = []
        try:
            # NOTE(review): assumes _mountpoint is a '/'-delimited
            # relative path set by a subclass — confirm against the
            # GitProvider subclasses that define it.
            parts = self._mountpoint.split('/')
        except AttributeError:
            # Subclass did not define _mountpoint; log and fall through
            # so the (empty) cached list is returned below.
            log.error(
                '%s class is missing a \'_mountpoint\' attribute',
                self.__class__.__name__
            )
        else:
            # Build one walk entry per parent component of the
            # mountpoint (parts[:-1]); each is expected to hold only
            # the next component as its sole subdirectory.
            for idx, item in enumerate(parts[:-1]):
                try:
                    dirs = [parts[idx + 1]]
                except IndexError:
                    dirs = []
                self._linkdir_walk.append((
                    salt.utils.path_join(self.linkdir, *parts[:idx + 1]),
                    dirs,
                    []
                ))
            try:
                # The linkdir itself goes at the beginning
                self._linkdir_walk.insert(0, (self.linkdir, [parts[0]], []))
            except IndexError:
                pass
        return self._linkdir_walk
def setup_callbacks(self):
'''
Only needed in pygit2, included in the base class for simplicty of use
@ -2857,69 +2926,123 @@ class GitPillar(GitBase):
base_branch = self.opts['{0}_base'.format(self.role)]
env = 'base' if repo.branch == base_branch else repo.branch
if repo._mountpoint:
if self.link_mountpoint(repo, cachedir):
if self.link_mountpoint(repo):
self.pillar_dirs[repo.linkdir] = env
self.pillar_linked_dirs.append(repo.linkdir)
else:
self.pillar_dirs[cachedir] = env
def link_mountpoint(self, repo, cachedir):
def link_mountpoint(self, repo):
'''
Ensure that the mountpoint is linked to the passed cachedir
Ensure that the mountpoint is present in the correct location and
points at the correct path
'''
lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint)
if not os.path.islink(lcachelink):
ldirname = os.path.dirname(lcachelink)
try:
os.symlink(cachedir, lcachelink)
except OSError as exc:
if exc.errno == errno.ENOENT:
# The parent dir does not exist, create it and then
# re-attempt to create the symlink
lcachelink = salt.utils.path_join(repo.linkdir, repo._mountpoint)
wipe_linkdir = False
create_link = False
try:
with repo.gen_lock(lock_type='mountpoint', timeout=10):
walk_results = list(os.walk(repo.linkdir, followlinks=False))
if walk_results != repo.linkdir_walk:
log.debug(
'Results of walking %s differ from expected results',
repo.linkdir
)
log.debug('Walk results: %s', walk_results)
log.debug('Expected results: %s', repo.linkdir_walk)
wipe_linkdir = True
else:
if not all(not salt.utils.path.islink(x[0])
and os.path.isdir(x[0])
for x in walk_results[:-1]):
log.debug(
'Linkdir parents of %s are not all directories',
lcachelink
)
wipe_linkdir = True
elif not salt.utils.path.islink(lcachelink):
wipe_linkdir = True
else:
try:
ldest = salt.utils.path.readlink(lcachelink)
except Exception:
log.debug(
'Failed to read destination of %s', lcachelink
)
wipe_linkdir = True
else:
if ldest != repo.cachedir:
log.debug(
'Destination of %s (%s) does not match '
'the expected value (%s)',
lcachelink, ldest, repo.cachedir
)
# Since we know that the parent dirs of the
# link are set up properly, all we need to do
# is remove the symlink and let it be created
# below.
try:
if salt.utils.is_windows() \
and not ldest.startswith('\\\\') \
and os.path.isdir(ldest):
# On Windows, symlinks to directories
# must be removed as if they were
# themselves directories.
shutil.rmtree(lcachelink)
else:
os.remove(lcachelink)
except Exception as exc:
log.exception(
'Failed to remove existing git_pillar '
'mountpoint link %s: %s',
lcachelink, exc.__str__()
)
wipe_linkdir = False
create_link = True
if wipe_linkdir:
# Wiping implies that we need to create the link
create_link = True
try:
shutil.rmtree(repo.linkdir)
except OSError:
pass
try:
ldirname = os.path.dirname(lcachelink)
os.makedirs(ldirname)
log.debug('Successfully made linkdir parent %s', ldirname)
except OSError as exc:
log.error(
'Failed to create path %s: %s',
'Failed to os.makedirs() linkdir parent %s: %s',
ldirname, exc.__str__()
)
return False
else:
try:
os.symlink(cachedir, lcachelink)
except OSError:
log.error(
'Could not create symlink to %s at path %s: %s',
cachedir, lcachelink, exc.__str__()
)
return False
elif exc.errno == errno.EEXIST:
# A file or dir already exists at this path, remove it and
# then re-attempt to create the symlink
if create_link:
try:
salt.utils.files.rm_rf(lcachelink)
os.symlink(repo.cachedir, lcachelink)
log.debug(
'Successfully linked %s to cachedir %s',
lcachelink, repo.cachedir
)
return True
except OSError as exc:
log.error(
'Failed to remove file/dir at path %s: %s',
lcachelink, exc.__str__()
'Failed to create symlink to %s at path %s: %s',
repo.cachedir, lcachelink, exc.__str__()
)
return False
else:
try:
os.symlink(cachedir, lcachelink)
except OSError:
log.error(
'Could not create symlink to %s at path %s: %s',
cachedir, lcachelink, exc.__str__()
)
return False
else:
# Other kind of error encountered
log.error(
'Could not create symlink to %s at path %s: %s',
cachedir, lcachelink, exc.__str__()
)
return False
except GitLockError:
log.error(
'Timed out setting mountpoint lock for %s remote \'%s\'. If '
'this error persists, it may be because an earlier %s '
'checkout was interrupted. The lock can be cleared by running '
'\'salt-run cache.clear_git_lock %s type=mountpoint\', or by '
'manually removing %s.',
self.role, repo.id, self.role, self.role,
repo._get_lock_file(lock_type='mountpoint')
)
return False
return True

View file

@ -923,7 +923,8 @@ class Schedule(object):
elif 'once' in data:
if data['_next_fire_time'] and \
data['_next_fire_time'] != now and \
data['_next_fire_time'] < now - self.opts['loop_interval'] and \
data['_next_fire_time'] > now and \
not data['_splay']:
continue
@ -939,7 +940,10 @@ class Schedule(object):
log.error('Date string could not be parsed: %s, %s',
data['once'], once_fmt)
continue
if data['_next_fire_time'] != now:
# If _next_fire_time is less than now or greater
# than now, continue.
if data['_next_fire_time'] < now - self.opts['loop_interval'] and \
data['_next_fire_time'] > now:
continue
elif 'when' in data:
@ -1007,7 +1011,7 @@ class Schedule(object):
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now)
data['_run'] = bool(when >= now - self.opts['loop_interval'])
if not data['_next_fire_time']:
data['_next_fire_time'] = when
@ -1054,7 +1058,7 @@ class Schedule(object):
continue
when = int(time.mktime(when__.timetuple()))
if when < now and \
if when < now - self.opts['loop_interval'] and \
not data.get('_run', False) and \
not run and \
not data['_splay']:
@ -1133,6 +1137,9 @@ class Schedule(object):
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + self.opts['loop_interval']):
run = True
elif seconds == 0:
run = True

View file

@ -4,7 +4,6 @@ exclude:
include:
- include-test
{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('exclude-test') }}:
file:
- managed
{{ pillar['exclude-test'] }}:
file.managed:
- source: salt://testfile

View file

@ -1,7 +1,6 @@
include:
- to-include-test
{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('include-test') }}:
file:
- managed
{{ pillar['include-test'] }}:
file.managed:
- source: salt://testfile

View file

@ -8,7 +8,7 @@ test-state:
salt.state:
- tgt: '*'
- sls:
- include-test
- orch.target-test
cmd.run:
salt.function:

View file

@ -8,7 +8,7 @@ test-state:
salt.state:
- tgt: '*'
- sls:
- include-test
- orch.target-test
cmd.run:
salt.function:

View file

@ -0,0 +1,2 @@
always_true:
test.succeed_without_changes

View file

@ -1,4 +1,3 @@
{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('to-include-test') }}:
file:
- managed
{{ pillar['to-include-test'] }}:
file.managed:
- source: salt://testfile

View file

@ -4,6 +4,7 @@
from __future__ import absolute_import
import os
import shutil
import tempfile
import textwrap
import threading
import time
@ -330,44 +331,28 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
os.unlink(testfile)
def test_include(self):
fnames = (
os.path.join(TMP, 'include-test'),
os.path.join(TMP, 'to-include-test')
)
exclude_test_file = os.path.join(
TMP, 'exclude-test'
)
try:
ret = self.run_function('state.sls', mods='include-test')
self.assertSaltTrueReturn(ret)
for fname in fnames:
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.isfile(exclude_test_file))
finally:
for fname in list(fnames) + [exclude_test_file]:
if os.path.isfile(fname):
os.remove(fname)
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
fnames = (
os.path.join(TMP, 'include-test'),
os.path.join(TMP, 'exclude-test')
)
to_include_test_file = os.path.join(
TMP, 'to-include-test'
)
try:
ret = self.run_function('state.sls', mods='exclude-test')
self.assertSaltTrueReturn(ret)
for fname in fnames:
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.isfile(to_include_test_file))
finally:
for fname in list(fnames) + [to_include_test_file]:
if os.path.isfile(fname):
os.remove(fname)
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):

View file

@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import random
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
class StatusModuleTest(ModuleCase):
    '''
    Test the status module
    '''
    @skipIf(salt.utils.is_windows(), 'minion is windows')
    def test_status_pid(self):
        '''
        status.pid
        '''
        # Grab up to ten PIDs of salt-related processes and verify that a
        # randomly chosen one shows up in the ps output.
        pid_output = self.run_function('status.pid', ['salt'])
        candidate_pids = pid_output.split()[:10]
        chosen_pid = random.choice(candidate_pids)
        ps_output = self.run_function('cmd.run', ['ps aux | grep salt'])
        self.assertIn(chosen_pid, ps_output)

View file

@ -132,5 +132,5 @@ class RunnerReturnsTest(ShellCase):
'jid': jid,
'return': {'args': ['foo'], 'kwargs': {'bar': 'hello world!'}},
'success': True,
'user': RUNTIME_VARS.RUNNING_TESTS_USER}}
'user': RUNTIME_VARS.RUNNING_TESTS_USER if 'SUDO_USER' not in os.environ else 'root'}}
)

View file

@ -5,6 +5,7 @@ from __future__ import absolute_import
import copy
import logging
import os
import random
import time
import dateutil.parser as dateutil_parser
@ -42,6 +43,7 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
functions = {'test.ping': ping}
self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
self.schedule.opts['loop_interval'] = 1
def test_eval(self):
'''
@ -70,3 +72,145 @@ class SchedulerEvalTest(ModuleCase, SaltReturnAssertsMixin):
self.schedule.eval(now=run_time2)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_last_run'], run_time2)
def test_eval_multiple_whens(self):
    '''
    verify that scheduled job runs
    '''
    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                'when': [
                    '11/29/2017 4:00pm',
                    '11/29/2017 5:00pm',
                ]
            }
        }
    }
    # Expected fire times, in the same order as the 'when' list above
    expected_times = [
        int(time.mktime(dateutil_parser.parse(stamp).timetuple()))
        for stamp in ('11/29/2017 4:00pm', '11/29/2017 5:00pm')
    ]

    # Add the job to the scheduler
    self.schedule.opts.update(job)

    # Evaluate at each scheduled time and verify the job fired then
    for expected in expected_times:
        self.schedule.eval(now=expected)
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_last_run'], expected)
def test_eval_loop_interval(self):
    '''
    verify that scheduled job runs
    '''
    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                'when': '11/29/2017 4:00pm',
            }
        }
    }
    # Random loop interval between 30 and 59 seconds
    loop_interval = random.randint(30, 59)
    self.schedule.opts['loop_interval'] = loop_interval

    scheduled = int(time.mktime(dateutil_parser.parse('11/29/2017 4:00pm').timetuple()))

    # Add the job to the scheduler
    self.schedule.opts.update(job)

    # Evaluate one loop interval past the scheduled time; the job should
    # still fire and record that evaluation time as its last run
    self.schedule.eval(now=scheduled + loop_interval)
    ret = self.schedule.job_status('job1')
    self.assertEqual(ret['_last_run'], scheduled + loop_interval)
def test_eval_multiple_whens_loop_interval(self):
    '''
    verify that scheduled job runs
    '''
    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                'when': [
                    '11/29/2017 4:00pm',
                    '11/29/2017 5:00pm',
                ]
            }
        }
    }
    # Random loop interval between 30 and 59 seconds
    loop_interval = random.randint(30, 59)
    self.schedule.opts['loop_interval'] = loop_interval

    # Expected fire times, in the same order as the 'when' list above
    expected_times = [
        int(time.mktime(dateutil_parser.parse(stamp).timetuple()))
        for stamp in ('11/29/2017 4:00pm', '11/29/2017 5:00pm')
    ]

    # Add the job to the scheduler
    self.schedule.opts.update(job)

    # Evaluate one loop interval past each scheduled time; the job should
    # fire each time and record the evaluation time as its last run
    for expected in expected_times:
        self.schedule.eval(now=expected + loop_interval)
        ret = self.schedule.job_status('job1')
        self.assertEqual(ret['_last_run'], expected + loop_interval)
def test_eval_once(self):
    '''
    verify that scheduled job runs
    '''
    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                'once': '2017-12-13T13:00:00',
            }
        }
    }
    expected = int(time.mktime(dateutil_parser.parse('12/13/2017 1:00pm').timetuple()))

    # Add the job to the scheduler
    self.schedule.opts.update(job)

    # Evaluate at exactly the 'once' time and verify the job fired
    self.schedule.eval(now=expected)
    ret = self.schedule.job_status('job1')
    self.assertEqual(ret['_last_run'], expected)
def test_eval_once_loop_interval(self):
    '''
    verify that scheduled job runs
    '''
    job = {
        'schedule': {
            'job1': {
                'function': 'test.ping',
                'once': '2017-12-13T13:00:00',
            }
        }
    }
    # Random loop interval between 0 and 59 seconds
    loop_interval = random.randint(0, 59)
    self.schedule.opts['loop_interval'] = loop_interval

    # The job should fire when evaluated loop_interval seconds after the
    # 'once' time
    run_time = int(time.mktime(dateutil_parser.parse('12/13/2017 1:00:{0}pm'.format(loop_interval)).timetuple()))

    # Add the job to the scheduler
    self.schedule.opts.update(job)

    # Evaluate at the run time and verify the job fired
    self.schedule.eval(now=run_time)
    ret = self.schedule.job_status('job1')
    self.assertEqual(ret['_last_run'], run_time)

View file

@ -39,6 +39,7 @@ class SchedulerPostponeTest(ModuleCase, SaltReturnAssertsMixin):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
functions = {'test.ping': ping}
self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
self.schedule.opts['loop_interval'] = 1
def test_postpone(self):
'''

View file

@ -42,6 +42,7 @@ class SchedulerSkipTest(ModuleCase, SaltReturnAssertsMixin):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
functions = {'test.ping': ping}
self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
self.schedule.opts['loop_interval'] = 1
def test_skip(self):
'''

View file

@ -43,9 +43,9 @@ class SPMBuildTest(SPMCase, ModuleCase):
test spm build with a big file
'''
# check to make sure there is enough space to run this test
check_space = self.run_function('status.diskusage', ['/'])
space = check_space['/']['available']
if space < 2000000:
check_space = self.run_function('status.diskusage', ['/tmp'])
space = check_space['/tmp']['available']
if space < 3000000000:
self.skipTest('Not enough space on host to run this test')
self.run_function('cmd.run',

View file

@ -14,7 +14,8 @@ class DownloadArtifacts(object):
def __init__(self, instance, artifacts):
self.instance = instance
self.artifacts = artifacts
self.client = self.setup_transport()
self.transport = self.setup_transport()
self.sftpclient = paramiko.SFTPClient.from_transport(self.transport)
def setup_transport(self):
# pylint: disable=minimum-python-version
@ -33,19 +34,30 @@ class DownloadArtifacts(object):
username=state.get('username', tport.get('username', 'root')),
pkey=pkey
)
return paramiko.SFTPClient.from_transport(transport)
return transport
def _set_permissions(self):
    '''
    Make sure all xml files are readable by the world so that anyone can grab them
    '''
    # One SSH session per artifact; chmod recursively so files inside
    # remote directories are covered too.
    for remote_path, _ in self.artifacts:
        session = self.transport.open_session()
        session.exec_command('sudo chmod -R +r {}'.format(remote_path))
def download(self):
self._set_permissions()
for remote, local in self.artifacts:
if remote.endswith('/'):
for fxml in self.client.listdir(remote):
for fxml in self.sftpclient.listdir(remote):
self._do_download(os.path.join(remote, fxml), os.path.join(local, os.path.basename(fxml)))
else:
self._do_download(remote, os.path.join(local, os.path.basename(remote)))
def _do_download(self, remote, local):
print('Copying from {0} to {1}'.format(remote, local))
self.client.get(remote, local)
try:
self.sftpclient.get(remote, local)
except IOError:
print('Failed to copy: {0}'.format(remote))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Jenkins Artifact Download Helper')

View file

@ -38,9 +38,11 @@ from tests.support.case import TestCase
# pylint: disable=import-error
import cherrypy # pylint: disable=3rd-party-module-not-gated
from salt.ext import six
from salt.ext.six.moves import StringIO
from salt.ext.six import BytesIO
# pylint: enable=import-error
import salt.utils.stringutils
# Not strictly speaking mandatory but just makes sense
cherrypy.config.update({'environment': "test_suite"})
@ -92,7 +94,7 @@ class BaseCherryPyTestCase(TestCase):
fd = None
if body is not None:
h['content-length'] = '{0}'.format(len(body))
fd = StringIO(body)
fd = BytesIO(salt.utils.stringutils.to_bytes(body))
if headers is not None:
h.update(headers)

View file

@ -5,6 +5,7 @@
# Import Python libs
from __future__ import absolute_import
import logging
import os
# Import Salt Testing Libs
@ -29,6 +30,8 @@ if six.PY3:
else:
import salt.ext.ipaddress as ipaddress
log = logging.getLogger(__name__)
# Globals
IPv4Address = ipaddress.IPv4Address
IPv6Address = ipaddress.IPv6Address
@ -683,6 +686,26 @@ SwapTotal: 4789244 kB'''
self.assertEqual(os_grains.get('mem_total'), 2023)
self.assertEqual(os_grains.get('swap_total'), 400)
def test_docker_virtual(self):
    '''
    Test that the virtual_subtype grain is set to 'Docker' for each of
    the /proc/1/cgroup substrings that indicate a Docker container
    '''
    # Pretend no hypervisor-specific dirs exist and that only
    # /proc/1/cgroup is present, so detection falls through to the
    # cgroup check.
    with patch.object(os.path, 'isdir', MagicMock(return_value=False)):
        with patch.object(os.path,
                          'isfile',
                          MagicMock(side_effect=lambda x: True if x == '/proc/1/cgroup' else False)):
            for cgroup_substr in (':/system.slice/docker', ':/docker/',
                                  ':/docker-ce/'):
                cgroup_data = \
                    '10:memory{0}a_long_sha256sum'.format(cgroup_substr)
                log.debug(
                    'Testing Docker cgroup substring \'%s\'', cgroup_substr)
                with patch('salt.utils.files.fopen', mock_open(read_data=cgroup_data)):
                    self.assertEqual(
                        core._virtual({'kernel': 'Linux'}).get('virtual_subtype'),
                        'Docker'
                    )
def _check_ipaddress(self, value, ip_v):
'''
check if ip address in a list is valid

View file

@ -489,7 +489,7 @@ class LogSettingsParserTests(TestCase):
# Check log file logger
self.assertEqual(self.log_setup.log_level_logfile, log_level_logfile)
@skipIf(salt.utils.is_windows(), 'Windows uses a logging listener')
@skipIf(salt.utils.platform.is_windows(), 'Windows uses a logging listener')
def test_log_created(self):
'''
Tests that log file is created