Merge branch '2017.7' into improve-async-operation-handling-in-kubernetes-module

Jochen Breuer 2017-09-06 17:06:18 +02:00 committed by GitHub
commit 91076bbafa
44 changed files with 599 additions and 240 deletions

.github/CODEOWNERS (new, vendored file)
View file

@ -0,0 +1,60 @@
# SALTSTACK CODE OWNERS
# See https://help.github.com/articles/about-codeowners/
# for more info about CODEOWNERS file
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
# See https://help.github.com/articles/about-codeowners/
# for more info about the CODEOWNERS file
# Team Boto
salt/**/*boto* @saltstack/team-boto
# Team Core
salt/auth/ @saltstack/team-core
salt/cache/ @saltstack/team-core
salt/cli/ @saltstack/team-core
salt/client/* @saltstack/team-core
salt/config/* @saltstack/team-core
salt/daemons/ @saltstack/team-core
salt/pillar/ @saltstack/team-core
salt/loader.py @saltstack/team-core
salt/payload.py @saltstack/team-core
salt/**/master* @saltstack/team-core
salt/**/minion* @saltstack/team-core
# Team Cloud
salt/cloud/ @saltstack/team-cloud
salt/utils/openstack/ @saltstack/team-cloud
salt/utils/aws.py @saltstack/team-cloud
salt/**/*cloud* @saltstack/team-cloud
# Team NetAPI
salt/cli/api.py @saltstack/team-netapi
salt/client/netapi.py @saltstack/team-netapi
salt/netapi/ @saltstack/team-netapi
# Team Network
salt/proxy/ @saltstack/team-proxy
# Team SPM
salt/cli/spm.py @saltstack/team-spm
salt/spm/ @saltstack/team-spm
# Team SSH
salt/cli/ssh.py @saltstack/team-ssh
salt/client/ssh/ @saltstack/team-ssh
salt/runners/ssh.py @saltstack/team-ssh
salt/**/thin.py @saltstack/team-ssh
# Team State
salt/state.py @saltstack/team-state
# Team Transport
salt/transport/ @saltstack/team-transport
salt/utils/zeromq.py @saltstack/team-transport
# Team Windows
salt/**/*win* @saltstack/team-windows

View file

@ -89,7 +89,7 @@ if Defined x (
if %Python%==2 (
Set "PyDir=C:\Python27"
) else (
Set "PyDir=C:\Program Files\Python35"
Set "PyDir=C:\Python35"
)
Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts"

View file

@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") {
DownloadFileWithProgress $url $file
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ."
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru
$p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower())))
#==============================================================================
# Update PIP and SetupTools
# caching depends on environmant variable SALT_PIP_LOCAL_CACHE
# caching depends on environment variable SALT_PIP_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Updating PIP and SetupTools . . ."
@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) {
#==============================================================================
# Install pypi resources using pip
# caching depends on environmant variable SALT_REQ_LOCAL_CACHE
# caching depends on environment variable SALT_REQ_LOCAL_CACHE
#==============================================================================
Write-Output " ----------------------------------------------------------------"
Write-Output " - $script_name :: Installing pypi resources using pip . . ."
@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) {
Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install"
}
#==============================================================================
# Move PyWin32 DLL's to site-packages\win32
#==============================================================================
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32"
# Remove pythonwin directory
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse
# Remove PyWin32 PostInstall and testall Scripts
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse
#==============================================================================
# Install PyYAML with CLoader
# This has to be a compiled binary to get the CLoader

View file

@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") {
DownloadFileWithProgress $url $file
Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ."
$p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru
$p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru
}
#------------------------------------------------------------------------------
@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
# Remove pywin32_system32 directory
Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ."
@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32"
Write-Output " - $script_name :: Removing pythonwin Directory . . ."
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse
# Remove PyWin32 PostInstall and testall Scripts
Write-Output " - $script_name :: Removing PyWin32 scripts . . ."
Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse
#==============================================================================
# Fix PyCrypto
#==============================================================================

View file

@ -56,7 +56,7 @@ if %Python%==2 (
Set "PyVerMajor=2"
Set "PyVerMinor=7"
) else (
Set "PyDir=C:\Program Files\Python35"
Set "PyDir=C:\Python35"
Set "PyVerMajor=3"
Set "PyVerMinor=5"
)

View file

@ -16,9 +16,10 @@ if %errorLevel%==0 (
)
echo.
:CheckPython2
if exist "\Python27" goto RemovePython2
if exist "\Program Files\Python35" goto RemovePython3
goto eof
goto CheckPython3
:RemovePython2
rem Uninstall Python 2.7
@ -47,25 +48,30 @@ goto eof
goto eof
:CheckPython3
if exist "\Python35" goto RemovePython3
goto eof
:RemovePython3
echo %0 :: Uninstalling Python 3 ...
echo ---------------------------------------------------------------------
:: 64 bit
if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" (
echo %0 :: - 3.5.3 64bit
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall
"%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive
)
:: 32 bit
if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" (
echo %0 :: - 3.5.3 32bit
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall
"%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive
)
rem wipe the Python directory
echo %0 :: Removing the C:\Program Files\Python35 Directory ...
echo %0 :: Removing the C:\Python35 Directory ...
echo ---------------------------------------------------------------------
rd /s /q "C:\Program Files\Python35"
rd /s /q "C:\Python35"
if %errorLevel%==0 (
echo Successful
) else (

View file

@ -19,9 +19,9 @@ Function Get-Settings {
"Python2Dir" = "C:\Python27"
"Scripts2Dir" = "C:\Python27\Scripts"
"SitePkgs2Dir" = "C:\Python27\Lib\site-packages"
"Python3Dir" = "C:\Program Files\Python35"
"Scripts3Dir" = "C:\Program Files\Python35\Scripts"
"SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages"
"Python3Dir" = "C:\Python35"
"Scripts3Dir" = "C:\Python35\Scripts"
"SitePkgs3Dir" = "C:\Python35\Lib\site-packages"
"DownloadDir" = "$env:Temp\DevSalt"
}
# The script deletes the DownLoadDir (above) for each install.

View file

@ -200,7 +200,7 @@ class LoadAuth(object):
'''
if not self.authenticate_eauth(load):
return {}
fstr = '{0}.auth'.format(load['eauth'])
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
tok = str(hash_type(os.urandom(512)).hexdigest())
t_path = os.path.join(self.opts['token_dir'], tok)
@ -224,8 +224,9 @@ class LoadAuth(object):
acl_ret = self.__get_acl(load)
tdata['auth_list'] = acl_ret
if 'groups' in load:
tdata['groups'] = load['groups']
groups = self.get_groups(load)
if groups:
tdata['groups'] = groups
try:
with salt.utils.files.set_umask(0o177):
@ -345,7 +346,7 @@ class LoadAuth(object):
return False
return True
def get_auth_list(self, load):
def get_auth_list(self, load, token=None):
'''
Retrieve access list for the user specified in load.
The list is built by eauth module or from master eauth configuration.
@ -353,30 +354,37 @@ class LoadAuth(object):
list if the user has no rights to execute anything on this master and returns non-empty list
if user is allowed to execute particular functions.
'''
# Get auth list from token
if token and self.opts['keep_acl_in_token'] and 'auth_list' in token:
return token['auth_list']
# Get acl from eauth module.
auth_list = self.__get_acl(load)
if auth_list is not None:
return auth_list
if load['eauth'] not in self.opts['external_auth']:
eauth = token['eauth'] if token else load['eauth']
if eauth not in self.opts['external_auth']:
# No matching module is allowed in config
log.warning('Authorization failure occurred.')
return None
name = self.load_name(load) # The username we are attempting to auth with
groups = self.get_groups(load) # The groups this user belongs to
eauth_config = self.opts['external_auth'][load['eauth']]
if groups is None or groups is False:
if token:
name = token['name']
groups = token.get('groups')
else:
name = self.load_name(load) # The username we are attempting to auth with
groups = self.get_groups(load) # The groups this user belongs to
eauth_config = self.opts['external_auth'][eauth]
if not groups:
groups = []
group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
for group in groups:
if group == group_config:
group_auth_match = True
if group_config.rstrip('%') in groups:
group_auth_match = True
break
# If a group_auth_match is set it means only that we have a
# user which matches at least one or more of the groups defined
# in the configuration file.
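
Taken together with the master-side changes further down, the practical effect is that callers hand the whole token to LoadAuth instead of re-deriving eauth/username and checking keep_acl_in_token themselves. A minimal sketch of the new call pattern (the token shape is as used above; the empty-result handling is illustrative, not from the diff):

# sketch only: token is expected to carry 'eauth', 'name', optional 'groups',
# and a cached 'auth_list' when keep_acl_in_token is enabled
auth_list = self.loadauth.get_auth_list(load, token)
if not auth_list:
    # None means no matching eauth module; [] means no rights on this master
    ...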

View file

@ -306,7 +306,7 @@ def groups(username, **kwargs):
'''
group_list = []
bind = _bind(username, kwargs['password'],
bind = _bind(username, kwargs.get('password'),
anonymous=_config('anonymous', mandatory=False))
if bind:
log.debug('ldap bind to determine group membership succeeded!')
@ -371,7 +371,7 @@ def groups(username, **kwargs):
search_results = bind.search_s(search_base,
ldap.SCOPE_SUBTREE,
search_string,
[_config('accountattributename'), 'cn'])
[_config('accountattributename'), 'cn', _config('groupattribute')])
for _, entry in search_results:
if username in entry[_config('accountattributename')]:
group_list.append(entry['cn'][0])

View file

@ -24,7 +24,6 @@ import logging
# Import salt libs
from salt.exceptions import SaltCloudSystemExit
import salt.config as config
import salt.utils.cloud as cloud
# Import Third Party Libs
try:
@ -136,7 +135,7 @@ def create(vm_info):
)
log.debug("Going to fire event: starting create")
cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_info['name']),
@ -151,7 +150,7 @@ def create(vm_info):
'clone_from': vm_info['clonefrom']
}
cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_info['name']),
@ -174,10 +173,10 @@ def create(vm_info):
vm_info['key_filename'] = key_filename
vm_info['ssh_host'] = ip
res = cloud.bootstrap(vm_info, __opts__)
res = __utils__['cloud.bootstrap'](vm_info)
vm_result.update(res)
cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'created machine',
'salt/cloud/{0}/created'.format(vm_info['name']),
@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None):
"private_ips",
"public_ips",
]
return cloud.list_nodes_select(
return __utils__['cloud.list_nodes_select'](
list_nodes_full('function'), attributes, call,
)
@ -278,7 +277,7 @@ def list_nodes_select(call=None):
"""
Return a list of the VMs that are on the provider, with select fields
"""
return cloud.list_nodes_select(
return __utils__['cloud.list_nodes_select'](
list_nodes_full('function'), __opts__['query.selection'], call,
)
@ -306,7 +305,7 @@ def destroy(name, call=None):
if not vb_machine_exists(name):
return "{0} doesn't exist and can't be deleted".format(name)
cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
@ -317,7 +316,7 @@ def destroy(name, call=None):
vb_destroy_machine(name)
cloud.fire_event(
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),

View file

@ -55,7 +55,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S'
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
_DFLT_LOG_FMT_LOGFILE = (
'%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
)
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']

View file

@ -1055,12 +1055,7 @@ class LocalFuncs(object):
return dict(error=dict(name=err_name,
message='Authentication failure of type "token" occurred.'))
username = token['name']
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
load['eauth'] = token['eauth']
load['username'] = username
auth_list = self.loadauth.get_auth_list(load)
auth_list = self.loadauth.get_auth_list(load, token)
else:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
@ -1102,12 +1097,7 @@ class LocalFuncs(object):
return dict(error=dict(name=err_name,
message='Authentication failure of type "token" occurred.'))
username = token['name']
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
load['eauth'] = token['eauth']
load['username'] = username
auth_list = self.loadauth.get_auth_list(load)
auth_list = self.loadauth.get_auth_list(load, token)
elif 'eauth' in load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
@ -1217,12 +1207,7 @@ class LocalFuncs(object):
return ''
# Get acl from eauth module.
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
extra['eauth'] = token['eauth']
extra['username'] = token['name']
auth_list = self.loadauth.get_auth_list(extra)
auth_list = self.loadauth.get_auth_list(extra, token)
# Authorize the request
if not self.ckminions.auth_check(

View file

@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`.
from __future__ import absolute_import
# Import python libs
import json
import os
import socket
@ -47,14 +48,28 @@ def _search(prefix="latest/"):
Recursively look up all grains in the metadata server
'''
ret = {}
for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'):
linedata = http.query(os.path.join(HOST, prefix))
if 'body' not in linedata:
return ret
for line in linedata['body'].split('\n'):
if line.endswith('/'):
ret[line[:-1]] = _search(prefix=os.path.join(prefix, line))
elif prefix == 'latest/':
# (gtmanfred) The first level should have a forward slash since
# they have stuff underneath. This will not be doubled up though,
# because lines ending with a slash are checked first.
ret[line] = _search(prefix=os.path.join(prefix, line + '/'))
elif '=' in line:
key, value = line.split('=')
ret[value] = _search(prefix=os.path.join(prefix, key))
else:
ret[line] = http.query(os.path.join(HOST, prefix, line))['body']
retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None)
# (gtmanfred) This try except block is slightly faster than
# checking if the string starts with a curly brace
try:
ret[line] = json.loads(retdata)
except ValueError:
ret[line] = retdata
return ret

View file

@ -447,8 +447,8 @@ def optional_args(proxy=None):
device2:
True
'''
opt_args = _get_device_grain('optional_args', proxy=proxy)
if _FORBIDDEN_OPT_ARGS:
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args}

View file

@ -1705,12 +1705,7 @@ class ClearFuncs(object):
message='Authentication failure of type "token" occurred.'))
# Authorize
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
clear_load['eauth'] = token['eauth']
clear_load['username'] = token['name']
auth_list = self.loadauth.get_auth_list(clear_load)
auth_list = self.loadauth.get_auth_list(clear_load, token)
if not self.ckminions.runner_check(auth_list, clear_load['fun']):
return dict(error=dict(name='TokenAuthenticationError',
@ -1774,12 +1769,7 @@ class ClearFuncs(object):
message='Authentication failure of type "token" occurred.'))
# Authorize
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
clear_load['eauth'] = token['eauth']
clear_load['username'] = token['name']
auth_list = self.loadauth.get_auth_list(clear_load)
auth_list = self.loadauth.get_auth_list(clear_load, token)
if not self.ckminions.wheel_check(auth_list, clear_load['fun']):
return dict(error=dict(name='TokenAuthenticationError',
message=('Authentication failure of type "token" occurred for '
@ -1900,12 +1890,7 @@ class ClearFuncs(object):
return ''
# Get acl
if self.opts['keep_acl_in_token'] and 'auth_list' in token:
auth_list = token['auth_list']
else:
extra['eauth'] = token['eauth']
extra['username'] = token['name']
auth_list = self.loadauth.get_auth_list(extra)
auth_list = self.loadauth.get_auth_list(extra, token)
# Authorize the request
if not self.ckminions.auth_check(

View file

@ -446,11 +446,15 @@ def config(name, config, edit=True):
salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]"
'''
configs = []
for entry in config:
key = next(six.iterkeys(entry))
configs = _parse_config(entry[key], key)
if edit:
with salt.utils.fopen(name, 'w') as configfile:
configfile.write('# This file is managed by Salt.\n')
configfile.write(configs)
return configs
configs.append(_parse_config(entry[key], key))
# Python auto-correct line endings
configstext = "\n".join(configs)
if edit:
with salt.utils.fopen(name, 'w') as configfile:
configfile.write('# This file is managed by Salt.\n')
configfile.write(configstext)
return configstext

View file

@ -232,6 +232,7 @@ except ImportError:
# pylint: enable=import-error
HAS_NSENTER = bool(salt.utils.which('nsenter'))
HUB_PREFIX = 'docker.io/'
# Set up logging
log = logging.getLogger(__name__)
@ -1486,6 +1487,43 @@ def list_tags():
return sorted(ret)
def resolve_tag(name, tags=None):
'''
.. versionadded:: 2017.7.2,Oxygen
Given an image tag, check the locally-pulled tags (using
:py:func:`docker.list_tags <salt.modules.dockermod.list_tags>`) and return
the matching tag. This helps disambiguate differences on some platforms
where images from the Docker Hub are prefixed with ``docker.io/``. If an
image name with no tag is passed, a tag of ``latest`` is assumed.
If the specified image is not pulled locally, this function will return
``False``.
tags
An optional Python list of tags to check against. If passed, then
:py:func:`docker.list_tags <salt.modules.dockermod.list_tags>` will not
be run to get a list of tags. This is useful when resolving a number of
tags at the same time.
CLI Examples:
.. code-block:: bash
salt myminion docker.resolve_tag busybox
salt myminion docker.resolve_tag busybox:latest
'''
tag_name = ':'.join(salt.utils.docker.get_repo_tag(name))
if tags is None:
tags = list_tags()
if tag_name in tags:
return tag_name
full_name = HUB_PREFIX + tag_name
if not name.startswith(HUB_PREFIX) and full_name in tags:
return full_name
return False
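
The new resolve_tag is what the reworked docker_image state below relies on. A short usage sketch from another module or state (image names here are illustrative, not from the diff):

# resolve a possibly 'docker.io/'-prefixed tag against the locally pulled images
resolved = __salt__['docker.resolve_tag']('busybox')        # e.g. 'busybox:latest'
if resolved is False:
    # the image has not been pulled locally
    pass
# when resolving several names, pass a pre-fetched tag list to skip repeated lookups
pre_tags = __salt__['docker.list_tags']()
resolved = __salt__['docker.resolve_tag']('busybox:musl', tags=pre_tags)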
def logs(name):
'''
Returns the logs for the container. Equivalent to running the ``docker

View file

@ -29,6 +29,7 @@ from salt.modules.inspectlib.entities import (AllowedDir, IgnoredDir, Package,
PayloadFile, PackageCfgFile)
import salt.utils
import salt.utils.path
from salt.utils import fsutils
from salt.utils import reinit_crypto
from salt.exceptions import CommandExecutionError
@ -311,7 +312,7 @@ class Inspector(EnvLoader):
continue
if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK):
continue
if os.path.islink(obj):
if salt.utils.path.islink(obj):
links.append(obj)
elif os.path.isdir(obj):
dirs.append(obj)

View file

@ -17,11 +17,14 @@
# Import python libs
from __future__ import absolute_import
import os
import grp
import pwd
from xml.dom import minidom
import platform
import socket
try:
import grp
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils

View file

@ -1455,6 +1455,8 @@ def _parser():
add_arg('--or-mark', dest='or-mark', action='append')
add_arg('--xor-mark', dest='xor-mark', action='append')
add_arg('--set-mark', dest='set-mark', action='append')
add_arg('--nfmask', dest='nfmask', action='append')
add_arg('--ctmask', dest='ctmask', action='append')
## CONNSECMARK
add_arg('--save', dest='save', action='append')
add_arg('--restore', dest='restore', action='append')

View file

@ -837,6 +837,9 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
# Modify IP spec to IIS 7.5 format
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
# win 2008 uses the following format: ip!port and not ip!port!
if iis7path.endswith("!"):
iis7path = iis7path[:-1]
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(iis7path),
@ -1255,6 +1258,9 @@ def set_container_setting(name, container, settings):
salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
settings="{'managedPipeLineMode': 'Integrated'}"
'''
identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'}
identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'}
ps_cmd = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
@ -1281,6 +1287,10 @@ def set_container_setting(name, container, settings):
except ValueError:
value = "'{0}'".format(settings[setting])
# Map to numeric to support server 2008
if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
value = identityType_map2numeric[settings[setting]]
ps_cmd.extend(['Set-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
@ -1300,6 +1310,10 @@ def set_container_setting(name, container, settings):
failed_settings = dict()
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if str(settings[setting]) != str(new_settings[setting]):
failed_settings[setting] = settings[setting]

View file

@ -2835,7 +2835,8 @@ def _findOptionValueInSeceditFile(option):
_reader = codecs.open(_tfile, 'r', encoding='utf-16')
_secdata = _reader.readlines()
_reader.close()
_ret = __salt__['file.remove'](_tfile)
if __salt__['file.file_exists'](_tfile):
_ret = __salt__['file.remove'](_tfile)
for _line in _secdata:
if _line.startswith(option):
return True, _line.split('=')[1].strip()
@ -2856,16 +2857,20 @@ def _importSeceditConfig(infdata):
_tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
'salt-secedit-config-{0}.inf'.format(_d))
# make sure our temp files don't already exist
_ret = __salt__['file.remove'](_tSdbfile)
_ret = __salt__['file.remove'](_tInfFile)
if __salt__['file.file_exists'](_tSdbfile):
_ret = __salt__['file.remove'](_tSdbfile)
if __salt__['file.file_exists'](_tInfFile):
_ret = __salt__['file.remove'](_tInfFile)
# add the inf data to the file, win_file sure could use the write() function
_ret = __salt__['file.touch'](_tInfFile)
_ret = __salt__['file.append'](_tInfFile, infdata)
# run secedit to make the change
_ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile))
# cleanup our temp files
_ret = __salt__['file.remove'](_tSdbfile)
_ret = __salt__['file.remove'](_tInfFile)
if __salt__['file.file_exists'](_tSdbfile):
_ret = __salt__['file.remove'](_tSdbfile)
if __salt__['file.file_exists'](_tInfFile):
_ret = __salt__['file.remove'](_tInfFile)
return True
except Exception as e:
log.debug('error occurred while trying to import secedit data')
@ -4174,8 +4179,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
existing_data = ''
base_policy_settings = {}
policy_data = _policy_info()
#//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]
#policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]')
policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]'
try:
if admx_policy_definitions is None or adml_policy_resources is None:
@ -4206,8 +4209,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
this_valuename = None
if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled':
log.debug('time to disable {0}'.format(admPolicy))
#this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace})
this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace})
this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
if this_policy:
this_policy = this_policy[0]
if 'class' in this_policy.attrib:
@ -4318,7 +4320,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
log.error(msg.format(this_policy.attrib))
else:
log.debug('time to enable and set the policy "{0}"'.format(admPolicy))
#this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace})
this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace})
log.debug('found this_policy == {0}'.format(this_policy))
if this_policy:

View file

@ -973,7 +973,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# Version is ignored
salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3
If passed with a comma seperated list in the ``name`` parameter, the
If passed with a comma separated list in the ``name`` parameter, the
version will apply to all packages in the list.
CLI Example:
@ -1282,7 +1282,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))
# Build cmd and arguments
# cmd and arguments must be seperated for use with the task scheduler
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
@ -1313,7 +1313,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# Run Scheduled Task
# Special handling for installing salt
if pkg_name in ['salt-minion', 'salt-minion-py3']:
if re.search(r'salt[\s_.-]*minion',
pkg_name,
flags=re.IGNORECASE + re.UNICODE) is not None:
ret[pkg_name] = {'install status': 'task started'}
if not __salt__['task.run'](name='update-salt-software'):
log.error('Failed to install {0}'.format(pkg_name))
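
For reference, a small sketch of what the broadened check accepts compared to the old exact-name comparison (package names are hypothetical; assumes Python's re module):

import re

pattern = r'salt[\s_.-]*minion'
# each of these now triggers the scheduled-task code path; the old check only
# matched the literal names 'salt-minion' and 'salt-minion-py3'
for name in ('salt-minion', 'salt-minion-py3', 'Salt Minion', 'salt_minion'):
    assert re.search(pattern, name, flags=re.IGNORECASE | re.UNICODE) is not None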
@ -1345,7 +1347,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
else:
# Combine cmd and arguments
cmd = [cmd].extend(arguments)
cmd = [cmd]
cmd.extend(arguments)
# Launch the command
result = __salt__['cmd.run_all'](cmd,

View file

@ -20,7 +20,7 @@ try:
except ImportError as exc:
cpy_error = exc
__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy'
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_cherrypy'
logger = logging.getLogger(__virtualname__)
cpy_min = '3.2.2'

View file

@ -10,7 +10,7 @@ import os
import salt.auth
from salt.utils.versions import StrictVersion as _StrictVersion
__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado'
__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_tornado'
logger = logging.getLogger(__virtualname__)

View file

@ -37,6 +37,7 @@ import salt.utils.dictupdate
import salt.utils.event
import salt.utils.url
import salt.utils.process
import salt.utils.files
import salt.syspaths as syspaths
from salt.utils import immutabletypes
from salt.template import compile_template, compile_template_str
@ -146,6 +147,13 @@ def _gen_tag(low):
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
def _clean_tag(tag):
'''
Make tag name safe for filenames
'''
return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
low = {'name': 'listen_{0}'.format(name),
'__id__': 'listen_{0}'.format(id_),
@ -1695,7 +1703,7 @@ class State(object):
trb)
}
troot = os.path.join(self.opts['cachedir'], self.jid)
tfile = os.path.join(troot, tag)
tfile = os.path.join(troot, _clean_tag(tag))
if not os.path.isdir(troot):
try:
os.makedirs(troot)
@ -2047,7 +2055,7 @@ class State(object):
proc = running[tag].get('proc')
if proc:
if not proc.is_alive():
ret_cache = os.path.join(self.opts['cachedir'], self.jid, tag)
ret_cache = os.path.join(self.opts['cachedir'], self.jid, _clean_tag(tag))
if not os.path.isfile(ret_cache):
ret = {'result': False,
'comment': 'Parallel process failed to return',

View file

@ -116,7 +116,7 @@ entry on the minion already contains a numeric value, then using the ``random``
keyword will not modify it.
Added the opportunity to set a job with a special keyword like '@reboot' or
'@hourly'.
'@hourly'. Quotes must be used, otherwise PyYAML will strip the '@' sign.
.. code-block:: yaml
@ -303,7 +303,8 @@ def present(name,
edits. This defaults to the state id
special
A special keyword to specify periodicity (eg. @reboot, @hourly...)
A special keyword to specify periodicity (eg. @reboot, @hourly...).
Quotes must be used, otherwise PyYAML will strip the '@' sign.
.. versionadded:: 2016.3.0
'''
@ -389,7 +390,8 @@ def absent(name,
edits. This defaults to the state id
special
The special keyword used in the job (eg. @reboot, @hourly...)
The special keyword used in the job (eg. @reboot, @hourly...).
Quotes must be used, otherwise PyYAML will strip the '@' sign.
'''
### NOTE: The keyword arguments in **kwargs are ignored in this state, but
### cannot be removed from the function definition, otherwise the use

View file

@ -135,13 +135,14 @@ def present(name,
.. versionadded:: 2016.11.0
sls
Allow for building images with ``dockerng.sls_build`` by specify the
SLS files to build with. This can be a list or comma-seperated string.
Allow for building of image with :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>` by specifying the SLS files with
which to build. This can be a list or comma-seperated string.
.. code-block:: yaml
myuser/myimage:mytag:
dockerng.image_present:
docker_image.present:
- sls:
- webapp1
- webapp2
@ -151,12 +152,14 @@ def present(name,
.. versionadded: 2017.7.0
base
Base image with which to start ``dockerng.sls_build``
Base image with which to start :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>`
.. versionadded: 2017.7.0
saltenv
environment from which to pull sls files for ``dockerng.sls_build``.
Environment from which to pull SLS files for :py:func:`docker.sls_build
<salt.modules.dockermod.sls_build>`
.. versionadded: 2017.7.0
'''
@ -169,11 +172,14 @@ def present(name,
ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.'
return ret
# Ensure that we have repo:tag notation
image = ':'.join(salt.utils.docker.get_repo_tag(name))
all_tags = __salt__['docker.list_tags']()
resolved_tag = __salt__['docker.resolve_tag'](image)
if image in all_tags:
if resolved_tag is False:
# Specified image is not present
image_info = None
else:
# Specified image is present
if not force:
ret['result'] = True
ret['comment'] = 'Image \'{0}\' already present'.format(name)
@ -185,8 +191,6 @@ def present(name,
ret['comment'] = \
'Unable to get info for image \'{0}\': {1}'.format(name, exc)
return ret
else:
image_info = None
if build or sls:
action = 'built'
@ -197,15 +201,15 @@ def present(name,
if __opts__['test']:
ret['result'] = None
if (image in all_tags and force) or image not in all_tags:
if (resolved_tag is not False and force) or resolved_tag is False:
ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action)
return ret
if build:
try:
image_update = __salt__['docker.build'](path=build,
image=image,
dockerfile=dockerfile)
image=image,
dockerfile=dockerfile)
except Exception as exc:
ret['comment'] = (
'Encountered error building {0} as {1}: {2}'
@ -219,10 +223,10 @@ def present(name,
if isinstance(sls, list):
sls = ','.join(sls)
try:
image_update = __salt__['dockerng.sls_build'](name=image,
base=base,
mods=sls,
saltenv=saltenv)
image_update = __salt__['docker.sls_build'](name=image,
base=base,
mods=sls,
saltenv=saltenv)
except Exception as exc:
ret['comment'] = (
'Encountered error using sls {0} for building {1}: {2}'
@ -252,10 +256,8 @@ def present(name,
client_timeout=client_timeout
)
except Exception as exc:
ret['comment'] = (
'Encountered error pulling {0}: {1}'
.format(image, exc)
)
ret['comment'] = \
'Encountered error pulling {0}: {1}'.format(image, exc)
return ret
if (image_info is not None and image_info['Id'][:12] == image_update
.get('Layers', {})
@ -267,7 +269,7 @@ def present(name,
# Only add to the changes dict if layers were pulled
ret['changes'] = image_update
ret['result'] = image in __salt__['docker.list_tags']()
ret['result'] = bool(__salt__['docker.resolve_tag'](image))
if not ret['result']:
# This shouldn't happen, failure to pull should be caught above
@ -345,23 +347,16 @@ def absent(name=None, images=None, force=False):
ret['comment'] = 'One of \'name\' and \'images\' must be provided'
return ret
elif images is not None:
targets = []
for target in images:
try:
targets.append(':'.join(salt.utils.docker.get_repo_tag(target)))
except TypeError:
# Don't stomp on images with unicode characters in Python 2,
# only force image to be a str if it wasn't already (which is
# very unlikely).
targets.append(':'.join(salt.utils.docker.get_repo_tag(str(target))))
targets = images
elif name:
try:
targets = [':'.join(salt.utils.docker.get_repo_tag(name))]
except TypeError:
targets = [':'.join(salt.utils.docker.get_repo_tag(str(name)))]
targets = [name]
pre_tags = __salt__['docker.list_tags']()
to_delete = [x for x in targets if x in pre_tags]
to_delete = []
for target in targets:
resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags)
if resolved_tag is not False:
to_delete.append(resolved_tag)
log.debug('targets = {0}'.format(targets))
log.debug('to_delete = {0}'.format(to_delete))

View file

@ -1553,7 +1553,7 @@ def managed(name,
the salt master and potentially run through a templating system.
name
The location of the file to manage
The location of the file to manage, as an absolute path.
source
The source file to download to the minion, this source file can be
@ -1723,13 +1723,15 @@ def managed(name,
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion On Windows, this is ignored
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``.
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds umask of salt
process. For existing files and directories it's not enforced.
The default mode for new files and directories corresponds to the
umask of the salt process. The mode of existing files and directories
will only be changed if ``mode`` is specified.
.. note::
This option is **not** supported on Windows.
@ -2558,7 +2560,7 @@ def directory(name,
Ensure that a named directory is present and has the right perms
name
The location to create or manage a directory
The location to create or manage a directory, as an absolute path
user
The user to own the directory; this defaults to the user salt is

View file

@ -481,7 +481,6 @@ def container_setting(name, container, settings=None):
:param str container: The type of IIS container. The container types are:
AppPools, Sites, SslBindings
:param str settings: A dictionary of the setting names and their values.
Example of usage for the ``AppPools`` container:
.. code-block:: yaml
@ -510,6 +509,8 @@ def container_setting(name, container, settings=None):
logFile.period: Daily
limits.maxUrlSegments: 32
'''
identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'}
ret = {'name': name,
'changes': {},
'comment': str(),
@ -529,6 +530,10 @@ def container_setting(name, container, settings=None):
container=container,
settings=settings.keys())
for setting in settings:
# map identity type from numeric to string for comparing
if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
settings[setting] = identityType_map2string[settings[setting]]
if str(settings[setting]) != str(current_settings[setting]):
ret_settings['changes'][setting] = {'old': current_settings[setting],
'new': settings[setting]}
@ -541,8 +546,8 @@ def container_setting(name, container, settings=None):
ret['changes'] = ret_settings
return ret
__salt__['win_iis.set_container_setting'](name=name, container=container,
settings=settings)
__salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings)
new_settings = __salt__['win_iis.get_container_setting'](name=name,
container=container,
settings=settings.keys())

View file

@ -2098,7 +2098,7 @@ def is_true(value=None):
pass
# Now check for truthiness
if isinstance(value, (int, float)):
if isinstance(value, (six.integer_types, float)):
return value > 0
elif isinstance(value, six.string_types):
return str(value).lower() == 'true'
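
The move from int to six.integer_types only matters on Python 2, where long is a separate type. A quick sketch (uses Salt's bundled six, as imported elsewhere in this commit):

import salt.ext.six as six

value = 10 ** 20   # an int on Python 3, but a long on Python 2
# the old isinstance(value, (int, float)) check is False for a Python 2 long;
# six.integer_types covers both interpreters
assert isinstance(value, (six.integer_types, float))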
@ -2874,7 +2874,7 @@ def repack_dictlist(data,
if val_cb is None:
val_cb = lambda x, y: y
valid_non_dict = (six.string_types, int, float)
valid_non_dict = (six.string_types, six.integer_types, float)
if isinstance(data, list):
for element in data:
if isinstance(element, valid_non_dict):

View file

@ -392,7 +392,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
service_url = prov_dict.get('service_url', 'amazonaws.com')
if not location:
location = get_location(opts, provider)
location = get_location(opts, prov_dict)
if endpoint is None:
if not requesturl:

View file

@ -293,12 +293,14 @@ def salt_config_to_yaml(configuration, line_break='\n'):
Dumper=SafeOrderedDumper)
def bootstrap(vm_, opts):
def bootstrap(vm_, opts=None):
'''
This is the primary entry point for logging into any system (POSIX or
Windows) to install Salt. It will make the decision on its own as to which
deploy function to call.
'''
if opts is None:
opts = __opts__
deploy_config = salt.config.get_cloud_config_value(
'deploy',
vm_, opts, default=False)

View file

@ -38,7 +38,6 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import third party libs
import salt.ext.six as six
VALID_PROVIDERS = ('pygit2', 'gitpython')
# Optional per-remote params that can only be used on a per-remote basis, and
# thus do not have defaults in salt/config.py.
PER_REMOTE_ONLY = ('name',)
@ -164,7 +163,7 @@ class GitProvider(object):
directly.
self.provider should be set in the sub-class' __init__ function before
invoking GitProvider.__init__().
invoking the parent class' __init__.
'''
def __init__(self, opts, remote, per_remote_defaults, per_remote_only,
override_params, cache_root, role='gitfs'):
@ -857,8 +856,10 @@ class GitPython(GitProvider):
def __init__(self, opts, remote, per_remote_defaults, per_remote_only,
override_params, cache_root, role='gitfs'):
self.provider = 'gitpython'
GitProvider.__init__(self, opts, remote, per_remote_defaults,
per_remote_only, override_params, cache_root, role)
super(GitPython, self).__init__(
opts, remote, per_remote_defaults, per_remote_only,
override_params, cache_root, role
)
def add_refspecs(self, *refspecs):
'''
@ -1192,8 +1193,10 @@ class Pygit2(GitProvider):
def __init__(self, opts, remote, per_remote_defaults, per_remote_only,
override_params, cache_root, role='gitfs'):
self.provider = 'pygit2'
GitProvider.__init__(self, opts, remote, per_remote_defaults,
per_remote_only, override_params, cache_root, role)
super(Pygit2, self).__init__(
opts, remote, per_remote_defaults, per_remote_only,
override_params, cache_root, role
)
def add_refspecs(self, *refspecs):
'''
@ -1877,11 +1880,17 @@ class Pygit2(GitProvider):
fp_.write(blob.data)
GIT_PROVIDERS = {
'pygit2': Pygit2,
'gitpython': GitPython,
}
class GitBase(object):
'''
Base class for gitfs/git_pillar
'''
def __init__(self, opts, valid_providers=VALID_PROVIDERS, cache_root=None):
def __init__(self, opts, git_providers=None, cache_root=None):
'''
IMPORTANT: If specifying a cache_root, understand that this is also
where the remotes will be cloned. A non-default cache_root is only
@ -1889,8 +1898,9 @@ class GitBase(object):
out into the winrepo locations and not within the cachedir.
'''
self.opts = opts
self.valid_providers = valid_providers
self.get_provider()
self.git_providers = git_providers if git_providers is not None \
else GIT_PROVIDERS
self.verify_provider()
if cache_root is not None:
self.cache_root = self.remote_root = cache_root
else:
@ -1948,7 +1958,7 @@ class GitBase(object):
self.remotes = []
for remote in remotes:
repo_obj = self.provider_class(
repo_obj = self.git_providers[self.provider](
self.opts,
remote,
per_remote_defaults,
@ -2202,7 +2212,7 @@ class GitBase(object):
# Hash file won't exist if no files have yet been served up
pass
def get_provider(self):
def verify_provider(self):
'''
Determine which provider to use
'''
@ -2223,12 +2233,12 @@ class GitBase(object):
# Should only happen if someone does something silly like
# set the provider to a numeric value.
desired_provider = str(desired_provider).lower()
if desired_provider not in self.valid_providers:
if desired_provider not in self.git_providers:
log.critical(
'Invalid {0}_provider \'{1}\'. Valid choices are: {2}'
.format(self.role,
desired_provider,
', '.join(self.valid_providers))
', '.join(self.git_providers))
)
failhard(self.role)
elif desired_provider == 'pygit2' and self.verify_pygit2():
@ -2241,17 +2251,13 @@ class GitBase(object):
.format(self.role)
)
failhard(self.role)
if self.provider == 'pygit2':
self.provider_class = Pygit2
elif self.provider == 'gitpython':
self.provider_class = GitPython
def verify_gitpython(self, quiet=False):
'''
Check if GitPython is available and at a compatible version (>= 0.3.0)
'''
def _recommend():
if HAS_PYGIT2 and 'pygit2' in self.valid_providers:
if HAS_PYGIT2 and 'pygit2' in self.git_providers:
log.error(_RECOMMEND_PYGIT2.format(self.role))
if not HAS_GITPYTHON:
@ -2262,7 +2268,7 @@ class GitBase(object):
)
_recommend()
return False
elif 'gitpython' not in self.valid_providers:
elif 'gitpython' not in self.git_providers:
return False
# pylint: disable=no-member
@ -2302,7 +2308,7 @@ class GitBase(object):
Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0.
'''
def _recommend():
if HAS_GITPYTHON and 'gitpython' in self.valid_providers:
if HAS_GITPYTHON and 'gitpython' in self.git_providers:
log.error(_RECOMMEND_GITPYTHON.format(self.role))
if not HAS_PYGIT2:
@ -2313,7 +2319,7 @@ class GitBase(object):
)
_recommend()
return False
elif 'pygit2' not in self.valid_providers:
elif 'pygit2' not in self.git_providers:
return False
# pylint: disable=no-member
@ -2432,7 +2438,7 @@ class GitFS(GitBase):
'''
def __init__(self, opts):
self.role = 'gitfs'
GitBase.__init__(self, opts)
super(GitFS, self).__init__(opts)
def dir_list(self, load):
'''
@ -2735,7 +2741,7 @@ class GitPillar(GitBase):
'''
def __init__(self, opts):
self.role = 'git_pillar'
GitBase.__init__(self, opts)
super(GitPillar, self).__init__(opts)
def checkout(self):
'''
@ -2837,7 +2843,7 @@ class WinRepo(GitBase):
'''
def __init__(self, opts, winrepo_dir):
self.role = 'winrepo'
GitBase.__init__(self, opts, cache_root=winrepo_dir)
super(WinRepo, self).__init__(opts, cache_root=winrepo_dir)
def checkout(self):
'''

View file

@ -9,8 +9,12 @@ import os
import shutil
import tempfile
import textwrap
import pwd
import logging
import stat
try:
import pwd
except ImportError:
pass
# Import 3rd-party libs
import yaml
@ -189,7 +193,6 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
self.integration_base_files = os.path.join(FILES, 'file', 'base')
# Create the dir if it doesn't already exist
try:
shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/')
except OSError:
@ -203,7 +206,12 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
if 'USERNAME' not in os.environ:
try:
os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name
import salt.utils
if salt.utils.is_windows():
import salt.utils.win_functions
os.environ['USERNAME'] = salt.utils.win_functions.get_current_user()
else:
os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name
except AttributeError:
log.error('Unable to get effective username, falling back to '
'\'root\'.')
@ -219,14 +227,18 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
Remove the temporary git repository and gitfs cache directory to ensure
a clean environment for each test.
'''
shutil.rmtree(self.tmp_repo_dir)
shutil.rmtree(self.tmp_cachedir)
shutil.rmtree(self.tmp_sock_dir)
shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error)
shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error)
shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error)
del self.tmp_repo_dir
del self.tmp_cachedir
del self.tmp_sock_dir
del self.integration_base_files
def _rmtree_error(self, func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def test_file_list(self):
ret = gitfs.file_list(LOAD)
self.assertIn('testfile', ret)

View file

@ -36,7 +36,8 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it execute a chef client run and return a dict
'''
self.assertDictEqual(chef.client(), {})
with patch.dict(chef.__opts__, {'cachedir': r'c:\salt\var\cache\salt\minion'}):
self.assertDictEqual(chef.client(), {})
# 'solo' function tests: 1
@ -44,4 +45,5 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it execute a chef solo run and return a dict
'''
self.assertDictEqual(chef.solo('/dev/sda1'), {})
with patch.dict(chef.__opts__, {'cachedir': r'c:\salt\var\cache\salt\minion'}):
self.assertDictEqual(chef.solo('/dev/sda1'), {})

View file

@ -679,9 +679,9 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual({"retcode": 0, "comment": "container cmd"}, ret)
def test_images_with_empty_tags(self):
"""
'''
docker 1.12 reports also images without tags with `null`.
"""
'''
client = Mock()
client.api_version = '1.24'
client.images = Mock(
@ -724,3 +724,24 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin):
with patch.object(docker_mod, 'inspect_image', inspect_image_mock):
ret = docker_mod.compare_container('container1', 'container2')
self.assertEqual(ret, {})
def test_resolve_tag(self):
'''
Test the resolve_tag function
'''
with_prefix = 'docker.io/foo:latest'
no_prefix = 'bar:latest'
with patch.object(docker_mod,
'list_tags',
MagicMock(return_value=[with_prefix])):
self.assertEqual(docker_mod.resolve_tag('foo'), with_prefix)
self.assertEqual(docker_mod.resolve_tag('foo:latest'), with_prefix)
self.assertEqual(docker_mod.resolve_tag(with_prefix), with_prefix)
self.assertEqual(docker_mod.resolve_tag('foo:bar'), False)
with patch.object(docker_mod,
'list_tags',
MagicMock(return_value=[no_prefix])):
self.assertEqual(docker_mod.resolve_tag('bar'), no_prefix)
self.assertEqual(docker_mod.resolve_tag(no_prefix), no_prefix)
self.assertEqual(docker_mod.resolve_tag('bar:baz'), False)

View file

@ -5,7 +5,10 @@
# Import Python libs
from __future__ import absolute_import
import grp
try:
import grp
except ImportError:
pass
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
@ -13,10 +16,12 @@ from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
# Import Salt Libs
import salt.utils
import salt.modules.groupadd as groupadd
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(salt.utils.is_windows(), "Module not available on Windows")
class GroupAddTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.groupadd

View file

@ -49,9 +49,15 @@ class InspectorCollectorTestCase(TestCase):
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
cachedir = os.sep + os.sep.join(['foo', 'cache'])
piddir = os.sep + os.sep.join(['foo', 'pid'])
inspector = Inspector(cachedir=cachedir, piddir=piddir, pidfilename='bar.pid')
self.assertEqual(
inspector.dbfile,
os.sep + os.sep.join(['foo', 'cache', '_minion_collector.db']))
self.assertEqual(
inspector.pidfile,
os.sep + os.sep.join(['foo', 'pid', 'bar.pid']))
def test_file_tree(self):
'''
@ -60,12 +66,29 @@ class InspectorCollectorTestCase(TestCase):
:return:
'''
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector = Inspector(cachedir=os.sep + 'test',
piddir=os.sep + 'test',
pidfilename='bar.pid')
tree_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'inspectlib', 'tree_test')
expected_tree = (['/a/a/dummy.a', '/a/b/dummy.b', '/b/b.1', '/b/b.2', '/b/b.3'],
['/a', '/a/a', '/a/b', '/a/c', '/b', '/c'],
['/a/a/dummy.ln.a', '/a/b/dummy.ln.b', '/a/c/b.1', '/b/b.4',
'/b/b.5', '/c/b.1', '/c/b.2', '/c/b.3'])
expected_tree = ([os.sep + os.sep.join(['a', 'a', 'dummy.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.b']),
os.sep + os.sep.join(['b', 'b.1']),
os.sep + os.sep.join(['b', 'b.2']),
os.sep + os.sep.join(['b', 'b.3'])],
[os.sep + 'a',
os.sep + os.sep.join(['a', 'a']),
os.sep + os.sep.join(['a', 'b']),
os.sep + os.sep.join(['a', 'c']),
os.sep + 'b',
os.sep + 'c'],
[os.sep + os.sep.join(['a', 'a', 'dummy.ln.a']),
os.sep + os.sep.join(['a', 'b', 'dummy.ln.b']),
os.sep + os.sep.join(['a', 'c', 'b.1']),
os.sep + os.sep.join(['b', 'b.4']),
os.sep + os.sep.join(['b', 'b.5']),
os.sep + os.sep.join(['c', 'b.1']),
os.sep + os.sep.join(['c', 'b.2']),
os.sep + os.sep.join(['c', 'b.3'])])
tree_result = []
for chunk in inspector._get_all_files(tree_root):
buff = []

View file

@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jochen Breuer <jbreuer@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
try:
from salt.modules import kubernetes
except ImportError:
kubernetes = False
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(kubernetes is False, "Probably Kubernetes client lib is not installed. \
Skipping test_kubernetes.py")
class KubernetesTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.kubernetes
'''
def setup_loader_modules(self):
return {
kubernetes: {
'__salt__': {},
}
}
def test_nodes(self):
'''
Test node listing.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_node.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_node_name'}}]}}
)
self.assertEqual(kubernetes.nodes(), ['mock_node_name'])
self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_node().to_dict.called)
def test_deployments(self):
'''
Tests deployment listing.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"list_namespaced_deployment.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_deployment_name'}}]}}
)
self.assertEqual(kubernetes.deployments(), ['mock_deployment_name'])
self.assertTrue(
kubernetes.kubernetes.client.ExtensionsV1beta1Api().list_namespaced_deployment().to_dict.called)
def test_services(self):
'''
Tests services listing.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_namespaced_service.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_service_name'}}]}}
)
self.assertEqual(kubernetes.services(), ['mock_service_name'])
self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_namespaced_service().to_dict.called)
def test_pods(self):
'''
Tests pods listing.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.CoreV1Api.return_value = Mock(
**{"list_namespaced_pod.return_value.to_dict.return_value":
{'items': [{'metadata': {'name': 'mock_pod_name'}}]}}
)
self.assertEqual(kubernetes.pods(), ['mock_pod_name'])
self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().
list_namespaced_pod().to_dict.called)
def test_delete_deployments(self):
'''
Tests deployment creation.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="")
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"delete_namespaced_deployment.return_value.to_dict.return_value": {}}
)
self.assertEqual(kubernetes.delete_deployment("test"), {})
self.assertTrue(
kubernetes.kubernetes.client.ExtensionsV1beta1Api().
delete_namespaced_deployment().to_dict.called)
def test_create_deployments(self):
'''
Tests deployment creation.
:return:
'''
with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
**{"create_namespaced_deployment.return_value.to_dict.return_value": {}}
)
self.assertEqual(kubernetes.create_deployment("test", "default", {}, {},
None, None, None), {})
self.assertTrue(
kubernetes.kubernetes.client.ExtensionsV1beta1Api().
create_namespaced_deployment().to_dict.called)

View file

@ -5,11 +5,15 @@
# Import python libs
from __future__ import absolute_import
import grp
HAS_GRP = True
try:
import grp
except ImportError:
HAS_GRP = False
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, patch
# Import Salt Libs
@ -17,6 +21,7 @@ import salt.modules.mac_group as mac_group
from salt.exceptions import SaltInvocationError, CommandExecutionError
@skipIf(not HAS_GRP, "Missing required library 'grp'")
class MacGroupTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for the salt.modules.mac_group module

View file

@ -2,10 +2,13 @@
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import python libs
from __future__ import absolute_import
import pwd
HAS_PWD = True
try:
import pwd
except ImportError:
HAS_PWD = False
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
@ -17,6 +20,7 @@ import salt.modules.mac_user as mac_user
from salt.exceptions import SaltInvocationError, CommandExecutionError
@skipIf(not HAS_PWD, "Missing required library 'pwd'")
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MacUserTestCase(TestCase, LoaderModuleMockMixin):
'''
@ -26,14 +30,15 @@ class MacUserTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {mac_user: {}}
mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon',
'/var/virusmails', '/usr/bin/false')),
pwd.struct_passwd(('_appleevents', '*', 55, 55,
'AppleEvents Daemon',
'/var/empty', '/usr/bin/false')),
pwd.struct_passwd(('_appowner', '*', 87, 87,
'Application Owner',
'/var/empty', '/usr/bin/false'))]
if HAS_PWD:
mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon',
'/var/virusmails', '/usr/bin/false')),
pwd.struct_passwd(('_appleevents', '*', 55, 55,
'AppleEvents Daemon',
'/var/empty', '/usr/bin/false')),
pwd.struct_passwd(('_appowner', '*', 87, 87,
'Application Owner',
'/var/empty', '/usr/bin/false'))]
mock_info_ret = {'shell': '/bin/bash', 'name': 'test', 'gid': 4376,
'groups': ['TEST_GROUP'], 'home': '/Users/foo',
'fullname': 'TEST USER', 'uid': 4376}

View file

@ -10,14 +10,13 @@ from __future__ import absolute_import
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
Mock,
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt Libs
from salt.exceptions import CommandExecutionError
import salt.modules.dockermod as docker_mod
import salt.states.docker_image as docker_state
@ -50,21 +49,19 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin):
if ``image:latest`` is already downloaded locally the state
should not report changes.
'''
docker_inspect_image = Mock(
return_value={'Id': 'abcdefghijk'})
docker_pull = Mock(
docker_inspect_image = MagicMock(return_value={'Id': 'abcdefghijkl'})
docker_pull = MagicMock(
return_value={'Layers':
{'Already_Pulled': ['abcdefghijk'],
{'Already_Pulled': ['abcdefghijkl'],
'Pulled': []},
'Status': 'Image is up to date for image:latest',
'Time_Elapsed': 1.1})
docker_list_tags = Mock(
return_value=['image:latest']
)
docker_list_tags = MagicMock(return_value=['image:latest'])
docker_resolve_tag = MagicMock(return_value='image:latest')
__salt__ = {'docker.list_tags': docker_list_tags,
'docker.pull': docker_pull,
'docker.inspect_image': docker_inspect_image,
}
'docker.resolve_tag': docker_resolve_tag}
with patch.dict(docker_state.__dict__,
{'__salt__': __salt__}):
ret = docker_state.present('image:latest', force=True)
@ -89,29 +86,24 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin):
if ``image:latest`` is not downloaded and force is true
should pull a new image successfuly.
'''
docker_inspect_image = Mock(
side_effect=CommandExecutionError(
'Error 404: No such image/container: image:latest'))
docker_pull = Mock(
docker_inspect_image = MagicMock(return_value={'Id': '1234567890ab'})
docker_pull = MagicMock(
return_value={'Layers':
{'Already_Pulled': ['abcdefghijk'],
'Pulled': ['abcdefghijk']},
'Status': "Image 'image:latest' was pulled",
'Time_Elapsed': 1.1})
docker_list_tags = Mock(
side_effect=[[], ['image:latest']]
)
{'Pulled': ['abcdefghijkl']},
'Status': "Image 'image:latest' was pulled",
'Time_Elapsed': 1.1})
docker_list_tags = MagicMock(side_effect=[[], ['image:latest']])
docker_resolve_tag = MagicMock(return_value='image:latest')
__salt__ = {'docker.list_tags': docker_list_tags,
'docker.pull': docker_pull,
'docker.inspect_image': docker_inspect_image,
}
'docker.resolve_tag': docker_resolve_tag}
with patch.dict(docker_state.__dict__,
{'__salt__': __salt__}):
ret = docker_state.present('image:latest', force=True)
self.assertEqual(ret,
{'changes': {
'Layers': {'Already_Pulled': ['abcdefghijk'],
'Pulled': ['abcdefghijk']},
'Layers': {'Pulled': ['abcdefghijkl']},
'Status': "Image 'image:latest' was pulled",
'Time_Elapsed': 1.1},
'result': True,

View file

@ -66,7 +66,7 @@ class TestGitFSProvider(TestCase):
('git_pillar', salt.utils.gitfs.GitPillar),
('winrepo', salt.utils.gitfs.WinRepo)):
key = '{0}_provider'.format(role_name)
for provider in salt.utils.gitfs.VALID_PROVIDERS:
for provider in salt.utils.gitfs.GIT_PROVIDERS:
verify = 'verify_gitpython'
mock1 = _get_mock(verify, provider)
with patch.object(role_class, verify, mock1):