Merge branch '2015.8' into '2016.3'

Conflicts:
  - salt/minion.py
  - salt/states/user.py
This commit is contained in:
rallytime 2016-04-15 13:58:50 -06:00
commit bbec183474
14 changed files with 158 additions and 89 deletions

View file

@ -12,6 +12,19 @@ No. Salt is 100% committed to being open-source, including all of our APIs. It
is developed under the `Apache 2.0 license`_, allowing it to be used in both
open and proprietary projects.
To expand on this a little:
There is much argument over the actual definition of "open core". From our standpoint, Salt is open source because
1. It is a standalone product that anyone is free to use.
2. It is developed in the open with contributions accepted from the community for the good of the project.
3. There are no features of Salt itself that are restricted to separate proprietary products distributed by SaltStack, Inc.
4. Because of our Apache 2.0 license, Salt can be used as the foundation for a project or even a proprietary tool.
5. Our APIs are open and documented (any lack of documentation is an oversight as opposed to an intentional decision by SaltStack the company) and available for use by anyone.
SaltStack the company does make proprietary products which use Salt and its libraries, as any company is free to do, but we do so via the APIs, NOT by forking Salt and creating a different, closed-source version of it for paying customers.
.. _`Apache 2.0 license`: http://www.apache.org/licenses/LICENSE-2.0.html
I think I found a bug! What should I do?

View file

@ -1456,6 +1456,36 @@ information can be found in the :ref:`GitFS Walkthrough
- v1.*
- 'mybranch\d+'
.. conf_master:: gitfs_global_lock
``gitfs_global_lock``
*********************
.. versionadded:: 2015.8.9
Default: ``True``
When set to ``False``, if there is an update lock for a gitfs remote and the
pid written to it is not running on the master, the lock file will be
automatically cleared and a new lock will be obtained. When set to ``True``,
Salt will simply log a warning when there is an update lock present.
On single-master deployments, disabling this option can help automatically deal
with instances where the master was shutdown/restarted during the middle of a
gitfs update, leaving an update lock in place.
However, on multi-master deployments with the gitfs cachedir shared via
`GlusterFS`__, nfs, or another network filesystem, it is strongly recommended
not to disable this option as doing so will cause lock files to be removed if
they were created by a different master.
.. code-block:: yaml
# Disable global lock
gitfs_global_lock: False
.. __: http://www.gluster.org/
GitFS Authentication Options
****************************
@ -2293,6 +2323,37 @@ SSH-based transport (if available) may be a better option.
git_pillar_ssl_verify: True
.. conf_master:: git_pillar_global_lock
``git_pillar_global_lock``
**************************
.. versionadded:: 2015.8.9
Default: ``True``
When set to ``False``, if there is an update/checkout lock for a git_pillar
remote and the pid written to it is not running on the master, the lock file
will be automatically cleared and a new lock will be obtained. When set to
``True``, Salt will simply log a warning when there is a lock present.
On single-master deployments, disabling this option can help automatically deal
with instances where the master was shutdown/restarted during the middle of a
git_pillar update/checkout, leaving a lock in place.
However, on multi-master deployments with the git_pillar cachedir shared via
`GlusterFS`__, nfs, or another network filesystem, it is strongly recommended
not to disable this option as doing so will cause lock files to be removed if
they were created by a different master.
.. code-block:: yaml
# Disable global lock
git_pillar_global_lock: False
.. __: http://www.gluster.org/
Git External Pillar Authentication Options
******************************************

View file

@ -123,6 +123,16 @@ instructions managed by one master will not agree with other masters.
The recommended way to sync these is to use a fileserver backend like gitfs or
to keep these files on shared storage.
.. important::
If using gitfs/git_pillar with the cachedir shared between masters using
`GlusterFS`_, nfs, or another network filesystem, and the masters are
running Salt 2015.5.9 or later, it is strongly recommended not to turn off
:conf_master:`gitfs_global_lock`/:conf_master:`git_pillar_global_lock` as
doing so will cause lock files to be removed if they were created by a
different master.
.. _GlusterFS: http://www.gluster.org/
Pillar_Roots
````````````

View file

@ -44,6 +44,7 @@ def main(argv):
display_help()
f = open( target, 'rb' ).read()
f = f.replace( search, replace )
f = f.replace( search.lower(), replace )
open( target, 'wb' ).write(f)
if __name__ == "__main__":

View file

@ -465,6 +465,7 @@ VALID_OPTS = {
'git_pillar_env': str,
'git_pillar_root': str,
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': str,
'git_pillar_password': str,
'git_pillar_insecure_auth': bool,
@ -484,6 +485,7 @@ VALID_OPTS = {
'gitfs_env_whitelist': list,
'gitfs_env_blacklist': list,
'gitfs_ssl_verify': bool,
'gitfs_global_lock': bool,
'hgfs_remotes': list,
'hgfs_mountpoint': str,
'hgfs_root': str,
@ -887,6 +889,7 @@ DEFAULT_MINION_OPTS = {
'git_pillar_env': '',
'git_pillar_root': '',
'git_pillar_ssl_verify': False,
'git_pillar_global_lock': True,
'git_pillar_user': '',
'git_pillar_password': '',
'git_pillar_insecure_auth': False,
@ -905,6 +908,7 @@ DEFAULT_MINION_OPTS = {
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'gitfs_global_lock': True,
'gitfs_ssl_verify': False,
'hash_type': 'md5',
'disable_modules': [],
@ -1074,6 +1078,7 @@ DEFAULT_MASTER_OPTS = {
'git_pillar_env': '',
'git_pillar_root': '',
'git_pillar_ssl_verify': False,
'git_pillar_global_lock': True,
'git_pillar_user': '',
'git_pillar_password': '',
'git_pillar_insecure_auth': False,
@ -1092,6 +1097,7 @@ DEFAULT_MASTER_OPTS = {
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'gitfs_global_lock': True,
'gitfs_ssl_verify': False,
'hgfs_remotes': [],
'hgfs_mountpoint': '',

View file

@ -460,10 +460,11 @@ class AsyncAuth(object):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts,
crypt='clear',
io_loop=self.io_loop)
error = None
while True:
try:
creds = yield self.sign_in(channel=channel)
except SaltClientError:
except SaltClientError as error:
break
if creds == 'retry':
if self.opts.get('caller'):
@ -483,9 +484,9 @@ class AsyncAuth(object):
del AsyncAuth.creds_map[self.__key(self.opts)]
except KeyError:
pass
self._authenticate_future.set_exception(
SaltClientError('Attempt to authenticate with the salt master failed')
)
if not error:
error = SaltClientError('Attempt to authenticate with the salt master failed')
self._authenticate_future.set_exception(error)
else:
AsyncAuth.creds_map[self.__key(self.opts)] = creds
self._creds = creds
@ -1057,7 +1058,7 @@ class SAuth(AsyncAuth):
if safe:
log.warning('SaltReqTimeoutError: {0}'.format(e))
return 'retry'
raise SaltClientError('Attempt to authenticate with the salt master failed')
raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error')
if 'load' in payload:
if 'ret' in payload['load']:

View file

@ -15,14 +15,6 @@ framer jobcleaner be active first setup
frame fsclean
enter
do salt raet maint fileserver clean
go clearfslocks
frame clearfslocks
enter
do salt raet maint fileserver clear locks
go cleargitpillarlocks
frame cleargitpillarlocks
enter
do salt raet maint git pillar clear locks
go start
frame start
do salt raet maint old jobs clear

View file

@ -111,40 +111,6 @@ class SaltRaetMaintFileserverClean(ioflo.base.deeding.Deed):
salt.daemons.masterapi.clean_fsbackend(self.opts.value)
class SaltRaetMaintFileserverClearLocks(ioflo.base.deeding.Deed):
    '''
    Clear any stale update locks left behind by the configured fileserver
    backends (e.g. after an interrupted gitfs update).

    FloScript:

    do salt raet maint fileserver clear locks at enter

    '''
    # Wire the shared master opts into this deed via the ioflo data store.
    Ioinits = {'opts': '.salt.opts'}

    def action(self):
        '''
        Remove lock files from all configured fileserver backends.
        '''
        salt.daemons.masterapi.clear_fsbackend_locks(self.opts.value)
class SaltRaetMaintGitPillarClearLocks(ioflo.base.deeding.Deed):
    '''
    Clear any stale update/checkout locks left behind by git_pillar remotes
    (e.g. after an interrupted pillar update).

    FloScript:

    do salt raet maint git pillar clear locks at enter

    '''
    # Wire the shared master opts into this deed via the ioflo data store.
    Ioinits = {'opts': '.salt.opts'}

    def action(self):
        '''
        Remove update/checkout lock files from configured git_pillar remotes.
        '''
        salt.daemons.masterapi.clear_git_pillar_locks(self.opts.value)
class SaltRaetMaintOldJobsClear(ioflo.base.deeding.Deed):
'''
Iterate over the jobs directory and clean out the old jobs

View file

@ -30,7 +30,6 @@ import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.gitfs
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
@ -138,36 +137,6 @@ def clean_fsbackend(opts):
)
def clear_fsbackend_locks(opts):
    '''
    Clear any locks from configured backends
    '''
    enabled_backends = opts['fileserver_backend']
    for prefix in ('git', 'hg', 'svn'):
        if prefix not in enabled_backends:
            continue
        mod_name = prefix + 'fs'
        fs_mod = getattr(salt.fileserver, mod_name, None)
        if fs_mod is None:
            # Backend configured but its module could not be resolved
            log.warning('Unable to access %s backend', mod_name)
            continue
        fs_mod.__opts__ = opts
        fs_mod.clear_lock()
def clear_git_pillar_locks(opts):
    '''
    Clear any update/checkout locks present in git_pillar remotes

    opts
        Master opts dict; each ``ext_pillar`` entry of type ``git`` has its
        remotes' ``update`` and ``checkout`` lock files removed.
    '''
    # Local imports so this best-effort maintenance helper resolves its
    # dependencies even though the module-level ``salt.utils.gitfs`` import
    # was dropped. The original code referenced a bare ``git_pillar`` name
    # that was never imported, raising NameError whenever a git ext_pillar
    # was configured.
    import salt.utils.gitfs
    import salt.pillar.git_pillar
    for ext_pillar in opts.get('ext_pillar', []):
        pillar_type = next(iter(ext_pillar))
        if pillar_type == 'git' and isinstance(ext_pillar[pillar_type], list):
            pillar = salt.utils.gitfs.GitPillar(opts)
            pillar.init_remotes(ext_pillar[pillar_type],
                                salt.pillar.git_pillar.PER_REMOTE_OVERRIDES)
            for lock_type in ('update', 'checkout'):
                for remote in pillar.remotes:
                    remote.clear_lock(lock_type=lock_type)
def clean_expired_tokens(opts):
'''
Clean expired tokens from the master

View file

@ -226,10 +226,6 @@ class Maintenance(SignalHandlingMultiprocessingProcess):
last = int(time.time())
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
# Clear any locks set for the active fileserver backends
salt.daemons.masterapi.clear_fsbackend_locks(self.opts)
# Clear any locks set for git_pillar
salt.daemons.masterapi.clear_git_pillar_locks(self.opts)
# Clean out pub auth
salt.daemons.masterapi.clean_pub_auth(self.opts)

View file

@ -82,7 +82,7 @@ def __virtual__():
'Fedora >= 15 uses systemd, will not load rh_service.py '
'as virtual \'service\''
)
if __grains__['os'] in ('RedHat', 'CentOS', 'ScientificLinux', 'OEL'):
if __grains__['os'] in ('RedHat', 'CentOS', 'ScientificLinux', 'OEL', 'CloudLinux'):
if osrelease_major >= 7:
return (
False,

View file

@ -13,7 +13,7 @@ these states. Here is some example SLS:
- humanname: CentOS-$releasever - Base
- mirrorlist: http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
- comments:
- '#http://mirror.centos.org/centos/$releasever/os/$basearch/'
- 'http://mirror.centos.org/centos/$releasever/os/$basearch/'
- gpgcheck: 1
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6

View file

@ -537,7 +537,10 @@ def present(name,
if 'shadow.info' in __salt__:
for key in spost:
if lshad[key] != spost[key]:
ret['changes'][key] = spost[key]
if key == 'passwd':
ret['changes'][key] = 'XXX-REDACTED-XXX'
else:
ret['changes'][key] = spost[key]
if __grains__['kernel'] in ('OpenBSD', 'FreeBSD') and lcpost != lcpre:
ret['changes']['loginclass'] = lcpost
if ret['changes']:

View file

@ -59,6 +59,7 @@ import salt.utils
import salt.utils.itertools
import salt.utils.url
import salt.fileserver
from salt.utils.process import os_is_running as pid_exists
from salt.exceptions import FileserverConfigError, GitLockError
from salt.utils.event import tagify
@ -404,12 +405,62 @@ class GitProvider(object):
os.O_CREAT | os.O_EXCL | os.O_WRONLY)
with os.fdopen(fh_, 'w'):
# Write the lock file and close the filehandle
pass
os.write(fh_, str(os.getpid()))
except (OSError, IOError) as exc:
if exc.errno == errno.EEXIST:
if failhard:
raise
return None
with salt.utils.fopen(self._get_lock_file(lock_type), 'r') as fd_:
try:
pid = int(fd_.readline().rstrip())
except ValueError:
# Lock file is empty, set pid to 0 so it evaluates as
# False.
pid = 0
#if self.opts.get("gitfs_global_lock") or pid and pid_exists(int(pid)):
global_lock_key = self.role + '_global_lock'
lock_file = self._get_lock_file(lock_type=lock_type)
if self.opts[global_lock_key]:
msg = (
'{0} is enabled and {1} lockfile {2} is present for '
'{3} remote \'{4}\'.'.format(
global_lock_key,
lock_type,
lock_file,
self.role,
self.id,
)
)
if pid:
msg += ' Process {0} obtained the lock'.format(pid)
if not pid_exists(pid):
msg += (' but this process is not running. The '
'update may have been interrupted. If '
'using multi-master with shared gitfs '
'cache, the lock may have been obtained '
'by another master.')
log.warning(msg)
if failhard:
raise
return
elif pid and pid_exists(pid):
log.warning('Process %d has a %s %s lock (%s)',
pid, self.role, lock_type, lock_file)
if failhard:
raise
return
else:
if pid:
log.warning(
'Process %d has a %s %s lock (%s), but this '
'process is not running. Cleaning up lock file.',
pid, self.role, lock_type, lock_file
)
success, fail = self.clear_lock()
if success:
return self._lock(lock_type='update',
failhard=failhard)
elif failhard:
raise
return
else:
msg = 'Unable to set {0} lock for {1} ({2}): {3} '.format(
lock_type,