Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)
Use one salt.utils.gitfs.GitFS instance per thread
This reduces some of the overhead caused by many concurrent fileclient requests from minions. Additionally, initializing remotes has been folded into the GitBase class' dunder init.

Instantiating an object and initializing its remotes were originally split so that an instance could be created with just the master opts, loading the configuration and nothing else. This allowed for certain cases (like clearing the cache files) where we didn't need to actually initialize the remotes. But that split A) presents a problem when the instance being used is a singleton, since you don't want to be re-initializing the remotes all the time, and B) is no longer necessary, since initialization can be (and now is) suppressed via a new argument to the dunder init.
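In practice this means remotes are wired up in a single constructor call, and config-only use cases pass init_remotes=False instead of skipping init_remotes(). A minimal sketch of both patterns (assuming a standard master config path; the keyword names match the new dunder init signature below):

    import salt.config
    import salt.utils.gitfs
    from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY

    opts = salt.config.master_config('/etc/salt/master')

    # One-step construction: __init__ now also initializes the remotes.
    gitfs = salt.utils.gitfs.GitFS(
        opts,
        opts['gitfs_remotes'],
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY)
    gitfs.update()

    # Config-only usage (e.g. clearing cache files) suppresses remote
    # initialization via the new init_remotes argument.
    cfg_only = salt.utils.gitfs.GitFS(
        opts,
        opts['gitfs_remotes'],
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY,
        init_remotes=False)
    cfg_only.clear_cache()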
parent 78e46b3a68
commit 3e96225210

11 changed files with 227 additions and 144 deletions

@@ -69,12 +69,11 @@ def init_git_pillar(opts):
     for opts_dict in [x for x in opts.get('ext_pillar', [])]:
         if 'git' in opts_dict:
             try:
-                pillar = salt.utils.gitfs.GitPillar(opts)
-                pillar.init_remotes(
+                pillar = salt.utils.gitfs.GitPillar(
+                    opts,
                     opts_dict['git'],
-                    git_pillar.PER_REMOTE_OVERRIDES,
-                    git_pillar.PER_REMOTE_ONLY
-                )
+                    per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES,
+                    per_remote_only=git_pillar.PER_REMOTE_ONLY)
                 ret.append(pillar)
             except FileserverConfigError:
                 if opts.get('git_pillar_verify_config', True):

@@ -71,6 +71,15 @@ log = logging.getLogger(__name__)
 __virtualname__ = 'git'
 
 
+def _gitfs(init_remotes=True):
+    return salt.utils.gitfs.GitFS(
+        __opts__,
+        __opts__['gitfs_remotes'],
+        per_remote_overrides=PER_REMOTE_OVERRIDES,
+        per_remote_only=PER_REMOTE_ONLY,
+        init_remotes=init_remotes)
+
+
 def __virtual__():
     '''
     Only load if the desired provider module is present and gitfs is enabled
@@ -79,7 +88,7 @@ def __virtual__():
     if __virtualname__ not in __opts__['fileserver_backend']:
         return False
     try:
-        salt.utils.gitfs.GitFS(__opts__)
+        _gitfs(init_remotes=False)
         # Initialization of the GitFS object did not fail, so we know we have
         # valid configuration syntax and that a valid provider was detected.
         return __virtualname__
@@ -92,18 +101,14 @@ def clear_cache():
     '''
     Completely clear gitfs cache
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    return gitfs.clear_cache()
+    return _gitfs(init_remotes=False).clear_cache()
 
 
 def clear_lock(remote=None, lock_type='update'):
     '''
     Clear update.lk
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.clear_lock(remote=remote, lock_type=lock_type)
+    return _gitfs().clear_lock(remote=remote, lock_type=lock_type)
 
 
 def lock(remote=None):
@@ -114,30 +119,21 @@ def lock(remote=None):
     information, or a pattern. If the latter, then remotes for which the URL
     matches the pattern will be locked.
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.lock(remote=remote)
+    return _gitfs().lock(remote=remote)
 
 
 def update():
     '''
     Execute a git fetch on all of the repos
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    gitfs.update()
+    _gitfs().update()
 
 
 def envs(ignore_cache=False):
     '''
     Return a list of refs that can be used as environments
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.envs(ignore_cache=ignore_cache)
+    return _gitfs().envs(ignore_cache=ignore_cache)
 
 
 def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
@@ -145,10 +141,7 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
     Find the first file to match the path and ref, read the file out of git
     and send the path to the newly cached file
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.find_file(path, tgt_env=tgt_env, **kwargs)
+    return _gitfs().find_file(path, tgt_env=tgt_env, **kwargs)
 
 
 def init():
@@ -156,29 +149,21 @@ def init():
     Initialize remotes. This is only used by the master's pre-flight checks,
     and is not invoked by GitFS.
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
+    _gitfs()
 
 
 def serve_file(load, fnd):
     '''
     Return a chunk from a file based on the data received
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.serve_file(load, fnd)
+    return _gitfs().serve_file(load, fnd)
 
 
 def file_hash(load, fnd):
     '''
     Return a file hash, the hash type is set in the master config file
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.file_hash(load, fnd)
+    return _gitfs().file_hash(load, fnd)
 
 
 def file_list(load):
@@ -186,10 +171,7 @@ def file_list(load):
     Return a list of all files on the file server in a specified
     environment (specified as a key within the load dict).
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.file_list(load)
+    return _gitfs().file_list(load)
 
 
 def file_list_emptydirs(load):  # pylint: disable=W0613
@@ -204,17 +186,11 @@ def dir_list(load):
     '''
     Return a list of all directories on the master
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.dir_list(load)
+    return _gitfs().dir_list(load)
 
 
 def symlink_list(load):
     '''
     Return a dict of all symlinks based on a given path in the repo
     '''
-    gitfs = salt.utils.gitfs.GitFS(__opts__)
-    gitfs.init_remotes(__opts__['gitfs_remotes'],
-                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
-    return gitfs.symlink_list(load)
+    return _gitfs().symlink_list(load)

@@ -487,11 +487,11 @@ class Master(SMaster):
             for repo in git_pillars:
                 new_opts[u'ext_pillar'] = [repo]
                 try:
-                    git_pillar = salt.utils.gitfs.GitPillar(new_opts)
-                    git_pillar.init_remotes(
+                    git_pillar = salt.utils.gitfs.GitPillar(
+                        new_opts,
                         repo[u'git'],
-                        salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
-                        salt.pillar.git_pillar.PER_REMOTE_ONLY)
+                        per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                        per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                 except FileserverConfigError as exc:
                     critical_errors.append(exc.strerror)
                 finally:

@@ -891,11 +891,11 @@ class Pillar(object):
                     # Avoid circular import
                     import salt.utils.gitfs
                     import salt.pillar.git_pillar
-                    git_pillar = salt.utils.gitfs.GitPillar(self.opts)
-                    git_pillar.init_remotes(
+                    git_pillar = salt.utils.gitfs.GitPillar(
+                        self.opts,
                         self.ext['git'],
-                        salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
-                        salt.pillar.git_pillar.PER_REMOTE_ONLY)
+                        per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                        per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                     git_pillar.fetch_remotes()
                 except TypeError:
                     # Handle malformed ext_pillar

@@ -348,12 +348,6 @@ from salt.ext import six
 PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
 PER_REMOTE_ONLY = ('name', 'mountpoint')
 
-# Fall back to default per-remote-only. This isn't technically needed since
-# salt.utils.gitfs.GitBase.init_remotes() will default to
-# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
-# runners and other modules that import salt.pillar.git_pillar.
-PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
-
 # Set up logging
 log = logging.getLogger(__name__)
 
@@ -371,7 +365,7 @@ def __virtual__():
         return False
 
     try:
-        salt.utils.gitfs.GitPillar(__opts__)
+        salt.utils.gitfs.GitPillar(__opts__, init_remotes=False)
         # Initialization of the GitPillar object did not fail, so we
         # know we have valid configuration syntax and that a valid
         # provider was detected.
@@ -387,8 +381,11 @@ def ext_pillar(minion_id, pillar, *repos):  # pylint: disable=unused-argument
     opts = copy.deepcopy(__opts__)
     opts['pillar_roots'] = {}
     opts['__git_pillar'] = True
-    git_pillar = salt.utils.gitfs.GitPillar(opts)
-    git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
+    git_pillar = salt.utils.gitfs.GitPillar(
+        opts,
+        repos,
+        per_remote_overrides=PER_REMOTE_OVERRIDES,
+        per_remote_only=PER_REMOTE_ONLY)
     if __opts__.get('__role') == 'minion':
         # If masterless, fetch the remotes. We'll need to remove this once
         # we make the minion daemon able to run standalone.

@@ -328,11 +328,14 @@ def clear_git_lock(role, remote=None, **kwargs):
         salt.utils.args.invalid_kwargs(kwargs)
 
     if role == 'gitfs':
-        git_objects = [salt.utils.gitfs.GitFS(__opts__)]
-        git_objects[0].init_remotes(
-            __opts__['gitfs_remotes'],
-            salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
-            salt.fileserver.gitfs.PER_REMOTE_ONLY)
+        git_objects = [
+            salt.utils.gitfs.GitFS(
+                __opts__,
+                __opts__['gitfs_remotes'],
+                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
+                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY
+            )
+        ]
    elif role == 'git_pillar':
        git_objects = []
        for ext_pillar in __opts__['ext_pillar']:
@@ -340,11 +343,11 @@ def clear_git_lock(role, remote=None, **kwargs):
             if key == 'git':
                 if not isinstance(ext_pillar['git'], list):
                     continue
-                obj = salt.utils.gitfs.GitPillar(__opts__)
-                obj.init_remotes(
+                obj = salt.utils.gitfs.GitPillar(
+                    __opts__,
                     ext_pillar['git'],
-                    salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
-                    salt.pillar.git_pillar.PER_REMOTE_ONLY)
+                    per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                 git_objects.append(obj)
     elif role == 'winrepo':
         winrepo_dir = __opts__['winrepo_dir']
@@ -355,11 +358,12 @@ def clear_git_lock(role, remote=None, **kwargs):
            (winrepo_remotes, winrepo_dir),
            (__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
        ):
-           obj = salt.utils.gitfs.WinRepo(__opts__, base_dir)
-           obj.init_remotes(
+           obj = salt.utils.gitfs.WinRepo(
+               __opts__,
                remotes,
-               salt.runners.winrepo.PER_REMOTE_OVERRIDES,
-               salt.runners.winrepo.PER_REMOTE_ONLY)
+               per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
+               per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
+               cache_root=base_dir)
            git_objects.append(obj)
    else:
        raise SaltInvocationError('Invalid role \'{0}\''.format(role))

@@ -66,10 +66,11 @@ def update(branch=None, repo=None):
         if pillar_type != 'git':
             continue
         pillar_conf = ext_pillar[pillar_type]
-        pillar = salt.utils.gitfs.GitPillar(__opts__)
-        pillar.init_remotes(pillar_conf,
-                            salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
-                            salt.pillar.git_pillar.PER_REMOTE_ONLY)
+        pillar = salt.utils.gitfs.GitPillar(
+            __opts__,
+            pillar_conf,
+            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
         for remote in pillar.remotes:
             # Skip this remote if it doesn't match the search criteria
             if branch is not None:

@@ -32,7 +32,7 @@ log = logging.getLogger(__name__)
 PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')
 
 # Fall back to default per-remote-only. This isn't technically needed since
-# salt.utils.gitfs.GitBase.init_remotes() will default to
+# salt.utils.gitfs.GitBase.__init__ will default to
 # salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
 # runners and other modules that import salt.runners.winrepo.
 PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
@@ -216,9 +216,12 @@ def update_git_repos(opts=None, clean=False, masterless=False):
        else:
            # New winrepo code utilizing salt.utils.gitfs
            try:
-               winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
-               winrepo.init_remotes(
-                   remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
+               winrepo = salt.utils.gitfs.WinRepo(
+                   opts,
+                   remotes,
+                   per_remote_overrides=PER_REMOTE_OVERRIDES,
+                   per_remote_only=PER_REMOTE_ONLY,
+                   cache_root=base_dir)
                winrepo.fetch_remotes()
                # Since we're not running update(), we need to manually call
                # clear_old_remotes() to remove directories from remotes that

@@ -17,6 +17,8 @@ import shutil
 import stat
 import subprocess
 import time
+import tornado.ioloop
+import weakref
 from datetime import datetime
 
 # Import salt libs
@@ -1923,12 +1925,47 @@ class GitBase(object):
     '''
     Base class for gitfs/git_pillar
     '''
-    def __init__(self, opts, git_providers=None, cache_root=None):
+    def __init__(self, opts, remotes=None, per_remote_overrides=(),
+                 per_remote_only=PER_REMOTE_ONLY, git_providers=None,
+                 cache_root=None, init_remotes=True):
         '''
         IMPORTANT: If specifying a cache_root, understand that this is also
         where the remotes will be cloned. A non-default cache_root is only
         really designed right now for winrepo, as its repos need to be checked
         out into the winrepo locations and not within the cachedir.
+
+        As of the Oxygen release cycle, the classes used to interface with
+        Pygit2 and GitPython can be overridden by passing the git_providers
+        argument when spawning a class instance. This allows for one to write
+        classes which inherit from salt.utils.gitfs.Pygit2 or
+        salt.utils.gitfs.GitPython, and then direct one of the GitBase
+        subclasses (GitFS, GitPillar, WinRepo) to use the custom class. For
+        example:
+
+        .. code-block:: Python
+
+            import salt.utils.gitfs
+            from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY
+
+            class CustomPygit2(salt.utils.gitfs.Pygit2):
+                def fetch_remotes(self):
+                    ...
+                    Alternate fetch behavior here
+                    ...
+
+            git_providers = {
+                'pygit2': CustomPygit2,
+                'gitpython': salt.utils.gitfs.GitPython,
+            }
+
+            gitfs = salt.utils.gitfs.GitFS(
+                __opts__,
+                __opts__['gitfs_remotes'],
+                per_remote_overrides=PER_REMOTE_OVERRIDES,
+                per_remote_only=PER_REMOTE_ONLY,
+                git_providers=git_providers)
+
+            gitfs.fetch_remotes()
         '''
         self.opts = opts
         self.git_providers = git_providers if git_providers is not None \
@@ -1944,8 +1981,13 @@ class GitBase(object):
         self.hash_cachedir = salt.utils.path.join(self.cache_root, 'hash')
         self.file_list_cachedir = salt.utils.path.join(
             self.opts['cachedir'], 'file_lists', self.role)
+        if init_remotes:
+            self.init_remotes(
+                remotes if remotes is not None else [],
+                per_remote_overrides,
+                per_remote_only)
 
-    def init_remotes(self, remotes, per_remote_overrides,
+    def init_remotes(self, remotes, per_remote_overrides=(),
                      per_remote_only=PER_REMOTE_ONLY):
         '''
         Initialize remotes
@@ -2469,9 +2511,51 @@ class GitFS(GitBase):
     '''
     Functionality specific to the git fileserver backend
     '''
-    def __init__(self, opts):
-        self.role = 'gitfs'
-        super(GitFS, self).__init__(opts)
+    role = 'gitfs'
+    instance_map = weakref.WeakKeyDictionary()
+
+    def __new__(cls, opts, remotes=None, per_remote_overrides=(),
+                per_remote_only=PER_REMOTE_ONLY, git_providers=None,
+                cache_root=None, init_remotes=True):
+        '''
+        If we are not initializing remotes (such as in cases where we just want
+        to load the config so that we can run clear_cache), then just return a
+        new __init__'ed object. Otherwise, check the instance map and re-use an
+        instance if one exists for the current process. Weak references are
+        used to ensure that we garbage collect instances for threads which have
+        exited.
+        '''
+        # No need to get the ioloop reference if we're not initializing remotes
+        io_loop = tornado.ioloop.IOLoop.current() if init_remotes else None
+        if not init_remotes or io_loop not in cls.instance_map:
+            # We only evaluate the second condition in this if statement if
+            # we're initializing remotes, so we won't get here unless io_loop
+            # is something other than None.
+            obj = object.__new__(cls)
+            super(GitFS, obj).__init__(
+                opts,
+                remotes if remotes is not None else [],
+                per_remote_overrides=per_remote_overrides,
+                per_remote_only=per_remote_only,
+                git_providers=git_providers if git_providers is not None
+                    else GIT_PROVIDERS,
+                cache_root=cache_root,
+                init_remotes=init_remotes)
+            if not init_remotes:
+                log.debug('Created gitfs object with uninitialized remotes')
+            else:
+                log.debug('Created gitfs object for process %s', os.getpid())
+                # Add to the instance map so we can re-use later
+                cls.instance_map[io_loop] = obj
+            return obj
+        log.debug('Re-using gitfs object for process %s', os.getpid())
+        return cls.instance_map[io_loop]
+
+    def __init__(self, opts, remotes, per_remote_overrides=(),  # pylint: disable=super-init-not-called
+                 per_remote_only=PER_REMOTE_ONLY, git_providers=None,
+                 cache_root=None, init_remotes=True):
+        # Initialization happens above in __new__(), so don't do anything here
+        pass
 
     def dir_list(self, load):
         '''
@@ -2753,9 +2837,7 @@ class GitPillar(GitBase):
     '''
     Functionality specific to the git external pillar
     '''
-    def __init__(self, opts):
-        self.role = 'git_pillar'
-        super(GitPillar, self).__init__(opts)
+    role = 'git_pillar'
 
     def checkout(self):
         '''
@@ -2843,9 +2925,7 @@ class WinRepo(GitBase):
     '''
     Functionality specific to the winrepo runner
     '''
-    def __init__(self, opts, winrepo_dir):
-        self.role = 'winrepo'
-        super(WinRepo, self).__init__(opts, cache_root=winrepo_dir)
+    role = 'winrepo'
 
     def checkout(self):
         '''

@@ -5,10 +5,12 @@
 
 # Import Python libs
 from __future__ import absolute_import
+import errno
 import os
 import shutil
 import tempfile
 import textwrap
+import tornado.ioloop
 import logging
 import stat
 try:
@@ -40,18 +42,26 @@ import salt.utils.win_functions
 
 log = logging.getLogger(__name__)
 
+TMP_SOCK_DIR = tempfile.mkdtemp(dir=TMP)
+TMP_REPO_DIR = os.path.join(TMP, 'gitfs_root')
+INTEGRATION_BASE_FILES = os.path.join(FILES, 'file', 'base')
+
+
+def _rmtree_error(func, path, excinfo):
+    os.chmod(path, stat.S_IWRITE)
+    func(path)
+
+
 @skipIf(not HAS_GITPYTHON, 'GitPython is not installed')
 class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
 
     def setup_loader_modules(self):
         self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
-        self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP)
         return {
             gitfs: {
                 '__opts__': {
                     'cachedir': self.tmp_cachedir,
-                    'sock_dir': self.tmp_sock_dir,
+                    'sock_dir': TMP_SOCK_DIR,
                     'gitfs_root': 'salt',
                     'fileserver_backend': ['git'],
                     'gitfs_base': 'master',
@@ -81,9 +91,17 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
             }
         }
 
+    @classmethod
+    def setUpClass(cls):
+        # Clear the instance map so that we make sure to create a new instance
+        # for this test class.
+        try:
+            del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()]
+        except KeyError:
+            pass
+
     def tearDown(self):
         shutil.rmtree(self.tmp_cachedir)
-        shutil.rmtree(self.tmp_sock_dir)
 
     def test_per_saltenv_config(self):
         opts_override = textwrap.dedent('''
@@ -109,10 +127,11 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
             - mountpoint: abc
         ''')
         with patch.dict(gitfs.__opts__, yaml.safe_load(opts_override)):
-            git_fs = salt.utils.gitfs.GitFS(gitfs.__opts__)
-            git_fs.init_remotes(
+            git_fs = salt.utils.gitfs.GitFS(
+                gitfs.__opts__,
                 gitfs.__opts__['gitfs_remotes'],
-                gitfs.PER_REMOTE_OVERRIDES, gitfs.PER_REMOTE_ONLY)
+                per_remote_overrides=gitfs.PER_REMOTE_OVERRIDES,
+                per_remote_only=gitfs.PER_REMOTE_ONLY)
 
         # repo1 (branch: foo)
         # The mountpoint should take the default (from gitfs_mountpoint), while
@@ -169,14 +188,12 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
 
     def setup_loader_modules(self):
         self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
-        self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP)
-        self.tmp_repo_dir = os.path.join(TMP, 'gitfs_root')
         return {
             gitfs: {
                 '__opts__': {
                     'cachedir': self.tmp_cachedir,
-                    'sock_dir': self.tmp_sock_dir,
-                    'gitfs_remotes': ['file://' + self.tmp_repo_dir],
+                    'sock_dir': TMP_SOCK_DIR,
+                    'gitfs_remotes': ['file://' + TMP_REPO_DIR],
                     'gitfs_root': '',
                     'fileserver_backend': ['git'],
                     'gitfs_base': 'master',
@@ -206,26 +223,26 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
             }
         }
 
-    def setUp(self):
-        '''
-        We don't want to check in another .git dir into GH because that just gets messy.
-        Instead, we'll create a temporary repo on the fly for the tests to examine.
-        '''
-        if not gitfs.__virtual__():
-            self.skipTest("GitFS could not be loaded. Skipping GitFS tests!")
-        self.integration_base_files = os.path.join(FILES, 'file', 'base')
+    @classmethod
+    def setUpClass(cls):
+        # Clear the instance map so that we make sure to create a new instance
+        # for this test class.
+        try:
+            del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()]
+        except KeyError:
+            pass
 
         # Create the dir if it doesn't already exist
         try:
-            shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/')
+            shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/')
         except OSError:
             # We probably caught an error because files already exist. Ignore
             pass
 
         try:
-            repo = git.Repo(self.tmp_repo_dir)
+            repo = git.Repo(TMP_REPO_DIR)
         except git.exc.InvalidGitRepositoryError:
-            repo = git.Repo.init(self.tmp_repo_dir)
+            repo = git.Repo.init(TMP_REPO_DIR)
 
         if 'USERNAME' not in os.environ:
             try:
@@ -238,9 +255,19 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
                             '\'root\'.')
                 os.environ['USERNAME'] = 'root'
 
-        repo.index.add([x for x in os.listdir(self.tmp_repo_dir)
+        repo.index.add([x for x in os.listdir(TMP_REPO_DIR)
                         if x != '.git'])
         repo.index.commit('Test')
+
+    def setUp(self):
+        '''
+        We don't want to check in another .git dir into GH because that just
+        gets messy. Instead, we'll create a temporary repo on the fly for the
+        tests to examine.
+        '''
+        if not gitfs.__virtual__():
+            self.skipTest("GitFS could not be loaded. Skipping GitFS tests!")
+        self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
         gitfs.update()
 
     def tearDown(self):
@@ -248,17 +275,11 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
         Remove the temporary git repository and gitfs cache directory to ensure
         a clean environment for each test.
         '''
-        shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error)
-        shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error)
-        shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error)
-        del self.tmp_repo_dir
-        del self.tmp_cachedir
-        del self.tmp_sock_dir
-        del self.integration_base_files
-
-    def _rmtree_error(self, func, path, excinfo):
-        os.chmod(path, stat.S_IWRITE)
-        func(path)
+        try:
+            shutil.rmtree(self.tmp_cachedir, onerror=_rmtree_error)
+        except OSError as exc:
+            if exc.errno != errno.EEXIST:
+                raise
 
     def test_file_list(self):
         ret = gitfs.file_list(LOAD)

@@ -37,18 +37,19 @@ class TestGitFSProvider(TestCase):
                               MagicMock(return_value=True)):
                 with patch.object(role_class, 'verify_pygit2',
                                   MagicMock(return_value=False)):
-                    args = [OPTS]
+                    args = [OPTS, {}]
+                    kwargs = {'init_remotes': False}
                     if role_name == 'winrepo':
-                        args.append('/tmp/winrepo-dir')
+                        kwargs['cache_root'] = '/tmp/winrepo-dir'
                     with patch.dict(OPTS, {key: provider}):
                         # Try to create an instance with uppercase letters in
                         # provider name. If it fails then a
                         # FileserverConfigError will be raised, so no assert is
                         # necessary.
-                        role_class(*args)
-                        # Now try to instantiate an instance with all lowercase
-                        # letters. Again, no need for an assert here.
-                        role_class(*args)
+                        role_class(*args, **kwargs)
+                        # Now try to instantiate an instance with all lowercase
+                        # letters. Again, no need for an assert here.
+                        role_class(*args, **kwargs)
 
     def test_valid_provider(self):
         '''
@@ -73,12 +74,13 @@ class TestGitFSProvider(TestCase):
                     verify = 'verify_pygit2'
                     mock2 = _get_mock(verify, provider)
                     with patch.object(role_class, verify, mock2):
-                        args = [OPTS]
+                        args = [OPTS, {}]
+                        kwargs = {'init_remotes': False}
                         if role_name == 'winrepo':
-                            args.append('/tmp/winrepo-dir')
+                            kwargs['cache_root'] = '/tmp/winrepo-dir'
 
                         with patch.dict(OPTS, {key: provider}):
-                            role_class(*args)
+                            role_class(*args, **kwargs)
 
                         with patch.dict(OPTS, {key: 'foo'}):
                             # Set the provider name to a known invalid provider
@@ -86,5 +88,5 @@ class TestGitFSProvider(TestCase):
                             self.assertRaises(
                                 FileserverConfigError,
                                 role_class,
-                                *args
-                            )
+                                *args,
+                                **kwargs)