mirror of https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00

Merge branch '3006.x' into test_fix

commit e6bba0ddba
11 changed files with 979 additions and 253 deletions
changelog/65816.fixed.md (new file, 1 line)

@@ -0,0 +1 @@
+Fix for GitFS failure to unlock lock file, and resource cleanup for process SIGTERM
salt/grains/core.py

@@ -39,6 +39,7 @@ import salt.utils.pkg.rpm
 import salt.utils.platform
 import salt.utils.stringutils
 from salt.utils.network import _clear_interfaces, _get_interfaces
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier
 from salt.utils.platform import linux_distribution as _linux_distribution

 try:
@@ -3048,13 +3049,7 @@ def get_machine_id():
     if platform.system() == "AIX":
         return _aix_get_machine_id()

-    locations = ["/etc/machine-id", "/var/lib/dbus/machine-id"]
-    existing_locations = [loc for loc in locations if os.path.exists(loc)]
-    if not existing_locations:
-        return {}
-    else:
-        with salt.utils.files.fopen(existing_locations[0]) as machineid:
-            return {"machine_id": machineid.read().strip()}
+    return _get_machine_identifier()


 def cwd():
salt/utils/files.py

@@ -381,7 +381,8 @@ def fopen(*args, **kwargs):
     # Workaround callers with bad buffering setting for binary files
     if kwargs.get("buffering") == 1 and "b" in kwargs.get("mode", ""):
         log.debug(
-            "Line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used"
+            "Line buffering (buffering=1) isn't supported in binary mode, "
+            "the default buffer size will be used"
        )
        kwargs["buffering"] = io.DEFAULT_BUFFER_SIZE
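Note: the guard above targets callers that combine line buffering with a binary mode. A minimal illustration of the call it papers over (the path is hypothetical):

    import salt.utils.files

    # buffering=1 is only meaningful in text mode; rather than surfacing
    # Python's RuntimeWarning, fopen() logs a debug message and substitutes
    # io.DEFAULT_BUFFER_SIZE before calling open().
    fh = salt.utils.files.fopen("/tmp/example.bin", mode="rb", buffering=1)
    fh.close()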
salt/utils/gitfs.py

@@ -13,6 +13,7 @@ import io
 import logging
 import multiprocessing
 import os
+import pathlib
 import shlex
 import shutil
 import stat
@@ -32,6 +33,7 @@ import salt.utils.hashutils
 import salt.utils.itertools
 import salt.utils.path
 import salt.utils.platform
+import salt.utils.process
 import salt.utils.stringutils
 import salt.utils.url
 import salt.utils.user
@@ -41,7 +43,7 @@ from salt.config import DEFAULT_MASTER_OPTS as _DEFAULT_MASTER_OPTS
 from salt.exceptions import FileserverConfigError, GitLockError, get_error_message
 from salt.utils.event import tagify
 from salt.utils.odict import OrderedDict
-from salt.utils.process import os_is_running as pid_exists
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier
 from salt.utils.versions import Version

 VALID_REF_TYPES = _DEFAULT_MASTER_OPTS["gitfs_ref_types"]
@@ -81,6 +83,14 @@ _INVALID_REPO = (

 log = logging.getLogger(__name__)

+HAS_PSUTIL = False
+try:
+    import psutil
+
+    HAS_PSUTIL = True
+except ImportError:
+    pass
+
 # pylint: disable=import-error
 try:
     if (
@@ -248,6 +258,11 @@ class GitProvider:
         def _val_cb(x, y):
             return str(y)

+        # get machine_identifier
+        self.mach_id = _get_machine_identifier().get(
+            "machine_id", "no_machine_id_available"
+        )
+
         self.global_saltenv = salt.utils.data.repack_dictlist(
             self.opts.get(f"{self.role}_saltenv", []),
             strict=True,
@@ -510,6 +525,17 @@ class GitProvider:
             os.makedirs(self._salt_working_dir)
         self.fetch_request_check()

+        if HAS_PSUTIL:
+            cur_pid = os.getpid()
+            process = psutil.Process(cur_pid)
+            dgm_process_dir = dir(process)
+            cache_dir = self.opts.get("cachedir", None)
+            gitfs_active = self.opts.get("gitfs_remotes", None)
+            if cache_dir and gitfs_active:
+                salt.utils.process.register_cleanup_finalize_function(
+                    gitfs_finalize_cleanup, cache_dir
+                )
+
     def get_cache_basehash(self):
         return self._cache_basehash
@@ -751,7 +777,12 @@ class GitProvider:
         except OSError as exc:
             if exc.errno == errno.ENOENT:
                 # No lock file present
-                pass
+                msg = (
+                    f"Attempt to remove lock {self.url} for file ({lock_file}) "
+                    f"which does not exist, exception : {exc} "
+                )
+                log.debug(msg)
+
             elif exc.errno == errno.EISDIR:
                 # Somehow this path is a directory. Should never happen
                 # unless some wiseguy manually creates a directory at this
@@ -763,8 +794,9 @@ class GitProvider:
             else:
                 _add_error(failed, exc)
         else:
-            msg = "Removed {} lock for {} remote '{}'".format(
-                lock_type, self.role, self.id
-            )
+            msg = (
+                f"Removed {lock_type} lock for {self.role} remote '{self.id}' "
+                f"on machine_id '{self.mach_id}'"
+            )
             log.debug(msg)
             success.append(msg)
@@ -903,7 +935,19 @@ class GitProvider:
                     self._get_lock_file(lock_type="update"),
                     self.role,
                 )
+            else:
+                log.warning(
+                    "Update lock file generated an unexpected exception for %s remote '%s', "
+                    "The lock file %s for %s type=update operation, exception: %s .",
+                    self.role,
+                    self.id,
+                    self._get_lock_file(lock_type="update"),
+                    self.role,
+                    str(exc),
+                )
             return False
+        except NotImplementedError as exc:
+            log.warning("fetch got NotImplementedError exception %s", exc)

     def _lock(self, lock_type="update", failhard=False):
         """
@@ -929,7 +973,11 @@ class GitProvider:
             )
             with os.fdopen(fh_, "wb"):
                 # Write the lock file and close the filehandle
-                os.write(fh_, salt.utils.stringutils.to_bytes(str(os.getpid())))
+                os.write(
+                    fh_,
+                    salt.utils.stringutils.to_bytes(f"{os.getpid()}\n{self.mach_id}\n"),
+                )
+
         except OSError as exc:
             if exc.errno == errno.EEXIST:
                 with salt.utils.files.fopen(self._get_lock_file(lock_type), "r") as fd_:
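The lock file now carries two newline-terminated fields: the owning PID and the machine id. A minimal sketch of reading one back, mirroring the parsing added in the next hunk (lock_path is a hypothetical path):

    import salt.utils.files
    import salt.utils.stringutils

    def read_lock_file(lock_path):
        # Line 1: PID of the lock holder; line 2: machine identifier.
        with salt.utils.files.fopen(lock_path, "r") as fd_:
            try:
                pid = int(salt.utils.stringutils.to_unicode(fd_.readline()).rstrip())
            except ValueError:
                pid = 0  # empty lock file evaluates as False
            mach_id = salt.utils.stringutils.to_unicode(fd_.readline()).rstrip()
        return pid, mach_id or 0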
@@ -941,40 +989,66 @@ class GitProvider:
                     # Lock file is empty, set pid to 0 so it evaluates as
                     # False.
                     pid = 0
+                try:
+                    mach_id = salt.utils.stringutils.to_unicode(
+                        fd_.readline()
+                    ).rstrip()
+                except ValueError as exc:
+                    # Lock file is empty, set machine id to 0 so it evaluates as
+                    # False.
+                    mach_id = 0
+
                 global_lock_key = self.role + "_global_lock"
                 lock_file = self._get_lock_file(lock_type=lock_type)
                 if self.opts[global_lock_key]:
                     msg = (
-                        "{} is enabled and {} lockfile {} is present for "
-                        "{} remote '{}'.".format(
-                            global_lock_key,
-                            lock_type,
-                            lock_file,
-                            self.role,
-                            self.id,
-                        )
+                        f"{global_lock_key} is enabled and {lock_type} lockfile {lock_file} "
+                        f"is present for {self.role} remote '{self.id}' on machine_id "
+                        f"{self.mach_id} with pid '{pid}'."
                     )
                     if pid:
                         msg += f" Process {pid} obtained the lock"
-                        if not pid_exists(pid):
-                            msg += (
-                                " but this process is not running. The "
-                                "update may have been interrupted. If "
-                                "using multi-master with shared gitfs "
-                                "cache, the lock may have been obtained "
-                                "by another master."
-                            )
+                        if self.mach_id or mach_id:
+                            msg += f" for machine_id {mach_id}, current machine_id {self.mach_id}"
+
+                        if not salt.utils.process.os_is_running(pid):
+                            if self.mach_id != mach_id:
+                                msg += (
+                                    " but this process is not running. The "
+                                    "update may have been interrupted. If "
+                                    "using multi-master with shared gitfs "
+                                    "cache, the lock may have been obtained "
+                                    f"by another master, with machine_id {mach_id}"
+                                )
+                            else:
+                                msg += (
+                                    " but this process is not running. The "
+                                    "update may have been interrupted. "
+                                    " Given this process is for the same machine"
+                                    " the lock will be reallocated to new process "
+                                )
+                                log.warning(msg)
+                                success, fail = self._clear_lock()
+                                if success:
+                                    return self.__lock(
+                                        lock_type="update", failhard=failhard
+                                    )
+                                elif failhard:
+                                    raise
+                                return
+
                     log.warning(msg)
                     if failhard:
                         raise
                     return
-                elif pid and pid_exists(pid):
+                elif pid and salt.utils.process.os_is_running(pid):
                     log.warning(
-                        "Process %d has a %s %s lock (%s)",
+                        "Process %d has a %s %s lock (%s) on machine_id %s",
                         pid,
                         self.role,
                         lock_type,
                         lock_file,
+                        self.mach_id,
                     )
                     if failhard:
                         raise
@@ -982,12 +1056,13 @@ class GitProvider:
                 else:
                     if pid:
                         log.warning(
-                            "Process %d has a %s %s lock (%s), but this "
+                            "Process %d has a %s %s lock (%s) on machine_id %s, but this "
                             "process is not running. Cleaning up lock file.",
                             pid,
                             self.role,
                             lock_type,
                             lock_file,
+                            self.mach_id,
                         )
                         success, fail = self._clear_lock()
                         if success:
@@ -996,12 +1071,14 @@ class GitProvider:
                     raise
                 return
             else:
-                msg = "Unable to set {} lock for {} ({}): {} ".format(
-                    lock_type, self.id, self._get_lock_file(lock_type), exc
-                )
+                msg = (
+                    f"Unable to set {lock_type} lock for {self.id} "
+                    f"({self._get_lock_file(lock_type)}) on machine_id {self.mach_id}: {exc}"
+                )
                 log.error(msg, exc_info=True)
                 raise GitLockError(exc.errno, msg)
-        msg = f"Set {lock_type} lock for {self.role} remote '{self.id}'"
+
+        msg = f"Set {lock_type} lock for {self.role} remote '{self.id}' on machine_id '{self.mach_id}'"
         log.debug(msg)
         return msg
@@ -1018,6 +1095,15 @@ class GitProvider:
         try:
             result = self._lock(lock_type="update")
         except GitLockError as exc:
+            log.warning(
+                "Update lock file generated an unexpected exception for %s remote '%s', "
+                "The lock file %s for %s type=update operation, exception: %s .",
+                self.role,
+                self.id,
+                self._get_lock_file(lock_type="update"),
+                self.role,
+                str(exc),
+            )
             failed.append(exc.strerror)
         else:
             if result is not None:
@@ -1027,7 +1113,8 @@ class GitProvider:
     @contextlib.contextmanager
     def gen_lock(self, lock_type="update", timeout=0, poll_interval=0.5):
         """
-        Set and automatically clear a lock
+        Set and automatically clear a lock,
+        should be called from a context, for example: with self.gen_lock()
         """
         if not isinstance(lock_type, str):
             raise GitLockError(errno.EINVAL, f"Invalid lock_type '{lock_type}'")
@@ -1048,17 +1135,23 @@ class GitProvider:
         if poll_interval > timeout:
             poll_interval = timeout

-        lock_set = False
+        lock_set1 = False
+        lock_set2 = False
         try:
             time_start = time.time()
             while True:
                 try:
                     self._lock(lock_type=lock_type, failhard=True)
-                    lock_set = True
-                    yield
+                    lock_set1 = True
+                    # docs state need to yield a single value, lock_set will do
+                    yield lock_set1
+
                     # Break out of this loop once we've yielded the lock, to
                     # avoid continued attempts to iterate and establish lock
+                    # just ensuring lock_set is true (belts and braces)
+                    lock_set2 = True
                     break
+
                 except (OSError, GitLockError) as exc:
                     if not timeout or time.time() - time_start > timeout:
                         raise GitLockError(exc.errno, exc.strerror)
@@ -1074,7 +1167,13 @@ class GitProvider:
                     time.sleep(poll_interval)
                     continue
         finally:
-            if lock_set:
+            if lock_set1 or lock_set2:
+                msg = (
+                    f"Attempting to remove '{lock_type}' lock for "
+                    f"'{self.role}' remote '{self.id}' due to lock_set1 "
+                    f"'{lock_set1}' or lock_set2 '{lock_set2}'"
+                )
+                log.debug(msg)
                 self.clear_lock(lock_type=lock_type)

     def init_remote(self):
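As the updated docstring notes, gen_lock is meant to be entered as a context manager; a minimal usage sketch (repo stands for any GitProvider instance):

    # Yields lock_set1 (True once the lock file is written); on exit the
    # finally block above logs and clears the lock automatically.
    with repo.gen_lock(lock_type="update", timeout=60, poll_interval=0.5):
        ...  # work that must not race with other gitfs processes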
@@ -1364,9 +1463,7 @@ class GitPython(GitProvider):
                 # function.
                 raise GitLockError(
                     exc.errno,
-                    "Checkout lock exists for {} remote '{}'".format(
-                        self.role, self.id
-                    ),
+                    f"Checkout lock exists for {self.role} remote '{self.id}'",
                 )
             else:
                 log.error(
@@ -1715,9 +1812,7 @@ class Pygit2(GitProvider):
                 # function.
                 raise GitLockError(
                     exc.errno,
-                    "Checkout lock exists for {} remote '{}'".format(
-                        self.role, self.id
-                    ),
+                    f"Checkout lock exists for {self.role} remote '{self.id}'",
                 )
             else:
                 log.error(
@@ -2232,10 +2327,8 @@ class Pygit2(GitProvider):
         if not self.ssl_verify:
             warnings.warn(
                 "pygit2 does not support disabling the SSL certificate "
-                "check in versions prior to 0.23.2 (installed: {}). "
-                "Fetches for self-signed certificates will fail.".format(
-                    PYGIT2_VERSION
-                )
+                f"check in versions prior to 0.23.2 (installed: {PYGIT2_VERSION}). "
+                "Fetches for self-signed certificates will fail."
             )

     def verify_auth(self):
@@ -2488,11 +2581,12 @@ class GitBase:
         if self.provider in AUTH_PROVIDERS:
             override_params += AUTH_PARAMS
         elif global_auth_params:
+            msg_auth_providers = "{}".format(", ".join(AUTH_PROVIDERS))
             msg = (
-                "{0} authentication was configured, but the '{1}' "
-                "{0}_provider does not support authentication. The "
-                "providers for which authentication is supported in {0} "
-                "are: {2}.".format(self.role, self.provider, ", ".join(AUTH_PROVIDERS))
+                f"{self.role} authentication was configured, but the '{self.provider}' "
+                f"{self.role}_provider does not support authentication. The "
+                f"providers for which authentication is supported in {self.role} "
+                f"are: {msg_auth_providers}."
             )
             if self.role == "gitfs":
                 msg += (
@@ -2664,6 +2758,7 @@ class GitBase:
             success, failed = repo.clear_lock(lock_type=lock_type)
             cleared.extend(success)
             errors.extend(failed)
+
         return cleared, errors

     def fetch_remotes(self, remotes=None):
@@ -2875,15 +2970,13 @@ class GitBase:
         errors = []
         if GITPYTHON_VERSION < GITPYTHON_MINVER:
             errors.append(
-                "{} is configured, but the GitPython version is earlier than "
-                "{}. Version {} detected.".format(
-                    self.role, GITPYTHON_MINVER, GITPYTHON_VERSION
-                )
+                f"{self.role} is configured, but the GitPython version is earlier than "
+                f"{GITPYTHON_MINVER}. Version {GITPYTHON_VERSION} detected."
             )
         if not salt.utils.path.which("git"):
             errors.append(
                 "The git command line utility is required when using the "
-                "'gitpython' {}_provider.".format(self.role)
+                f"'gitpython' {self.role}_provider."
             )

         if errors:
@@ -2922,24 +3015,20 @@ class GitBase:
         errors = []
         if PYGIT2_VERSION < PYGIT2_MINVER:
             errors.append(
-                "{} is configured, but the pygit2 version is earlier than "
-                "{}. Version {} detected.".format(
-                    self.role, PYGIT2_MINVER, PYGIT2_VERSION
-                )
+                f"{self.role} is configured, but the pygit2 version is earlier than "
+                f"{PYGIT2_MINVER}. Version {PYGIT2_VERSION} detected."
             )
         if LIBGIT2_VERSION < LIBGIT2_MINVER:
             errors.append(
-                "{} is configured, but the libgit2 version is earlier than "
-                "{}. Version {} detected.".format(
-                    self.role, LIBGIT2_MINVER, LIBGIT2_VERSION
-                )
+                f"{self.role} is configured, but the libgit2 version is earlier than "
+                f"{LIBGIT2_MINVER}. Version {LIBGIT2_VERSION} detected."
             )
         if not getattr(pygit2, "GIT_FETCH_PRUNE", False) and not salt.utils.path.which(
             "git"
         ):
             errors.append(
                 "The git command line utility is required when using the "
-                "'pygit2' {}_provider.".format(self.role)
+                f"'pygit2' {self.role}_provider."
             )

         if errors:
@@ -3252,10 +3341,11 @@ class GitFS(GitBase):
         ret = {"hash_type": self.opts["hash_type"]}
         relpath = fnd["rel"]
         path = fnd["path"]
+        lc_hash_type = self.opts["hash_type"]
         hashdest = salt.utils.path.join(
             self.hash_cachedir,
             load["saltenv"],
-            "{}.hash.{}".format(relpath, self.opts["hash_type"]),
+            f"{relpath}.hash.{lc_hash_type}",
         )
         try:
             with salt.utils.files.fopen(hashdest, "rb") as fp_:
@@ -3290,13 +3380,14 @@ class GitFS(GitBase):
         except OSError:
             log.error("Unable to make cachedir %s", self.file_list_cachedir)
             return []
+        lc_path_adj = load["saltenv"].replace(os.path.sep, "_|-")
         list_cache = salt.utils.path.join(
             self.file_list_cachedir,
-            "{}.p".format(load["saltenv"].replace(os.path.sep, "_|-")),
+            f"{lc_path_adj}.p",
         )
         w_lock = salt.utils.path.join(
             self.file_list_cachedir,
-            ".{}.w".format(load["saltenv"].replace(os.path.sep, "_|-")),
+            f".{lc_path_adj}.w",
         )
         cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
             self.opts, form, list_cache, w_lock
@@ -3560,3 +3651,100 @@ class WinRepo(GitBase):
             cachedir = self.do_checkout(repo, fetch_on_fail=fetch_on_fail)
         if cachedir is not None:
             self.winrepo_dirs[repo.id] = cachedir
+
+
+def gitfs_finalize_cleanup(cache_dir):
+    """
+    Clean up finalize processes that used gitfs
+    """
+    cur_pid = os.getpid()
+    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
+
+    # need to clean up any resources left around like lock files if using gitfs
+    # example: lockfile
+    # /var/cache/salt/master/gitfs/work/NlJQs6Pss_07AugikCrmqfmqEFrfPbCDBqGLBiCd3oU=/_/update.lk
+    # check for gitfs file locks to ensure no resource leaks
+    # last chance to clean up any missed unlock droppings
+    cache_dir = pathlib.Path(cache_dir + "/gitfs/work")
+    if cache_dir.exists() and cache_dir.is_dir():
+        file_list = list(cache_dir.glob("**/*.lk"))
+        file_del_list = []
+        file_pid = 0
+        file_mach_id = 0
+        try:
+            for file_name in file_list:
+                with salt.utils.files.fopen(file_name, "r") as fd_:
+                    try:
+                        file_pid = int(
+                            salt.utils.stringutils.to_unicode(fd_.readline()).rstrip()
+                        )
+                    except ValueError:
+                        # Lock file is empty, set pid to 0 so it evaluates as False.
+                        file_pid = 0
+                    try:
+                        file_mach_id = salt.utils.stringutils.to_unicode(
+                            fd_.readline()
+                        ).rstrip()
+                    except ValueError:
+                        # Lock file is empty, set mach_id to 0 so it evaluates False.
+                        file_mach_id = 0
+
+                if cur_pid == file_pid:
+                    if mach_id != file_mach_id:
+                        if not file_mach_id:
+                            msg = (
+                                f"gitfs lock file for pid '{file_pid}' does not "
+                                "contain a machine id, deleting lock file which may "
+                                "affect if using multi-master with shared gitfs cache, "
+                                "the lock may have been obtained by another master "
+                                "recommend updating Salt version on other masters to a "
+                                "version which inserts machine identification in the lock file."
+                            )
+                            log.debug(msg)
+                            file_del_list.append((file_name, file_pid, file_mach_id))
+                    else:
+                        file_del_list.append((file_name, file_pid, file_mach_id))
+
+        except FileNotFoundError:
+            log.debug("gitfs lock file: %s not found", file_name)
+
+        for file_name, file_pid, file_mach_id in file_del_list:
+            try:
+                os.remove(file_name)
+            except OSError as exc:
+                if exc.errno == errno.ENOENT:
+                    # No lock file present
+                    msg = (
+                        "SIGTERM clean up of resources attempted to remove lock "
+                        f"file {file_name}, pid '{file_pid}', machine identifier "
+                        f"'{mach_id}' but it did not exist, exception : {exc} "
+                    )
+                    log.debug(msg)
+
+                elif exc.errno == errno.EISDIR:
+                    # Somehow this path is a directory. Should never happen
+                    # unless some wiseguy manually creates a directory at this
+                    # path, but just in case, handle it.
+                    try:
+                        shutil.rmtree(file_name)
+                    except OSError as exc:
+                        msg = (
+                            f"SIGTERM clean up of resources, lock file '{file_name}'"
+                            f", pid '{file_pid}', machine identifier '{file_mach_id}' "
+                            f"was a directory, removed directory, exception : '{exc}'"
+                        )
+                        log.debug(msg)
+                else:
+                    msg = (
+                        "SIGTERM clean up of resources, unable to remove lock file "
+                        f"'{file_name}', pid '{file_pid}', machine identifier "
+                        f"'{file_mach_id}', exception : '{exc}'"
+                    )
+                    log.debug(msg)
+            else:
+                msg = (
+                    "SIGTERM clean up of resources, removed lock file "
+                    f"'{file_name}', pid '{file_pid}', machine identifier "
+                    f"'{file_mach_id}'"
+                )
+                log.debug(msg)
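Read together with the registration added in GitProvider.__init__ above, a sketch of the intended SIGTERM flow (the cachedir value is illustrative):

    import salt.utils.process

    # At provider init, when psutil is available and gitfs remotes are
    # configured, the cleanup callback is registered once:
    salt.utils.process.register_cleanup_finalize_function(
        gitfs_finalize_cleanup, "/var/cache/salt/master"
    )
    # On SIGTERM, SignalHandlingProcess runs the registered finalize
    # methods (via cleanup_finalize_process), which removes this process's
    # stale *.lk files under /var/cache/salt/master/gitfs/work.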
salt/utils/platform.py

@@ -239,3 +239,22 @@ def spawning_platform():
     Salt, however, will force macOS to spawning by default on all python versions
     """
     return multiprocessing.get_start_method(allow_none=False) == "spawn"
+
+
+def get_machine_identifier():
+    """
+    Provide the machine-id for machine/virtualization combination
+    """
+    # pylint: disable=resource-leakage
+    # Provides:
+    #   machine-id
+    locations = ["/etc/machine-id", "/var/lib/dbus/machine-id"]
+    existing_locations = [loc for loc in locations if os.path.exists(loc)]
+    if not existing_locations:
+        return {}
+    else:
+        # cannot use salt.utils.files.fopen due to circular dependency
+        with open(
+            existing_locations[0], encoding=__salt_system_encoding__
+        ) as machineid:
+            return {"machine_id": machineid.read().strip()}
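Usage note: the helper returns an empty dict when no machine-id file exists, which is why every call site in this change chains .get() with a fallback:

    from salt.utils.platform import get_machine_identifier

    # {} when neither /etc/machine-id nor /var/lib/dbus/machine-id exists
    mach_id = get_machine_identifier().get("machine_id", "no_machine_id_available")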
salt/utils/process.py

@@ -28,6 +28,7 @@ import salt.utils.path
 import salt.utils.platform
 import salt.utils.versions
 from salt.ext.tornado import gen
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier

 log = logging.getLogger(__name__)

@@ -46,6 +47,9 @@ try:
 except ImportError:
     HAS_SETPROCTITLE = False

+# Process finalization function list
+_INTERNAL_PROCESS_FINALIZE_FUNCTION_LIST = []
+

 def appendproctitle(name):
     """
@@ -207,7 +211,7 @@ def get_process_info(pid=None):

     # pid_exists can have false positives
     # for example Windows reserves PID 5 in a hack way
-    # another reasons is the the process requires kernel permissions
+    # another reason is that the process requires kernel permissions
     try:
         raw_process_info.status()
     except psutil.NoSuchProcess:
@@ -525,11 +529,14 @@ class ProcessManager:
             target=tgt, args=args, kwargs=kwargs, name=name or tgt.__qualname__
         )

+        process.register_finalize_method(cleanup_finalize_process, args, kwargs)
+
         if isinstance(process, SignalHandlingProcess):
             with default_signals(signal.SIGINT, signal.SIGTERM):
                 process.start()
         else:
             process.start()

         log.debug("Started '%s' with pid %s", process.name, process.pid)
         self._process_map[process.pid] = {
             "tgt": tgt,
@@ -537,6 +544,7 @@ class ProcessManager:
             "kwargs": kwargs,
             "Process": process,
         }
+
         return process

     def restart_process(self, pid):
@@ -685,6 +693,7 @@ class ProcessManager:
             pass
         try:
             p_map["Process"].terminate()
+
         except OSError as exc:
             if exc.errno not in (errno.ESRCH, errno.EACCES):
                 raise
@@ -1069,6 +1078,21 @@ class SignalHandlingProcess(Process):
             msg += "SIGTERM"
             msg += ". Exiting"
             log.debug(msg)
+
+            # Run any registered process finalization routines
+            for method, args, kwargs in self._finalize_methods:
+                try:
+                    method(*args, **kwargs)
+                except Exception:  # pylint: disable=broad-except
+                    log.exception(
+                        "Failed to run finalize callback on %s; method=%r; args=%r; and kwargs=%r",
+                        self,
+                        method,
+                        args,
+                        kwargs,
+                    )
+                    continue
+
             if HAS_PSUTIL:
                 try:
                     process = psutil.Process(os.getpid())
@@ -1084,6 +1108,7 @@ class SignalHandlingProcess(Process):
                         self.pid,
                         os.getpid(),
                     )
+
                 except psutil.NoSuchProcess:
                     log.warning(
                         "Unable to kill children of process %d, it does not exist."
@@ -1155,3 +1180,57 @@ class SubprocessList:
             self.processes.remove(proc)
             self.count -= 1
             log.debug("Subprocess %s cleaned up", proc.name)
+
+
+def cleanup_finalize_process(*args, **kwargs):
+    """
+    Generic process to allow for any registered process cleanup routines to execute.
+
+    While class Process has a register_finalize_method, when a process is looked up by pid
+    using psutil.Process, there is no method available to register a cleanup process.
+
+    Hence, this function is added as part of the add_process to allow usage of other cleanup processes
+    which cannot be added by the register_finalize_method.
+    """
+
+    # Run any registered process cleanup routines
+    for method, args, kwargs in _INTERNAL_PROCESS_FINALIZE_FUNCTION_LIST:
+        log.debug(
+            "cleanup_finalize_process, method=%r, args=%r, kwargs=%r",
+            method,
+            args,
+            kwargs,
+        )
+        try:
+            method(*args, **kwargs)
+        except Exception:  # pylint: disable=broad-except
+            log.exception(
+                "Failed to run registered function finalize callback; method=%r; args=%r; and kwargs=%r",
+                method,
+                args,
+                kwargs,
+            )
+            continue
+
+
+def register_cleanup_finalize_function(function, *args, **kwargs):
+    """
+    Register a function to run as process terminates
+
+    While class Process has a register_finalize_method, when a process is looked up by pid
+    using psutil.Process, there is no method available to register a cleanup process.
+
+    Hence, this function can be used to register a function to allow cleanup processes
+    which cannot be added by class Process register_finalize_method.
+
+    Note: there is no deletion, since it is assumed that if something is registered, it will continue to be used
+    """
+    log.debug(
+        "register_cleanup_finalize_function entry, function=%r, args=%r, kwargs=%r",
+        function,
+        args,
+        kwargs,
+    )
+    finalize_function_tuple = (function, args, kwargs)
+    if finalize_function_tuple not in _INTERNAL_PROCESS_FINALIZE_FUNCTION_LIST:
+        _INTERNAL_PROCESS_FINALIZE_FUNCTION_LIST.append(finalize_function_tuple)
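A minimal registration sketch (my_cleanup and its argument are hypothetical; duplicate registrations are ignored and there is no way to unregister):

    import salt.utils.process

    def my_cleanup(cache_dir):
        # Remove any process-scoped resources, e.g. stale lock files.
        ...

    salt.utils.process.register_cleanup_finalize_function(my_cleanup, "/tmp/my_cache")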
@@ -5,6 +5,7 @@ import pytest
 from salt.fileserver.gitfs import PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
 from salt.utils.gitfs import GitFS, GitPython, Pygit2
 from salt.utils.immutabletypes import ImmutableDict, ImmutableList
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier

 pytestmark = [
     pytest.mark.slow_test,
@@ -248,17 +249,24 @@ def _test_lock(opts):
     g.fetch_remotes()
     assert len(g.remotes) == 1
     repo = g.remotes[0]
+    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
     assert repo.get_salt_working_dir() in repo._get_lock_file()
     assert repo.lock() == (
         [
-            "Set update lock for gitfs remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Set update lock for gitfs remote "
+                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
     )
     assert os.path.isfile(repo._get_lock_file())
     assert repo.clear_lock() == (
         [
-            "Removed update lock for gitfs remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Removed update lock for gitfs remote "
+                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
     )
@@ -5,6 +5,7 @@ import pytest
 from salt.pillar.git_pillar import GLOBAL_ONLY, PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
 from salt.utils.gitfs import GitPillar, GitPython, Pygit2
 from salt.utils.immutabletypes import ImmutableDict, ImmutableList
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier

 pytestmark = [
     pytest.mark.windows_whitelisted,
@@ -339,17 +340,24 @@ def _test_lock(opts):
     p.fetch_remotes()
     assert len(p.remotes) == 1
     repo = p.remotes[0]
+    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
     assert repo.get_salt_working_dir() in repo._get_lock_file()
     assert repo.lock() == (
         [
-            "Set update lock for git_pillar remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Set update lock for git_pillar remote "
+                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
     )
     assert os.path.isfile(repo._get_lock_file())
     assert repo.clear_lock() == (
         [
-            "Removed update lock for git_pillar remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Removed update lock for git_pillar remote "
+                f"'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
     )
@@ -5,6 +5,7 @@ import pytest
 from salt.runners.winrepo import GLOBAL_ONLY, PER_REMOTE_ONLY, PER_REMOTE_OVERRIDES
 from salt.utils.gitfs import GitPython, Pygit2, WinRepo
 from salt.utils.immutabletypes import ImmutableDict, ImmutableList
+from salt.utils.platform import get_machine_identifier as _get_machine_identifier

 pytestmark = [
     pytest.mark.slow_test,
@@ -130,6 +131,7 @@ def test_pygit2_remote_map(pygit2_winrepo_opts):


 def _test_lock(opts):
+    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
     w = _get_winrepo(
         opts,
         "https://github.com/saltstack/salt-test-pillar-gitfs.git",
@@ -140,14 +142,18 @@ def _test_lock(opts):
     assert repo.get_salt_working_dir() in repo._get_lock_file()
     assert repo.lock() == (
         [
-            "Set update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Set update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
     )
     assert os.path.isfile(repo._get_lock_file())
     assert repo.clear_lock() == (
         [
-            "Removed update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git'"
+            (
+                f"Removed update lock for winrepo remote 'https://github.com/saltstack/salt-test-pillar-gitfs.git' on machine_id '{mach_id}'"
+            )
         ],
         [],
    )
596
tests/pytests/unit/utils/test_gitfs_locks.py
Normal file
596
tests/pytests/unit/utils/test_gitfs_locks.py
Normal file
|
@ -0,0 +1,596 @@
|
||||||
|
"""
|
||||||
|
These only test the provider selection and verification logic, they do not init
|
||||||
|
any remotes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import pathlib
|
||||||
|
import signal
|
||||||
|
import time
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from saltfactories.utils import random_string
|
||||||
|
|
||||||
|
import salt.ext.tornado.ioloop
|
||||||
|
import salt.fileserver.gitfs
|
||||||
|
import salt.utils.files
|
||||||
|
import salt.utils.gitfs
|
||||||
|
import salt.utils.path
|
||||||
|
import salt.utils.platform
|
||||||
|
import salt.utils.process
|
||||||
|
from salt.utils.immutabletypes import freeze
|
||||||
|
from salt.utils.platform import get_machine_identifier as _get_machine_identifier
|
||||||
|
from salt.utils.verify import verify_env
|
||||||
|
|
||||||
|
try:
|
||||||
|
import pwd
|
||||||
|
except ImportError:
|
||||||
|
import salt.utils.win_functions
|
||||||
|
|
||||||
|
pytestmark = [pytest.mark.skip_on_windows]
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_user():
|
||||||
|
"""
|
||||||
|
Get the user associated with the current process.
|
||||||
|
"""
|
||||||
|
if salt.utils.platform.is_windows():
|
||||||
|
return salt.utils.win_functions.get_current_user(with_domain=False)
|
||||||
|
return pwd.getpwuid(os.getuid())[0]
|
||||||
|
|
||||||
|
|
||||||
|
def _clear_instance_map():
|
||||||
|
try:
|
||||||
|
del salt.utils.gitfs.GitFS.instance_map[
|
||||||
|
salt.ext.tornado.ioloop.IOLoop.current()
|
||||||
|
]
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class MyMockedGitProvider:
|
||||||
|
"""
|
||||||
|
mocked GitFS provider leveraging tmp_path
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
salt_factories_default_root_dir,
|
||||||
|
temp_salt_master,
|
||||||
|
temp_salt_minion,
|
||||||
|
tmp_path,
|
||||||
|
):
|
||||||
|
self._tmp_name = str(tmp_path)
|
||||||
|
|
||||||
|
self._root_dir = str(salt_factories_default_root_dir)
|
||||||
|
self._master_cfg = str(temp_salt_master.config["conf_file"])
|
||||||
|
self._minion_cfg = str(temp_salt_minion.config["conf_file"])
|
||||||
|
self._user = _get_user()
|
||||||
|
|
||||||
|
tmp_name = self._tmp_name.join("/git_test")
|
||||||
|
pathlib.Path(tmp_name).mkdir(exist_ok=True, parents=True)
|
||||||
|
|
||||||
|
class MockedProvider(
|
||||||
|
salt.utils.gitfs.GitProvider
|
||||||
|
): # pylint: disable=abstract-method
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
opts,
|
||||||
|
remote,
|
||||||
|
per_remote_defaults,
|
||||||
|
per_remote_only,
|
||||||
|
override_params,
|
||||||
|
cache_root,
|
||||||
|
role="gitfs",
|
||||||
|
):
|
||||||
|
self.provider = "mocked"
|
||||||
|
self.fetched = False
|
||||||
|
super().__init__(
|
||||||
|
opts,
|
||||||
|
remote,
|
||||||
|
per_remote_defaults,
|
||||||
|
per_remote_only,
|
||||||
|
override_params,
|
||||||
|
cache_root,
|
||||||
|
role,
|
||||||
|
)
|
||||||
|
|
||||||
|
def init_remote(self):
|
||||||
|
self.gitdir = salt.utils.path.join(tmp_name, ".git")
|
||||||
|
self.repo = True
|
||||||
|
new = False
|
||||||
|
return new
|
||||||
|
|
||||||
|
def envs(self):
|
||||||
|
return ["base"]
|
||||||
|
|
||||||
|
def _fetch(self):
|
||||||
|
self.fetched = True
|
||||||
|
|
||||||
|
# Clear the instance map so that we make sure to create a new instance
|
||||||
|
# for this test class.
|
||||||
|
_clear_instance_map()
|
||||||
|
|
||||||
|
git_providers = {
|
||||||
|
"mocked": MockedProvider,
|
||||||
|
}
|
||||||
|
gitfs_remotes = ["file://repo1.git", {"file://repo2.git": [{"name": "repo2"}]}]
|
||||||
|
|
||||||
|
self.opts = self.get_temp_config(
|
||||||
|
"master",
|
||||||
|
gitfs_remotes=gitfs_remotes,
|
||||||
|
verified_gitfs_provider="mocked",
|
||||||
|
)
|
||||||
|
self.main_class = salt.utils.gitfs.GitFS(
|
||||||
|
self.opts,
|
||||||
|
self.opts["gitfs_remotes"],
|
||||||
|
per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
|
||||||
|
per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY,
|
||||||
|
git_providers=git_providers,
|
||||||
|
)
|
||||||
|
|
||||||
|
def cleanup(self):
|
||||||
|
# Providers are preserved with GitFS's instance_map
|
||||||
|
for remote in self.main_class.remotes:
|
||||||
|
remote.fetched = False
|
||||||
|
del self.main_class
|
||||||
|
|
||||||
|
def get_temp_config(self, config_for, **config_overrides):
|
||||||
|
|
||||||
|
rootdir = config_overrides.get("root_dir", self._root_dir)
|
||||||
|
|
||||||
|
if not pathlib.Path(rootdir).exists():
|
||||||
|
pathlib.Path(rootdir).mkdir(exist_ok=True, parents=True)
|
||||||
|
|
||||||
|
conf_dir = config_overrides.pop(
|
||||||
|
"conf_dir", str(pathlib.PurePath(rootdir).joinpath("conf"))
|
||||||
|
)
|
||||||
|
|
||||||
|
for key in ("cachedir", "pki_dir", "sock_dir"):
|
||||||
|
if key not in config_overrides:
|
||||||
|
config_overrides[key] = key
|
||||||
|
if "log_file" not in config_overrides:
|
||||||
|
config_overrides["log_file"] = f"logs/{config_for}.log".format()
|
||||||
|
if "user" not in config_overrides:
|
||||||
|
config_overrides["user"] = self._user
|
||||||
|
config_overrides["root_dir"] = rootdir
|
||||||
|
|
||||||
|
cdict = self.get_config(
|
||||||
|
config_for,
|
||||||
|
from_scratch=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
if config_for in ("master", "client_config"):
|
||||||
|
rdict = salt.config.apply_master_config(config_overrides, cdict)
|
||||||
|
if config_for == "minion":
|
||||||
|
minion_id = (
|
||||||
|
config_overrides.get("id")
|
||||||
|
or config_overrides.get("minion_id")
|
||||||
|
or cdict.get("id")
|
||||||
|
or cdict.get("minion_id")
|
||||||
|
or random_string("temp-minion-")
|
||||||
|
)
|
||||||
|
config_overrides["minion_id"] = config_overrides["id"] = minion_id
|
||||||
|
rdict = salt.config.apply_minion_config(
|
||||||
|
config_overrides, cdict, cache_minion_id=False, minion_id=minion_id
|
||||||
|
)
|
||||||
|
|
||||||
|
verify_env(
|
||||||
|
[
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("minions"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("minions_pre"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("minions_rejected"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("minions_denied"),
|
||||||
|
pathlib.PurePath(rdict["cachedir"]).joinpath("jobs"),
|
||||||
|
pathlib.PurePath(rdict["cachedir"]).joinpath("tokens"),
|
||||||
|
pathlib.PurePath(rdict["root_dir"]).joinpath("cache", "tokens"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("accepted"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("rejected"),
|
||||||
|
pathlib.PurePath(rdict["pki_dir"]).joinpath("pending"),
|
||||||
|
pathlib.PurePath(rdict["log_file"]).parent,
|
||||||
|
rdict["sock_dir"],
|
||||||
|
conf_dir,
|
||||||
|
],
|
||||||
|
self._user,
|
||||||
|
root_dir=rdict["root_dir"],
|
||||||
|
)
|
||||||
|
|
||||||
|
rdict["conf_file"] = pathlib.PurePath(conf_dir).joinpath(config_for)
|
||||||
|
with salt.utils.files.fopen(rdict["conf_file"], "w") as wfh:
|
||||||
|
salt.utils.yaml.safe_dump(rdict, wfh, default_flow_style=False)
|
||||||
|
return rdict
|
||||||
|
|
||||||
|
def get_config(
|
||||||
|
self,
|
||||||
|
config_for,
|
||||||
|
from_scratch=False,
|
||||||
|
):
|
||||||
|
if from_scratch:
|
||||||
|
if config_for in ("master"):
|
||||||
|
return salt.config.master_config(self._master_cfg)
|
||||||
|
elif config_for in ("minion"):
|
||||||
|
return salt.config.minion_config(self._minion_cfg)
|
||||||
|
elif config_for == "client_config":
|
||||||
|
return salt.config_client_config(self._master_cfg)
|
||||||
|
if config_for not in ("master", "minion", "client_config"):
|
||||||
|
if config_for in ("master"):
|
||||||
|
return freeze(salt.config.master_config(self._master_cfg))
|
||||||
|
elif config_for in ("minion"):
|
||||||
|
return freeze(salt.config.minion_config(self._minion_cfg))
|
||||||
|
elif config_for == "client_config":
|
||||||
|
return freeze(salt.config.client_config(self._master_cfg))
|
||||||
|
|
||||||
|
log.error(
|
||||||
|
"Should not reach this section of code for get_config, missing support for input config_for %s",
|
||||||
|
config_for,
|
||||||
|
)
|
||||||
|
|
||||||
|
# at least return master's config
|
||||||
|
return freeze(salt.config.master_config(self._master_cfg))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def config_dir(self):
|
||||||
|
return str(pathlib.PurePath(self._master_cfg).parent)
|
||||||
|
|
||||||
|
def get_config_dir(self):
|
||||||
|
log.warning("Use the config_dir attribute instead of calling get_config_dir()")
|
||||||
|
return self.config_dir
|
||||||
|
|
||||||
|
def get_config_file_path(self, filename):
|
||||||
|
if filename == "master":
|
||||||
|
return str(self._master_cfg)
|
||||||
|
|
||||||
|
if filename == "minion":
|
||||||
|
return str(self._minion_cfg)
|
||||||
|
|
||||||
|
return str(self._master_cfg)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def master_opts(self):
|
||||||
|
"""
|
||||||
|
Return the options used for the master
|
||||||
|
"""
|
||||||
|
return self.get_config("master")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def minion_opts(self):
|
||||||
|
"""
|
||||||
|
Return the options used for the minion
|
||||||
|
"""
|
||||||
|
return self.get_config("minion")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def main_class(
|
||||||
|
salt_factories_default_root_dir,
|
||||||
|
temp_salt_master,
|
||||||
|
temp_salt_minion,
|
||||||
|
tmp_path,
|
||||||
|
):
|
||||||
|
my_git_base = MyMockedGitProvider(
|
||||||
|
salt_factories_default_root_dir,
|
||||||
|
temp_salt_master,
|
||||||
|
temp_salt_minion,
|
||||||
|
tmp_path,
|
||||||
|
)
|
||||||
|
yield my_git_base.main_class
|
||||||
|
|
||||||
|
my_git_base.cleanup()
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_all(main_class):
|
||||||
|
main_class.update()
|
||||||
|
assert len(main_class.remotes) == 2, "Wrong number of remotes"
|
||||||
|
assert main_class.remotes[0].fetched
|
||||||
|
assert main_class.remotes[1].fetched
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_by_name(main_class):
|
||||||
|
main_class.update("repo2")
|
||||||
|
assert len(main_class.remotes) == 2, "Wrong number of remotes"
|
||||||
|
assert not main_class.remotes[0].fetched
|
||||||
|
assert main_class.remotes[1].fetched
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_by_id_and_name(main_class):
|
||||||
|
main_class.update([("file://repo1.git", None)])
|
||||||
|
assert len(main_class.remotes) == 2, "Wrong number of remotes"
|
||||||
|
assert main_class.remotes[0].fetched
|
||||||
|
assert not main_class.remotes[1].fetched
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_cachedir_basename(main_class):
|
||||||
|
assert main_class.remotes[0].get_cache_basename() == "_"
|
||||||
|
assert main_class.remotes[1].get_cache_basename() == "_"
|
||||||
|
|
||||||
|
|
||||||
|
def test_git_provider_mp_lock_and_clear_lock(main_class):
|
||||||
|
"""
|
||||||
|
Check that lock is released after provider.lock()
|
||||||
|
and that lock is released after provider.clear_lock()
|
||||||
|
"""
|
||||||
|
provider = main_class.remotes[0]
|
||||||
|
provider.lock()
|
||||||
|
# check that lock has been released
|
||||||
|
assert provider._master_lock.acquire(timeout=5)
|
||||||
|
provider._master_lock.release()
|
||||||
|
|
||||||
|
provider.clear_lock()
|
||||||
|
# check that lock has been released
|
||||||
|
assert provider._master_lock.acquire(timeout=5)
|
||||||
|
provider._master_lock.release()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.slow_test
|
||||||
|
@pytest.mark.timeout_unless_on_windows(120)
|
||||||
|
def test_git_provider_mp_lock_timeout(main_class):
|
||||||
|
"""
|
||||||
|
Check that lock will time out if master lock is locked.
|
||||||
|
"""
|
||||||
|
provider = main_class.remotes[0]
|
||||||
|
# Hijack the lock so git provider is fooled into thinking another instance is doing somthing.
|
||||||
|
assert provider._master_lock.acquire(timeout=5)
|
||||||
|
try:
|
||||||
|
# git provider should raise timeout error to avoid lock race conditions
|
||||||
|
pytest.raises(TimeoutError, provider.lock)
|
||||||
|
finally:
|
||||||
|
provider._master_lock.release()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.slow_test
|
||||||
|
@pytest.mark.timeout_unless_on_windows(120)
|
||||||
|
def test_git_provider_mp_clear_lock_timeout(main_class):
|
||||||
|
"""
|
||||||
|
Check that clear lock will time out if master lock is locked.
|
||||||
|
"""
|
||||||
|
provider = main_class.remotes[0]
|
||||||
|
# Hijack the lock so git provider is fooled into thinking another instance is doing somthing.
|
||||||
|
assert provider._master_lock.acquire(timeout=5)
|
||||||
|
try:
|
||||||
|
# git provider should raise timeout error to avoid lock race conditions
|
||||||
|
pytest.raises(TimeoutError, provider.clear_lock)
|
||||||
|
finally:
|
||||||
|
provider._master_lock.release()
|
||||||
|
|
||||||
|
|


@pytest.mark.slow_test
@pytest.mark.timeout_unless_on_windows(120)
def test_git_provider_mp_gen_lock(main_class, caplog):
    """
    Check that gen_lock obtains the update lock and then releases it
    during provider.fetch()
    """
    # get machine_identifier
    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
    cur_pid = os.getpid()

    test_msg1 = (
        f"Set update lock for gitfs remote 'file://repo1.git' on machine_id '{mach_id}'"
    )
    test_msg2 = (
        "Attempting to remove 'update' lock for 'gitfs' remote 'file://repo1.git' "
        "due to lock_set1 'True' or lock_set2"
    )
    test_msg3 = f"Removed update lock for gitfs remote 'file://repo1.git' on machine_id '{mach_id}'"

    provider = main_class.remotes[0]

    # loop to see if the test can be made to mess up a lock/unlock sequence
    max_count = 10000
    count = 0
    while count < max_count:
        count = count + 1
        caplog.clear()
        with caplog.at_level(logging.DEBUG):
            provider.fetch()

        assert test_msg1 in caplog.text
        assert test_msg2 in caplog.text
        assert test_msg3 in caplog.text

    caplog.clear()
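

# The two recovery tests below both write a fake lock file with the same
# two-line layout: pid on the first line, machine id on the second. A
# hypothetical helper mirroring that byte layout (assumed from the writes
# in those tests, illustration only):
def _format_lock_contents(pid, mach_id):
    # Encode "pid\nmachine_id\n" exactly as the fake lock files are written.
    return f"{pid}\n{mach_id}\n".encode()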


@pytest.mark.slow_test
@pytest.mark.timeout_unless_on_windows(120)
def test_git_provider_mp_lock_dead_pid(main_class, caplog):
    """
    Check that the lock is obtained when the previous pid in the lock file
    no longer exists for the same machine id
    """
    # get machine_identifier
    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
    cur_pid = os.getpid()

    test_msg1 = (
        f"Set update lock for gitfs remote 'file://repo1.git' on machine_id '{mach_id}'"
    )
    test_msg3 = f"Removed update lock for gitfs remote 'file://repo1.git' on machine_id '{mach_id}'"

    provider = main_class.remotes[0]
    provider.lock()
    # check that lock has been released
    assert provider._master_lock.acquire(timeout=5)

    # get lock file and manipulate it for a dead pid
    file_name = provider._get_lock_file("update")
    dead_pid = 1234  # give it a non-existent pid
    test_msg2 = (
        f"gitfs_global_lock is enabled and update lockfile {file_name} "
        "is present for gitfs remote 'file://repo1.git' on machine_id "
        f"{mach_id} with pid '{dead_pid}'. Process {dead_pid} obtained "
        f"the lock for machine_id {mach_id}, current machine_id {mach_id} "
        "but this process is not running. The update may have been "
        "interrupted. Given this process is for the same machine the "
        "lock will be reallocated to new process"
    )

    # remove existing lock file and write fake lock file with bad pid
    assert pathlib.Path(file_name).is_file()
    pathlib.Path(file_name).unlink()

    try:
        # write lock file similar to salt/utils/gitfs.py
        fh_ = os.open(file_name, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        with os.fdopen(fh_, "wb"):
            # Write the lock file and close the filehandle
            os.write(fh_, salt.utils.stringutils.to_bytes(str(dead_pid)))
            os.write(fh_, salt.utils.stringutils.to_bytes("\n"))
            os.write(fh_, salt.utils.stringutils.to_bytes(str(mach_id)))
            os.write(fh_, salt.utils.stringutils.to_bytes("\n"))

    except OSError as exc:
        log.error(
            "Failed to write fake dead pid lock file %s, exception %s", file_name, exc
        )

    finally:
        provider._master_lock.release()

    caplog.clear()
    with caplog.at_level(logging.DEBUG):
        provider.lock()
        # check that lock has been released
        assert provider._master_lock.acquire(timeout=5)
        provider._master_lock.release()

        provider.clear_lock()
        # check that lock has been released
        assert provider._master_lock.acquire(timeout=5)
        provider._master_lock.release()

    assert test_msg1 in caplog.text
    assert test_msg2 in caplog.text
    assert test_msg3 in caplog.text
    caplog.clear()
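

# The dead-pid recovery above presumes the provider can tell that the pid
# recorded in the lock file is no longer running. One common POSIX probe
# (an assumption for illustration, not necessarily what salt.utils.gitfs
# does) is signal 0, which checks a pid without sending anything:
import errno
import os


def _pid_is_running(pid):
    # EPERM means the pid exists but belongs to another user.
    try:
        os.kill(pid, 0)
    except OSError as err:
        return err.errno == errno.EPERM
    return True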


@pytest.mark.slow_test
@pytest.mark.timeout_unless_on_windows(120)
def test_git_provider_mp_lock_bad_machine(main_class, caplog):
    """
    Check that the lock is obtained when the machine id in the lock file
    doesn't match the current machine id
    """
    # get machine_identifier
    mach_id = _get_machine_identifier().get("machine_id", "no_machine_id_available")
    cur_pid = os.getpid()

    provider = main_class.remotes[0]
    provider.lock()
    # check that lock has been released
    assert provider._master_lock.acquire(timeout=5)

    # get lock file and manipulate it for a bad machine id
    file_name = provider._get_lock_file("update")
    bad_mach_id = "abcedf0123456789"  # give it a non-existent machine id

    test_msg1 = (
        f"gitfs_global_lock is enabled and update lockfile {file_name} "
        "is present for gitfs remote 'file://repo1.git' on machine_id "
        f"{mach_id} with pid '{cur_pid}'. Process {cur_pid} obtained "
        f"the lock for machine_id {bad_mach_id}, current machine_id {mach_id}"
    )
    test_msg2 = f"Removed update lock for gitfs remote 'file://repo1.git' on machine_id '{mach_id}'"

    # remove existing lock file and write fake lock file with bad machine id
    assert pathlib.Path(file_name).is_file()
    pathlib.Path(file_name).unlink()

    try:
        # write lock file similar to salt/utils/gitfs.py
        fh_ = os.open(file_name, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        with os.fdopen(fh_, "wb"):
            # Write the lock file and close the filehandle
            os.write(fh_, salt.utils.stringutils.to_bytes(str(cur_pid)))
            os.write(fh_, salt.utils.stringutils.to_bytes("\n"))
            os.write(fh_, salt.utils.stringutils.to_bytes(str(bad_mach_id)))
            os.write(fh_, salt.utils.stringutils.to_bytes("\n"))

    except OSError as exc:
        log.error(
            "Failed to write fake bad machine id lock file %s, exception %s",
            file_name,
            exc,
        )

    finally:
        provider._master_lock.release()

    caplog.clear()
    with caplog.at_level(logging.DEBUG):
        provider.lock()
        # check that lock has been released
        assert provider._master_lock.acquire(timeout=5)
        provider._master_lock.release()

        provider.clear_lock()
        # check that lock has been released
        assert provider._master_lock.acquire(timeout=5)
        provider._master_lock.release()

    assert test_msg1 in caplog.text
    assert test_msg2 in caplog.text
    caplog.clear()
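

# Reading the two-line lock file back (pid, then machine id) is the inverse
# of the writes in the two tests above; a hypothetical reader, illustration
# only:
def _read_lock_file(path):
    # Returns (pid, machine_id) as written by the fake lock files above.
    with open(path, encoding="utf-8") as fp_:
        pid_line, mach_line = fp_.read().splitlines()[:2]
    return int(pid_line), mach_line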


class KillProcessTest(salt.utils.process.SignalHandlingProcess):
    """
    Test process to kill, checking that lock resources are cleaned up
    """

    def __init__(self, provider, **kwargs):
        super().__init__(**kwargs)
        self.provider = provider
        self.opts = provider.opts
        self.threads = {}

    def run(self):
        """
        Start the test process to kill
        """
        self.provider.lock()
        lockfile = self.provider._get_lock_file()
        log.debug("KillProcessTest acquired lock file %s", lockfile)

        killtest_pid = os.getpid()

        # check that lock has been released
        assert self.provider._master_lock.acquire(timeout=5)

        while True:
            tsleep = 1
            time.sleep(tsleep)  # give time for kill by sigterm
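

# KillProcessTest relies on SignalHandlingProcess running cleanup when it
# receives SIGTERM. A stripped-down sketch of that idea (an assumed
# mechanism for illustration, not Salt's actual implementation):
import os
import signal
import sys


def _install_sigterm_cleanup(lock_file):
    # Remove the leftover lock file on SIGTERM, then exit cleanly.
    def _handler(signum, frame):
        if os.path.exists(lock_file):
            os.remove(lock_file)
        sys.exit(0)

    signal.signal(signal.SIGTERM, _handler)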


@pytest.mark.slow_test
@pytest.mark.skip_unless_on_linux
@pytest.mark.timeout_unless_on_windows(120)
def test_git_provider_sigterm_cleanup(main_class):
    """
    Start a process which obtains the lock and leaves it locked,
    then kill the process via SIGTERM and ensure the locked resources are cleaned up
    """
    provider = main_class.remotes[0]

    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
        procmgr = salt.utils.process.ProcessManager(wait_for_kill=1)
        proc = procmgr.add_process(KillProcessTest, args=(provider,), name="test_kill")

    while not proc.is_alive():
        time.sleep(1)  # give some time for it to be started

    procmgr.run(asynchronous=True)

    time.sleep(2)  # give some time for it to terminate

    # while the child process is alive, the lock file should exist
    file_name = provider._get_lock_file("update")

    assert pathlib.Path(file_name).exists()
    assert pathlib.Path(file_name).is_file()

    procmgr.terminate()  # sends a SIGTERM

    time.sleep(2)  # give some time for it to terminate

    assert not proc.is_alive()
    assert not pathlib.Path(file_name).exists()
@@ -1,175 +0,0 @@
"""
These only test the provider selection and verification logic, they do not init
any remotes.
"""

import tempfile

import pytest

import salt.ext.tornado.ioloop
import salt.fileserver.gitfs
import salt.utils.files
import salt.utils.gitfs
import salt.utils.path
import salt.utils.platform
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.unit import TestCase


def _clear_instance_map():
    try:
        del salt.utils.gitfs.GitFS.instance_map[
            salt.ext.tornado.ioloop.IOLoop.current()
        ]
    except KeyError:
        pass


class TestGitBase(TestCase, AdaptedConfigurationTestCaseMixin):
    def setUp(self):
        self._tmp_dir = tempfile.TemporaryDirectory()
        tmp_name = self._tmp_dir.name

        class MockedProvider(
            salt.utils.gitfs.GitProvider
        ):  # pylint: disable=abstract-method
            def __init__(
                self,
                opts,
                remote,
                per_remote_defaults,
                per_remote_only,
                override_params,
                cache_root,
                role="gitfs",
            ):
                self.provider = "mocked"
                self.fetched = False
                super().__init__(
                    opts,
                    remote,
                    per_remote_defaults,
                    per_remote_only,
                    override_params,
                    cache_root,
                    role,
                )

            def init_remote(self):
                self.gitdir = salt.utils.path.join(tmp_name, ".git")
                self.repo = True
                new = False
                return new

            def envs(self):
                return ["base"]

            def fetch(self):
                self.fetched = True

        git_providers = {
            "mocked": MockedProvider,
        }
        gitfs_remotes = ["file://repo1.git", {"file://repo2.git": [{"name": "repo2"}]}]
        self.opts = self.get_temp_config(
            "master", gitfs_remotes=gitfs_remotes, verified_gitfs_provider="mocked"
        )
        self.main_class = salt.utils.gitfs.GitFS(
            self.opts,
            self.opts["gitfs_remotes"],
            per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
            per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY,
            git_providers=git_providers,
        )

    @classmethod
    def setUpClass(cls):
        # Clear the instance map so that we make sure to create a new instance
        # for this test class.
        _clear_instance_map()

    def tearDown(self):
        # Providers are preserved with GitFS's instance_map
        for remote in self.main_class.remotes:
            remote.fetched = False
        del self.main_class
        self._tmp_dir.cleanup()

    def test_update_all(self):
        self.main_class.update()
        self.assertEqual(len(self.main_class.remotes), 2, "Wrong number of remotes")
        self.assertTrue(self.main_class.remotes[0].fetched)
        self.assertTrue(self.main_class.remotes[1].fetched)

    def test_update_by_name(self):
        self.main_class.update("repo2")
        self.assertEqual(len(self.main_class.remotes), 2, "Wrong number of remotes")
        self.assertFalse(self.main_class.remotes[0].fetched)
        self.assertTrue(self.main_class.remotes[1].fetched)

    def test_update_by_id_and_name(self):
        self.main_class.update([("file://repo1.git", None)])
        self.assertEqual(len(self.main_class.remotes), 2, "Wrong number of remotes")
        self.assertTrue(self.main_class.remotes[0].fetched)
        self.assertFalse(self.main_class.remotes[1].fetched)

    def test_get_cachedir_basename(self):
        self.assertEqual(
            self.main_class.remotes[0].get_cache_basename(),
            "_",
        )
        self.assertEqual(
            self.main_class.remotes[1].get_cache_basename(),
            "_",
        )

    def test_git_provider_mp_lock(self):
        """
        Check that lock is released after provider.lock()
        """
        provider = self.main_class.remotes[0]
        provider.lock()
        # check that lock has been released
        self.assertTrue(provider._master_lock.acquire(timeout=5))
        provider._master_lock.release()

    def test_git_provider_mp_clear_lock(self):
        """
        Check that lock is released after provider.clear_lock()
        """
        provider = self.main_class.remotes[0]
        provider.clear_lock()
        # check that lock has been released
        self.assertTrue(provider._master_lock.acquire(timeout=5))
        provider._master_lock.release()

    @pytest.mark.slow_test
    @pytest.mark.timeout_unless_on_windows(120)
    def test_git_provider_mp_lock_timeout(self):
        """
        Check that lock will time out if the master lock is locked.
        """
        provider = self.main_class.remotes[0]
        # Hijack the lock so the git provider is fooled into thinking another instance is doing something.
        self.assertTrue(provider._master_lock.acquire(timeout=5))
        try:
            # git provider should raise a timeout error to avoid lock race conditions
            self.assertRaises(TimeoutError, provider.lock)
        finally:
            provider._master_lock.release()

    @pytest.mark.slow_test
    @pytest.mark.timeout_unless_on_windows(120)
    def test_git_provider_mp_clear_lock_timeout(self):
        """
        Check that clear_lock will time out if the master lock is locked.
        """
        provider = self.main_class.remotes[0]
        # Hijack the lock so the git provider is fooled into thinking another instance is doing something.
        self.assertTrue(provider._master_lock.acquire(timeout=5))
        try:
            # git provider should raise a timeout error to avoid lock race conditions
            self.assertRaises(TimeoutError, provider.clear_lock)
        finally:
            provider._master_lock.release()