Migrate multimaster tests to PyTest

This commit is contained in:
Pedro Algarvio 2020-11-21 22:00:08 +00:00
parent bd466bf147
commit 2477ebfb35
24 changed files with 661 additions and 1643 deletions

View file

@ -1 +0,0 @@
id: mm-master

View file

@ -1,4 +0,0 @@
id: mm-minion
master:
- localhost:64506
- localhost:64508

View file

@ -1,10 +0,0 @@
id: mm-sub-master
publish_port: 64507
ret_port: 64508
# These settings are needed for tests on Windows, which defaults
# to ipc_mode: tcp
tcp_master_pub_port: 64512
tcp_master_pull_port: 64513
tcp_master_publish_pull: 64514
tcp_master_workers: 64515

View file

@ -1,4 +0,0 @@
id: mm-sub-minion
master:
- localhost:64506
- localhost:64508

View file

@ -1,776 +0,0 @@
"""
Set up the Salt multimaster test suite
"""
# Import Python libs
import copy
import logging
import os
import shutil
import stat
import sys
import threading
import time
import salt.config
import salt.log.setup as salt_log_setup
import salt.utils.path
import salt.utils.platform
from salt.utils.immutabletypes import freeze
from salt.utils.verify import verify_env
# Import Salt libs
from tests.integration import (
SocketServerRequestHandler,
TestDaemon,
TestDaemonStartFailed,
ThreadedSocketServer,
get_unused_localhost_port,
)
from tests.support.parser import PNUM, print_header
# Import salt tests support dirs
from tests.support.paths import (
ENGINES_DIR,
FILES,
INTEGRATION_TEST_DIR,
LOG_HANDLERS_DIR,
SCRIPT_DIR,
TMP,
)
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, start_daemon
from tests.support.runtests import RUNTIME_VARS
try:
from salt.utils.odict import OrderedDict
except ImportError:
from collections import OrderedDict
# Module-level logger for the multimaster test-suite package.
log = logging.getLogger(__name__)

# Port the in-process log server listens on.  Chosen once at import time so
# every daemon configuration built below references the same free port.
SALT_LOG_PORT = get_unused_localhost_port()
class MultimasterTestDaemon(TestDaemon):
"""
Set up the master and minion daemons, and run related cases
"""
def __enter__(self):
    """
    Start both masters and both minions for the multimaster test run.

    Returns the daemon context itself once all minions are connected and
    the optional sysinfo report has been printed; teardown happens in
    ``__exit__``.
    """
    # Setup the multiprocessing logging queue listener
    salt_log_setup.setup_multiprocessing_logging_listener(self.mm_master_opts)
    # Set up PATH to mockbin
    self._enter_mockbin()
    # Master configurations in start order, and the set of minion ids every
    # master must end up seeing connected (checked in wait_for_minions).
    self.master_targets = [self.mm_master_opts, self.mm_sub_master_opts]
    self.minion_targets = {"mm-minion", "mm-sub-minion"}
    if self.parser.options.transport == "zeromq":
        self.start_zeromq_daemons()
    elif self.parser.options.transport == "raet":
        self.start_raet_daemons()
    elif self.parser.options.transport == "tcp":
        self.start_tcp_daemons()
    self.pre_setup_minions()
    self.setup_minions()
    # if getattr(self.parser.options, 'ssh', False):
    #     self.prep_ssh()
    self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
    if self.parser.options.sysinfo:
        # print_header() grew the ``width`` keyword later; fall back to the
        # narrower call when an older version raises TypeError.
        try:
            print_header(
                "~~~~~~~ Versions Report ",
                inline=True,
                width=getattr(self.parser.options, "output_columns", PNUM),
            )
        except TypeError:
            print_header("~~~~~~~ Versions Report ", inline=True)
        print("\n".join(salt.version.versions_report()))
        try:
            print_header(
                "~~~~~~~ Minion Grains Information ",
                inline=True,
                width=getattr(self.parser.options, "output_columns", PNUM),
            )
        except TypeError:
            print_header("~~~~~~~ Minion Grains Information ", inline=True)
        grains = self.client.cmd("minion", "grains.items")
        minion_opts = self.mm_minion_opts.copy()
        minion_opts["color"] = self.parser.options.no_colors is False
        salt.output.display_output(grains, "grains", minion_opts)
    try:
        print_header(
            "=",
            sep="=",
            inline=True,
            width=getattr(self.parser.options, "output_columns", PNUM),
        )
    except TypeError:
        print_header("", sep="=", inline=True)
    try:
        return self
    finally:
        # Always run post-setup, even though we are returning.
        self.post_setup_minions()
def __exit__(self, type, value, traceback):
    """
    Kill the minion and master processes, then tear down logging and the
    log server.

    Daemons are terminated best-effort: a daemon that never started (the
    attribute is missing) or exposes no ``terminate`` method does not
    abort the remaining cleanup.
    """
    # Terminate minions first, then the masters they were connected to.
    self._terminate_daemon_process("sub_minion_process")
    self._terminate_daemon_process("minion_process")
    self._terminate_daemon_process("sub_master_process")
    self._terminate_daemon_process("master_process")
    self._exit_mockbin()
    self._exit_ssh()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
    # Shutdown the log server
    self.log_server.shutdown()
    self.log_server.server_close()
    self.log_server_process.join()

def _terminate_daemon_process(self, attr_name):
    """
    Best-effort terminate of the daemon process stored at ``self.<attr_name>``.

    A missing attribute (daemon never started) is silently ignored; a
    process object without a ``terminate`` method is logged instead of
    raising, matching the original per-daemon try/except blocks.
    """
    try:
        process = getattr(self, attr_name)
    except AttributeError:
        # Daemon was never started; nothing to clean up.
        return
    if hasattr(process, "terminate"):
        process.terminate()
    else:
        log.error("self.%s can't be terminated.", attr_name)
def start_zeromq_daemons(self):
    """
    Fire up the daemons used for zeromq tests: the threaded log server,
    both masters and both minions.  Reused verbatim for the TCP transport
    via the ``start_tcp_daemons`` alias below.

    :raises TestDaemonStartFailed: if any daemon fails to start.
    """
    self.log_server = ThreadedSocketServer(
        ("localhost", SALT_LOG_PORT), SocketServerRequestHandler
    )
    self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
    self.log_server_process.start()
    self.master_process = self._start_daemon_with_progress(
        "salt-master",
        daemon_name="salt-master",
        daemon_id=self.mm_master_opts["id"],
        daemon_log_prefix="salt-master/{}".format(self.mm_master_opts["id"]),
        daemon_cli_script_name="master",
        daemon_config=self.mm_master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_MM_CONF_DIR,
        daemon_class=SaltMaster,
    )
    # Clone the master key to sub-master's pki dir
    for keyfile in ("master.pem", "master.pub"):
        shutil.copyfile(
            os.path.join(self.mm_master_opts["pki_dir"], keyfile),
            os.path.join(self.mm_sub_master_opts["pki_dir"], keyfile),
        )
    self.sub_master_process = self._start_daemon_with_progress(
        "second salt-master",
        daemon_name="sub salt-master",
        # BUG FIX: the original passed the primary master's id here while the
        # log prefix already used the sub-master's id; use the sub-master's
        # id consistently.
        daemon_id=self.mm_sub_master_opts["id"],
        daemon_log_prefix="sub-salt-master/{}".format(
            self.mm_sub_master_opts["id"]
        ),
        daemon_cli_script_name="master",
        daemon_config=self.mm_sub_master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_MM_SUB_CONF_DIR,
        daemon_class=SaltMaster,
    )
    self.minion_process = self._start_daemon_with_progress(
        "salt-minion",
        daemon_name="salt-minion",
        # BUG FIX: use the minion's own id (the original reused the master's).
        daemon_id=self.mm_minion_opts["id"],
        daemon_log_prefix="salt-minion/{}".format(self.mm_minion_opts["id"]),
        daemon_cli_script_name="minion",
        daemon_config=self.mm_minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_MM_CONF_DIR,
        daemon_class=SaltMinion,
    )
    self.sub_minion_process = self._start_daemon_with_progress(
        "sub salt-minion",
        daemon_name="sub salt-minion",
        # BUG FIX: use the sub-minion's own id (the original reused the master's).
        daemon_id=self.mm_sub_minion_opts["id"],
        daemon_log_prefix="sub-salt-minion/{}".format(
            self.mm_sub_minion_opts["id"]
        ),
        daemon_cli_script_name="minion",
        daemon_config=self.mm_sub_minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_MM_SUB_CONF_DIR,
        daemon_class=SaltMinion,
    )

def _clear_progress_line(self):
    """Overwrite the in-progress status line with spaces and return to col 0."""
    sys.stdout.write(
        "\r{}\r".format(" " * getattr(self.parser.options, "output_columns", PNUM))
    )

def _start_daemon_with_progress(self, display_name, **start_daemon_kwargs):
    """
    Start one daemon via ``start_daemon`` while printing colored progress.

    :param display_name: human-readable name used in the progress messages.
    :param start_daemon_kwargs: per-daemon keyword arguments forwarded to
        ``start_daemon`` (id, config, config dir, class, ...).
    :return: the started daemon process object.
    :raises TestDaemonStartFailed: if ``start_daemon`` raises
        RuntimeWarning/RuntimeError.
    """
    sys.stdout.write(
        " * {LIGHT_YELLOW}Starting {name} ... {ENDC}".format(
            name=display_name, **self.colors
        )
    )
    sys.stdout.flush()
    try:
        process = start_daemon(
            bin_dir_path=SCRIPT_DIR,
            fail_hard=True,
            start_timeout=120,
            **start_daemon_kwargs
        )
    except (RuntimeWarning, RuntimeError):
        self._clear_progress_line()
        sys.stdout.write(
            " * {LIGHT_RED}Starting {name} ... FAILED!\n{ENDC}".format(
                name=display_name, **self.colors
            )
        )
        sys.stdout.flush()
        raise TestDaemonStartFailed()
    self._clear_progress_line()
    sys.stdout.write(
        " * {LIGHT_GREEN}Starting {name} ... STARTED!\n{ENDC}".format(
            name=display_name, **self.colors
        )
    )
    sys.stdout.flush()
    return process

start_tcp_daemons = start_zeromq_daemons
def wait_for_minions(self, start, timeout, sleep=5):
    """
    Ensure all minions and masters (including sub-masters) are connected.

    Repeatedly publishes ``test.ping`` through every master's local client
    until each master reports the full minion set, or *timeout* seconds
    (measured from *start*) elapse.

    :param start: ``time.time()`` timestamp the wait began at.
    :param timeout: maximum number of seconds to wait.
    :param sleep: seconds to sleep between ping rounds.
    :raises RuntimeError: if some master never sees all minions in time.
    """
    success = [False] * len(self.master_targets)
    while True:
        # BUG FIX: ``self.clients`` is an OrderedDict, so the original
        # ``enumerate(self.clients)`` yielded master *ids* while every ping
        # still went through ``self.client`` (the primary master).  Iterate
        # the client objects and ping through the one being checked, so
        # ``success[num]`` really tracks per-master connectivity.
        for num, client in enumerate(self.clients.values()):
            if success[num]:
                continue
            try:
                ret = client.run_job("*", "test.ping")
            except salt.exceptions.SaltClientError:
                ret = None
            if ret and "minions" not in ret:
                continue
            if ret and sorted(ret["minions"]) == sorted(self.minion_targets):
                success[num] = True
                continue
        if all(success):
            break
        if time.time() - start >= timeout:
            raise RuntimeError("Ping Minions Failed")
        time.sleep(sleep)
@property
def clients(self):
    """
    Per-master local clients used e.g. to ping and sync the test minions.

    The clients are created lazily here, and cached inside
    ``RUNTIME_VARS.RUNTIME_CONFIGS``, because creating them during
    ``__enter__`` would mean they never receive the master events.
    """
    if "runtime_clients" not in RUNTIME_VARS.RUNTIME_CONFIGS:
        RUNTIME_VARS.RUNTIME_CONFIGS["runtime_clients"] = OrderedDict()
    cached_clients = RUNTIME_VARS.RUNTIME_CONFIGS["runtime_clients"]
    for master_opts in self.master_targets:
        master_id = master_opts["id"]
        if master_id not in cached_clients:
            cached_clients[master_id] = salt.client.get_local_client(
                mopts=master_opts
            )
    return cached_clients
@property
def client(self):
    """Local client attached to the primary ("mm-master") master."""
    all_clients = self.clients
    return all_clients["mm-master"]
@classmethod
def transplant_configs(cls, transport="zeromq"):
    """
    Build the four multimaster daemon configurations (primary master,
    sub-master, minion, sub-minion), write them out as YAML into the
    temporary multimaster config directories, verify/create the directory
    trees they need, and cache the fully-loaded option dicts on the class
    as ``cls.mm_*_opts``.

    :param transport: transport the daemons will use ("zeromq", "raet" or
        "tcp"); adds/overrides the transport-specific options.
    """
    os.makedirs(RUNTIME_VARS.TMP_MM_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR)
    # NOTE(review): the message prints TMP_CONF_DIR while the files are
    # written to the TMP_MM_* dirs below — likely a copy/paste leftover.
    print(
        " * Transplanting multimaster configuration files to '{}'".format(
            RUNTIME_VARS.TMP_CONF_DIR
        )
    )
    tests_known_hosts_file = os.path.join(
        RUNTIME_VARS.TMP_CONF_DIR, "salt_ssh_known_hosts"
    )
    # Primary master in multimaster environment: start from the regular
    # test master config and layer the multimaster-specific file on top.
    master_opts = salt.config._read_conf_file(
        os.path.join(RUNTIME_VARS.CONF_DIR, "master")
    )
    master_opts.update(
        salt.config._read_conf_file(
            os.path.join(RUNTIME_VARS.CONF_DIR, "mm_master")
        )
    )
    master_opts["known_hosts_file"] = tests_known_hosts_file
    master_opts["cachedir"] = "cache"
    master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
    master_opts["config_dir"] = RUNTIME_VARS.TMP_MM_CONF_DIR
    master_opts["root_dir"] = os.path.join(TMP, "rootdir-multimaster")
    master_opts["pki_dir"] = "pki"
    # file_tree ext_pillar config shared by both masters (deep-copied for
    # the sub-master below).
    file_tree = {
        "root_dir": os.path.join(FILES, "pillar", "base", "file_tree"),
        "follow_dir_links": False,
        "keep_newline": True,
    }
    master_opts["ext_pillar"].append({"file_tree": file_tree})
    # Secondary master in multimaster environment
    sub_master_opts = salt.config._read_conf_file(
        os.path.join(RUNTIME_VARS.CONF_DIR, "master")
    )
    sub_master_opts.update(
        salt.config._read_conf_file(
            os.path.join(RUNTIME_VARS.CONF_DIR, "mm_sub_master")
        )
    )
    sub_master_opts["known_hosts_file"] = tests_known_hosts_file
    sub_master_opts["cachedir"] = "cache"
    sub_master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_master_opts["config_dir"] = RUNTIME_VARS.TMP_MM_SUB_CONF_DIR
    sub_master_opts["root_dir"] = os.path.join(TMP, "rootdir-sub-multimaster")
    sub_master_opts["pki_dir"] = "pki"
    sub_master_opts["ext_pillar"].append({"file_tree": copy.deepcopy(file_tree)})
    # Under windows we can't seem to properly create a virtualenv off of another
    # virtualenv, we can on linux but we will still point to the virtualenv binary
    # outside the virtualenv running the test suite, if that's the case.
    try:
        real_prefix = sys.real_prefix
        # The above attribute exists, this is a virtualenv
        if salt.utils.platform.is_windows():
            virtualenv_binary = os.path.join(
                real_prefix, "Scripts", "virtualenv.exe"
            )
        else:
            # We need to remove the virtualenv from PATH or we'll get the virtualenv binary
            # from within the virtualenv, we don't want that
            path = os.environ.get("PATH")
            if path is not None:
                path_items = path.split(os.pathsep)
                for item in path_items[:]:
                    if item.startswith(sys.base_prefix):
                        path_items.remove(item)
                os.environ["PATH"] = os.pathsep.join(path_items)
            virtualenv_binary = salt.utils.path.which("virtualenv")
            if path is not None:
                # Restore previous environ PATH
                os.environ["PATH"] = path
            if not virtualenv_binary.startswith(real_prefix):
                virtualenv_binary = None
        if virtualenv_binary and not os.path.exists(virtualenv_binary):
            # It doesn't exist?!
            virtualenv_binary = None
    except AttributeError:
        # We're not running inside a virtualenv
        virtualenv_binary = None
    # This minion connects to both masters
    minion_opts = salt.config._read_conf_file(
        os.path.join(RUNTIME_VARS.CONF_DIR, "minion")
    )
    minion_opts.update(
        salt.config._read_conf_file(
            os.path.join(RUNTIME_VARS.CONF_DIR, "mm_minion")
        )
    )
    minion_opts["cachedir"] = "cache"
    minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
    minion_opts["config_dir"] = RUNTIME_VARS.TMP_MM_CONF_DIR
    minion_opts["root_dir"] = os.path.join(TMP, "rootdir-multimaster")
    minion_opts["pki_dir"] = "pki"
    minion_opts["hosts.file"] = os.path.join(TMP, "rootdir", "hosts")
    minion_opts["aliases.file"] = os.path.join(TMP, "rootdir", "aliases")
    if virtualenv_binary:
        minion_opts["venv_bin"] = virtualenv_binary
    # This sub_minion also connects to both masters
    sub_minion_opts = salt.config._read_conf_file(
        os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
    )
    sub_minion_opts.update(
        salt.config._read_conf_file(
            os.path.join(RUNTIME_VARS.CONF_DIR, "mm_sub_minion")
        )
    )
    sub_minion_opts["cachedir"] = "cache"
    sub_minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_minion_opts["config_dir"] = RUNTIME_VARS.TMP_MM_SUB_CONF_DIR
    sub_minion_opts["root_dir"] = os.path.join(TMP, "rootdir-sub-multimaster")
    sub_minion_opts["pki_dir"] = "pki"
    sub_minion_opts["hosts.file"] = os.path.join(TMP, "rootdir", "hosts")
    sub_minion_opts["aliases.file"] = os.path.join(TMP, "rootdir", "aliases")
    if virtualenv_binary:
        sub_minion_opts["venv_bin"] = virtualenv_binary
    # Transport-specific overrides for all four daemons.
    if transport == "raet":
        master_opts["transport"] = "raet"
        master_opts["raet_port"] = 64506
        sub_master_opts["transport"] = "raet"
        sub_master_opts["raet_port"] = 64556
        minion_opts["transport"] = "raet"
        minion_opts["raet_port"] = 64510
        sub_minion_opts["transport"] = "raet"
        sub_minion_opts["raet_port"] = 64520
        # syndic_master_opts['transport'] = 'raet'
    if transport == "tcp":
        master_opts["transport"] = "tcp"
        sub_master_opts["transport"] = "tcp"
        minion_opts["transport"] = "tcp"
        sub_minion_opts["transport"] = "tcp"
    # Set up config options that require internal data
    master_opts["pillar_roots"] = sub_master_opts["pillar_roots"] = {
        "base": [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, "pillar", "base"),
        ]
    }
    minion_opts["pillar_roots"] = {
        "base": [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, "pillar", "base"),
        ]
    }
    master_opts["file_roots"] = sub_master_opts["file_roots"] = {
        "base": [
            os.path.join(FILES, "file", "base"),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE,
        ],
        # Alternate root to test __env__ choices
        "prod": [
            os.path.join(FILES, "file", "prod"),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
        ],
    }
    minion_opts["file_roots"] = {
        "base": [
            os.path.join(FILES, "file", "base"),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE,
        ],
        # Alternate root to test __env__ choices
        "prod": [
            os.path.join(FILES, "file", "prod"),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
        ],
    }
    # React to minion start events by syncing them (reactor-sync-minion.sls).
    master_opts.setdefault("reactor", []).append(
        {"salt/minion/*/start": [os.path.join(FILES, "reactor-sync-minion.sls")]}
    )
    # cmd_yaml ext_pillar; 'type' is the Windows equivalent of 'cat'.
    for opts_dict in (master_opts, sub_master_opts):
        if "ext_pillar" not in opts_dict:
            opts_dict["ext_pillar"] = []
        if salt.utils.platform.is_windows():
            opts_dict["ext_pillar"].append(
                {"cmd_yaml": "type {}".format(os.path.join(FILES, "ext.yaml"))}
            )
        else:
            opts_dict["ext_pillar"].append(
                {"cmd_yaml": "cat {}".format(os.path.join(FILES, "ext.yaml"))}
            )
    # all read, only owner write
    autosign_file_permissions = (
        stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    )
    for opts_dict in (master_opts, sub_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(
            opts_dict["root_dir"], "extension_modules"
        )
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(INTEGRATION_TEST_DIR, "files", "extension_modules"),
                new_extension_modules_path,
            )
        opts_dict["extension_modules"] = os.path.join(
            opts_dict["root_dir"], "extension_modules"
        )
        # Copy the autosign_file to the new master root_dir
        new_autosign_file_path = os.path.join(
            opts_dict["root_dir"], "autosign_file"
        )
        shutil.copyfile(
            os.path.join(INTEGRATION_TEST_DIR, "files", "autosign_file"),
            new_autosign_file_path,
        )
        os.chmod(new_autosign_file_path, autosign_file_permissions)
    # Point the config values to the correct temporary paths
    for name in ("hosts", "aliases"):
        optname = "{}.file".format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        sub_master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
    master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
    sub_master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
    minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
    sub_minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
    # Wire every daemon up to the test-suite engine and log handling.
    for conf in (master_opts, sub_master_opts, minion_opts, sub_minion_opts):
        if "engines" not in conf:
            conf["engines"] = []
        conf["engines"].append({"salt_runtests": {}})
        if "engines_dirs" not in conf:
            conf["engines_dirs"] = []
        conf["engines_dirs"].insert(0, ENGINES_DIR)
        if "log_handlers_dirs" not in conf:
            conf["log_handlers_dirs"] = []
        conf["log_handlers_dirs"].insert(0, LOG_HANDLERS_DIR)
        conf["runtests_log_port"] = SALT_LOG_PORT
        conf["runtests_log_level"] = (
            os.environ.get("TESTS_MIN_LOG_LEVEL_NAME") or "debug"
        )
    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    # NOTE(review): computed_config is never used afterwards — verify.
    computed_config = copy.deepcopy(master_opts)
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, "master"), "w"
    ) as wfh:
        salt.utils.yaml.safe_dump(
            copy.deepcopy(master_opts), wfh, default_flow_style=False
        )
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, "master"), "w"
    ) as wfh:
        salt.utils.yaml.safe_dump(
            copy.deepcopy(sub_master_opts), wfh, default_flow_style=False
        )
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, "minion"), "w"
    ) as wfh:
        salt.utils.yaml.safe_dump(
            copy.deepcopy(minion_opts), wfh, default_flow_style=False
        )
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, "minion"), "w"
    ) as wfh:
        salt.utils.yaml.safe_dump(
            copy.deepcopy(sub_minion_opts), wfh, default_flow_style=False
        )
    # <---- Transcribe Configuration -----------------------------------------------------------------------------
    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-load the configs through the real config loaders so defaults and
    # path expansion are applied exactly as the daemons will see them.
    master_opts = salt.config.master_config(
        os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, "master")
    )
    sub_master_opts = salt.config.master_config(
        os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, "master")
    )
    minion_opts = salt.config.minion_config(
        os.path.join(RUNTIME_VARS.TMP_MM_CONF_DIR, "minion")
    )
    sub_minion_opts = salt.config.minion_config(
        os.path.join(RUNTIME_VARS.TMP_MM_SUB_CONF_DIR, "minion")
    )
    RUNTIME_VARS.RUNTIME_CONFIGS["mm_master"] = freeze(master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS["mm_sub_master"] = freeze(sub_master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS["mm_minion"] = freeze(minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS["mm_sub_minion"] = freeze(sub_minion_opts)
    # Create (and permission-check) every directory the daemons require.
    verify_env(
        [
            os.path.join(master_opts["pki_dir"], "minions"),
            os.path.join(master_opts["pki_dir"], "minions_pre"),
            os.path.join(master_opts["pki_dir"], "minions_rejected"),
            os.path.join(master_opts["pki_dir"], "minions_denied"),
            os.path.join(master_opts["cachedir"], "jobs"),
            os.path.join(master_opts["cachedir"], "raet"),
            os.path.join(master_opts["root_dir"], "cache", "tokens"),
            os.path.join(master_opts["pki_dir"], "accepted"),
            os.path.join(master_opts["pki_dir"], "rejected"),
            os.path.join(master_opts["pki_dir"], "pending"),
            os.path.join(master_opts["cachedir"], "raet"),
            os.path.join(sub_master_opts["pki_dir"], "minions"),
            os.path.join(sub_master_opts["pki_dir"], "minions_pre"),
            os.path.join(sub_master_opts["pki_dir"], "minions_rejected"),
            os.path.join(sub_master_opts["pki_dir"], "minions_denied"),
            os.path.join(sub_master_opts["cachedir"], "jobs"),
            os.path.join(sub_master_opts["cachedir"], "raet"),
            os.path.join(sub_master_opts["root_dir"], "cache", "tokens"),
            os.path.join(sub_master_opts["pki_dir"], "accepted"),
            os.path.join(sub_master_opts["pki_dir"], "rejected"),
            os.path.join(sub_master_opts["pki_dir"], "pending"),
            os.path.join(sub_master_opts["cachedir"], "raet"),
            os.path.join(minion_opts["pki_dir"], "accepted"),
            os.path.join(minion_opts["pki_dir"], "rejected"),
            os.path.join(minion_opts["pki_dir"], "pending"),
            os.path.join(minion_opts["cachedir"], "raet"),
            os.path.join(sub_minion_opts["pki_dir"], "accepted"),
            os.path.join(sub_minion_opts["pki_dir"], "rejected"),
            os.path.join(sub_minion_opts["pki_dir"], "pending"),
            os.path.join(sub_minion_opts["cachedir"], "raet"),
            os.path.dirname(master_opts["log_file"]),
            minion_opts["extension_modules"],
            sub_minion_opts["extension_modules"],
            sub_minion_opts["pki_dir"],
            master_opts["sock_dir"],
            sub_master_opts["sock_dir"],
            sub_minion_opts["sock_dir"],
            minion_opts["sock_dir"],
        ],
        RUNTIME_VARS.RUNNING_TESTS_USER,
        root_dir=master_opts["root_dir"],
    )
    cls.mm_master_opts = master_opts
    cls.mm_sub_master_opts = sub_master_opts
    cls.mm_minion_opts = minion_opts
    cls.mm_sub_minion_opts = sub_minion_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
@classmethod
def config_location(cls):
    """Return the (primary, sub-master) multimaster config directories."""
    return (
        RUNTIME_VARS.TMP_MM_CONF_DIR,
        RUNTIME_VARS.TMP_MM_SUB_CONF_DIR,
    )

View file

@ -1 +0,0 @@
# -*- coding: utf-8 -*-

View file

@ -1,155 +0,0 @@
import logging
import os
import shutil
import tempfile
import time
import salt.config
import salt.version
from tests.support.case import MultimasterModuleCase
from tests.support.helpers import slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.unit import skipIf
try:
import pyinotify # pylint: disable=unused-import
HAS_PYINOTIFY = True
except ImportError:
HAS_PYINOTIFY = False
log = logging.getLogger(__name__)
@skipIf(not HAS_PYINOTIFY, "pyinotify is not available")
@skipIf(
salt.utils.platform.is_freebsd(),
"Skip on FreeBSD, IN_CREATE event is not supported",
)
class TestBeaconsInotify(MultimasterModuleCase, AdaptedConfigurationTestCaseMixin):
"""
Validate the inotify beacon in multimaster environment
"""
def setUp(self):
    """Create a scratch directory for the inotify beacon to watch."""
    scratch_dir = tempfile.mkdtemp()
    self.tmpdir = salt.utils.stringutils.to_unicode(scratch_dir)
    # Remove the scratch directory once the test finishes, success or not.
    self.addCleanup(shutil.rmtree, self.tmpdir, ignore_errors=True)
@slowTest
def test_beacons_duplicate_53344(self):
    """
    Regression test for issue #53344: with a minion connected to two
    masters, each beacon event must arrive on both masters' event buses
    (and the inotify beacon must not be configured twice).
    """
    # Also add a status beacon to use it for interval checks
    res = self.run_function(
        "beacons.add",
        ("inotify", [{"files": {self.tmpdir: {"mask": ["create"]}}}]),
        master_tgt="mm-master",
    )
    log.debug("Inotify beacon add returned: %s", res)
    self.assertTrue(res.get("result"))
    self.addCleanup(
        self.run_function, "beacons.delete", ("inotify",), master_tgt="mm-master"
    )
    res = self.run_function(
        "beacons.add", ("status", [{"time": ["all"]}]), master_tgt="mm-master",
    )
    log.debug("Status beacon add returned: %s", res)
    self.assertTrue(res.get("result"))
    self.addCleanup(
        self.run_function, "beacons.delete", ("status",), master_tgt="mm-master"
    )
    # Ensure beacons are added.
    res = self.run_function(
        "beacons.list", (), return_yaml=False, master_tgt="mm-master",
    )
    log.debug("Beacons list: %s", res)
    self.assertEqual(
        {
            "inotify": [{"files": {self.tmpdir: {"mask": ["create"]}}}],
            "status": [{"time": ["all"]}],
        },
        res,
    )
    file_path = os.path.join(self.tmpdir, "tmpfile")
    # Listen on both masters' event buses so we can assert each received
    # the beacon events.
    master_listener = salt.utils.event.get_master_event(
        self.mm_master_opts, sock_dir=self.mm_master_opts["sock_dir"]
    )
    self.addCleanup(master_listener.destroy)
    sub_master_listener = salt.utils.event.get_master_event(
        self.mm_sub_master_opts, sock_dir=self.mm_sub_master_opts["sock_dir"]
    )
    self.addCleanup(sub_master_listener.destroy)
    # We have to wait for the beacons' first execution, which configures the
    # inotify watch.  Since both beacons execute together, receiving the
    # first status beacon event implies the inotify beacon ran too.
    start = time.time()
    stop_at = start + self.mm_minion_opts["loop_interval"] * 2 + 60
    event = sub_event = None
    while True:
        if time.time() > stop_at:
            break
        if not event:
            event = master_listener.get_event(
                full=True,
                wait=1,
                tag="salt/beacon/mm-minion/status",
                match_type="startswith",
            )
        if sub_event is None:
            sub_event = sub_master_listener.get_event(
                full=True,
                wait=1,
                tag="salt/beacon/mm-minion/status",
                match_type="startswith",
            )
        if event and sub_event:
            break
    log.debug("Status events received: %s, %s", event, sub_event)
    if not event or not sub_event:
        self.fail("Failed to receive at least one of the status events")
    # Create a file inside the watched directory to trigger IN_CREATE.
    with salt.utils.files.fopen(file_path, "w") as f:
        pass
    start = time.time()
    # In the successful case this test gets results within 2 loop intervals;
    # wait 2 loop intervals plus some slack for slow hardware.
    stop_at = start + self.mm_minion_opts["loop_interval"] * 3 + 60
    event = sub_event = None
    expected_tag = salt.utils.stringutils.to_str(
        "salt/beacon/mm-minion/inotify/{}".format(self.tmpdir)
    )
    while True:
        if time.time() > stop_at:
            break
        if not event:
            event = master_listener.get_event(
                full=True, wait=1, tag=expected_tag, match_type="startswith"
            )
        if sub_event is None:
            sub_event = sub_master_listener.get_event(
                full=True, wait=1, tag=expected_tag, match_type="startswith"
            )
        if event and sub_event:
            break
    log.debug("Inotify events received: %s, %s", event, sub_event)
    if not event or not sub_event:
        self.fail("Failed to receive at least one of the inotify events")
    # We can't determine the timestamp so remove it from results
    if event:
        del event["data"]["_stamp"]
    if sub_event:
        del sub_event["data"]["_stamp"]
    expected = {
        "data": {"path": file_path, "change": "IN_CREATE", "id": "mm-minion"},
        "tag": expected_tag,
    }
    # It's better to compare both at once to see both responses in the error log.
    self.assertEqual((expected, expected), (event, sub_event))

View file

@ -1,215 +0,0 @@
"""
tests.multimaster.conftest
~~~~~~~~~~~~~~~~~~~~~~~~~~
Multimaster PyTest prep routines
"""
import logging
import os
import pathlib
import shutil
import pytest
import salt.utils.files
from salt.serializers import yaml
from salt.utils.immutabletypes import freeze
from tests.support.runtests import RUNTIME_VARS
log = logging.getLogger(__name__)
@pytest.fixture(scope="package")
def ext_pillar_file_tree():
    """Return the ``file_tree`` ext_pillar configuration shared by both masters."""
    tree_root = pathlib.Path(RUNTIME_VARS.PILLAR_DIR) / "base" / "file_tree"
    return {
        "file_tree": {
            "root_dir": str(tree_root),
            "follow_dir_links": False,
            "keep_newline": True,
        }
    }
@pytest.fixture(scope="package")
def salt_mm_master(request, salt_factories, ext_pillar_file_tree):
    """
    Package-scoped primary multimaster master ("mm-master") factory.

    Yields the factory while the daemon is running; the daemon is stopped
    when the package's tests finish.
    """
    # Base the configuration on the legacy test-suite "mm_master" config file.
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "mm_master")
    ) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    master_id = "mm-master"
    root_dir = salt_factories.get_root_dir_for_daemon(master_id)
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["ext_pillar"] = [ext_pillar_file_tree]
    # open_mode — presumably so test minion keys are accepted without
    # manual acceptance; confirm against the master config reference.
    config_defaults["open_mode"] = True
    # Transport comes from the test runner's --transport CLI option.
    config_defaults["transport"] = request.config.getoption("--transport")
    config_overrides = {
        "file_roots": {
            "base": [
                RUNTIME_VARS.TMP_STATE_TREE,
                os.path.join(RUNTIME_VARS.FILES, "file", "base"),
            ],
            # Alternate root to test __env__ choices
            "prod": [
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
            ],
        },
        "pillar_roots": {
            "base": [
                RUNTIME_VARS.TMP_PILLAR_TREE,
                os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
            ],
            "prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
        },
    }
    factory = salt_factories.get_salt_master_daemon(
        master_id,
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started():
        yield factory
@pytest.fixture(scope="package")
def salt_mm_sub_master(salt_factories, salt_mm_master, ext_pillar_file_tree):
    """
    Package-scoped secondary multimaster master ("mm-sub-master") factory.

    Clones the primary master's key pair into its own pki dir before
    starting, which is why it depends on the ``salt_mm_master`` fixture.
    """
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "mm_sub_master")
    ) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    master_id = "mm-sub-master"
    root_dir = salt_factories.get_root_dir_for_daemon(master_id)
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["ext_pillar"] = [ext_pillar_file_tree]
    config_defaults["open_mode"] = True
    # Follow whatever transport the primary master was configured with.
    config_defaults["transport"] = salt_mm_master.config["transport"]
    config_overrides = {
        "file_roots": {
            "base": [
                RUNTIME_VARS.TMP_STATE_TREE,
                os.path.join(RUNTIME_VARS.FILES, "file", "base"),
            ],
            # Alternate root to test __env__ choices
            "prod": [
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
            ],
        },
        "pillar_roots": {
            "base": [
                RUNTIME_VARS.TMP_PILLAR_TREE,
                os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
            ],
            "prod": [RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE],
        },
    }
    factory = salt_factories.get_salt_master_daemon(
        master_id,
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # The secondary salt master depends on the primary salt master fixture
    # because we need to clone the keys
    for keyfile in ("master.pem", "master.pub"):
        shutil.copyfile(
            os.path.join(salt_mm_master.config["pki_dir"], keyfile),
            os.path.join(factory.config["pki_dir"], keyfile),
        )
    with factory.started():
        yield factory
@pytest.fixture(scope="package")
def salt_mm_minion(salt_mm_master, salt_mm_sub_master):
    """Minion ("mm-minion") connected to both multimaster masters."""
    config_file = os.path.join(RUNTIME_VARS.CONF_DIR, "mm_minion")
    with salt.utils.files.fopen(config_file) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    config_defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    config_defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    # Match the transport the masters were started with.
    config_defaults["transport"] = salt_mm_master.config["transport"]
    # Point the minion at both masters' return ports.
    master_uris = [
        "localhost:{}".format(master.config["ret_port"])
        for master in (salt_mm_master, salt_mm_sub_master)
    ]
    config_overrides = {
        "master": master_uris,
        "test.foo": "baz",
    }
    factory = salt_mm_master.get_salt_minion_daemon(
        "mm-minion",
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started():
        yield factory
@pytest.fixture(scope="package")
def salt_mm_sub_minion(salt_mm_master, salt_mm_sub_master):
    """Second minion ("mm-sub-minion") connected to both multimaster masters."""
    config_file = os.path.join(RUNTIME_VARS.CONF_DIR, "mm_sub_minion")
    with salt.utils.files.fopen(config_file) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    config_defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    config_defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    # Match the transport the masters were started with.
    config_defaults["transport"] = salt_mm_master.config["transport"]
    # Point the minion at both masters' return ports.
    master_uris = [
        "localhost:{}".format(master.config["ret_port"])
        for master in (salt_mm_master, salt_mm_sub_master)
    ]
    config_overrides = {
        "master": master_uris,
        "test.foo": "baz",
    }
    factory = salt_mm_sub_master.get_salt_minion_daemon(
        "mm-sub-minion",
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started():
        yield factory
@pytest.fixture(scope="package", autouse=True)
def bridge_pytest_and_runtests(  # pylint: disable=function-redefined
    reap_stray_processes,
    base_env_state_tree_root_dir,
    prod_env_state_tree_root_dir,
    base_env_pillar_tree_root_dir,
    prod_env_pillar_tree_root_dir,
    salt_mm_master,
    salt_mm_sub_master,
    salt_mm_minion,
    salt_mm_sub_minion,
):
    """
    Expose the pytest-managed daemon configurations to the unittest2 helpers.
    """
    # Make sure unittest2 uses the pytest generated configuration
    daemons = {
        "mm_master": salt_mm_master,
        "mm_minion": salt_mm_minion,
        "mm_sub_master": salt_mm_sub_master,
        "mm_sub_minion": salt_mm_sub_minion,
    }
    for role, daemon in daemons.items():
        RUNTIME_VARS.RUNTIME_CONFIGS[role] = freeze(daemon.config)
    # Make sure unittest2 classes know their paths
    RUNTIME_VARS.TMP_MM_CONF_DIR = os.path.dirname(salt_mm_master.config["conf_file"])
    RUNTIME_VARS.TMP_MM_MINION_CONF_DIR = os.path.dirname(
        salt_mm_minion.config["conf_file"]
    )
    RUNTIME_VARS.TMP_MM_SUB_CONF_DIR = os.path.dirname(
        salt_mm_sub_master.config["conf_file"]
    )
    RUNTIME_VARS.TMP_MM_SUB_MINION_CONF_DIR = os.path.dirname(
        salt_mm_sub_minion.config["conf_file"]
    )

View file

@ -1 +0,0 @@
# -*- coding: utf-8 -*-

View file

@ -1,78 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import salt.modules.iptables
from tests.support.case import MultimasterModuleCase, MultiMasterTestShellCase
from tests.support.helpers import destructiveTest, skip_if_not_root, slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.unit import skipIf
# __virtual__() returns either a boolean or a (False, reason) tuple;
# normalize to a plain boolean for the skipIf decorator below.
HAS_IPTABLES = salt.modules.iptables.__virtual__()
if isinstance(HAS_IPTABLES, tuple):
    HAS_IPTABLES = HAS_IPTABLES[0]
@destructiveTest
@skip_if_not_root
@skipIf(not HAS_IPTABLES, "iptables command is not available")
class TestHandleEvents(
    MultimasterModuleCase, MultiMasterTestShellCase, AdaptedConfigurationTestCaseMixin
):
    """
    Validate the events handling in multimaster environment
    """

    @slowTest
    def test_minion_hangs_on_master_failure_50814(self):
        """
        Check minion handling events for the alive master when another master is dead.
        The case being checked here is described in details in issue #50814.
        """
        # iptables rule dropping loopback traffic to the first master's
        # ret_port, simulating that master going offline.
        disconnect_master_rule = "-i lo -p tcp --dport {0} -j DROP".format(
            self.mm_master_opts["ret_port"]
        )
        # Disconnect the master.
        res = self.run_function(
            "iptables.append",
            ("filter", "INPUT", disconnect_master_rule),
            master_tgt="mm-sub-master",
        )
        # Workaround slow beacons.list_available response: retry once.
        if not res:
            res = self.run_function(
                "iptables.append",
                ("filter", "INPUT", disconnect_master_rule),
                master_tgt="mm-sub-master",
            )
        self.assertTrue(res)
        try:
            # Send an event. This would return okay.
            res = self.run_function(
                "event.send", ("myco/foo/bar",), master_tgt="mm-sub-master",
            )
            self.assertTrue(res)
            # Send one more event. Minion was hanging on this. This is fixed by #53417
            res = self.run_function(
                "event.send", ("myco/foo/bar",), master_tgt="mm-sub-master", timeout=60,
            )
            self.assertTrue(
                res,
                "Minion is not responding to the second master after the first "
                "one has gone. Check #50814 for details.",
            )
        finally:
            # Remove the firewall rule taking master online back.
            # Since minion could be not responsive now use `salt-call --local` for this.
            res = self.run_call(
                "iptables.delete filter INPUT rule='{0}'".format(
                    disconnect_master_rule
                ),
                local=True,
                timeout=30,
            )
            self.assertEqual(res, ["local:"])
            # Ensure the master is back.
            res = self.run_function(
                "event.send", ("myco/foo/bar",), master_tgt="mm-master",
            )
            self.assertTrue(res)

View file

@ -1 +0,0 @@
# -*- coding: utf-8 -*-

View file

@ -1,83 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import salt.config
import salt.version
from tests.support.case import MultimasterModuleCase
from tests.support.helpers import slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
class TestModuleTest(MultimasterModuleCase, AdaptedConfigurationTestCaseMixin):
    """
    Validate the test module

    ``run_function_all_masters`` issues the call through both masters and
    returns one result per master, hence the ``* 2`` expectations below.
    """

    def test_ping(self):
        """
        test.ping
        """
        self.assertEqual(self.run_function_all_masters("test.ping"), [True] * 2)

    def test_echo(self):
        """
        test.echo
        """
        self.assertEqual(
            self.run_function_all_masters("test.echo", ["text"]), ["text"] * 2
        )

    def test_version(self):
        """
        test.version
        """
        self.assertEqual(
            self.run_function_all_masters("test.version"),
            [salt.version.__saltstack_version__.string] * 2,
        )

    def test_conf_test(self):
        """
        test.conf_test
        """
        # "test.foo: baz" is set in the multimaster minion config.
        self.assertEqual(self.run_function_all_masters("test.conf_test"), ["baz"] * 2)

    def test_get_opts(self):
        """
        test.get_opts
        """
        opts = salt.config.minion_config(self.get_config_file_path("mm_minion"))
        ret = self.run_function_all_masters("test.get_opts")
        # Compare a stable option rather than the whole opts dict.
        self.assertEqual(ret[0]["cachedir"], opts["cachedir"])
        self.assertEqual(ret[1]["cachedir"], opts["cachedir"])

    def test_cross_test(self):
        """
        test.cross_test
        """
        self.assertTrue(self.run_function_all_masters("test.cross_test", ["test.ping"]))

    def test_fib(self):
        """
        test.fib
        """
        # test.fib returns (fibonacci_value, compute_time); check the value.
        ret = self.run_function_all_masters("test.fib", ["20"])
        self.assertEqual(ret[0][0], 6765)
        self.assertEqual(ret[1][0], 6765)

    @slowTest
    def test_collatz(self):
        """
        test.collatz
        """
        # The Collatz sequence from 40 must terminate at ... 4, 2.
        ret = self.run_function_all_masters("test.collatz", ["40"])
        self.assertEqual(ret[0][0][-1], 2)
        self.assertEqual(ret[1][0][-1], 2)

    def test_outputter(self):
        """
        test.outputter
        """
        self.assertEqual(
            self.run_function_all_masters("test.outputter", ["text"]), ["text"] * 2
        )

View file

@ -0,0 +1,201 @@
import logging
import shutil
import time

import pytest

import salt.config
import salt.utils.platform
import salt.version
from tests.support.helpers import slowTest

try:
    import pyinotify  # pylint: disable=unused-import

    HAS_PYINOTIFY = True
except ImportError:
    HAS_PYINOTIFY = False
log = logging.getLogger(__name__)
# Module-wide skips: the inotify beacon needs pyinotify and does not see
# IN_CREATE events on FreeBSD.
pytestmark = [
    pytest.mark.skipif(not HAS_PYINOTIFY, reason="pyinotify is not available"),
    pytest.mark.skipif(
        salt.utils.platform.is_freebsd(),
        reason="Skip on FreeBSD, IN_CREATE event is not supported",
    ),
]
@pytest.fixture(scope="module")
def inotify_test_path(tmp_path_factory):
    """
    Temporary directory watched by the inotify beacon; removed on teardown.
    """
    watch_dir = tmp_path_factory.mktemp("inotify-tests")
    try:
        yield watch_dir
    finally:
        shutil.rmtree(str(watch_dir), ignore_errors=True)
@pytest.fixture(scope="module")
def event_listener(salt_factories):
    # Shared listener used to collect events published on both masters.
    return salt_factories.event_listener
@pytest.fixture(scope="module")
def setup_beacons(mm_master_1_salt_cli, salt_mm_minion_1, inotify_test_path):
    """
    Configure the inotify and status beacons on the first minion.

    Yields the timestamp taken just before the beacons were added so the test
    can filter for events emitted after this point. Both beacons are removed
    on teardown.
    """
    start_time = time.time()
    try:
        # Watch for file creation inside the temporary test directory.
        ret = mm_master_1_salt_cli.run(
            "beacons.add",
            "inotify",
            beacon_data=[{"files": {str(inotify_test_path): {"mask": ["create"]}}}],
            minion_tgt=salt_mm_minion_1.id,
        )
        assert ret.exitcode == 0
        log.debug("Inotify beacon add returned: %s", ret.json or ret.stdout)
        assert ret.json
        assert ret.json["result"] is True
        # Add a status beacon to use for interval checks
        ret = mm_master_1_salt_cli.run(
            "beacons.add",
            "status",
            beacon_data=[{"time": ["all"]}],
            minion_tgt=salt_mm_minion_1.id,
        )
        assert ret.exitcode == 0
        log.debug("Status beacon add returned: %s", ret.json or ret.stdout)
        assert ret.json
        assert ret.json["result"] is True
        # Confirm both beacons are configured before handing over to the test.
        ret = mm_master_1_salt_cli.run(
            "beacons.list", return_yaml=False, minion_tgt=salt_mm_minion_1.id
        )
        assert ret.exitcode == 0
        log.debug("Beacons list: %s", ret.json or ret.stdout)
        assert ret.json
        assert "inotify" in ret.json
        assert ret.json["inotify"] == [
            {"files": {str(inotify_test_path): {"mask": ["create"]}}}
        ]
        assert "status" in ret.json
        assert ret.json["status"] == [{"time": ["all"]}]
        yield start_time
    finally:
        # Remove the added beacons
        for beacon in ("inotify", "status"):
            mm_master_1_salt_cli.run(
                "beacons.delete", beacon, minion_tgt=salt_mm_minion_1.id
            )
@slowTest
def test_beacons_duplicate_53344(
    event_listener,
    inotify_test_path,
    salt_mm_minion_1,
    salt_mm_master_1,
    salt_mm_master_2,
    setup_beacons,
):
    """
    A single beacon event must be delivered to both masters
    (regression test for issue #53344).
    """
    # We have to wait for the beacons' first execution, which configures the
    # inotify watch. Since beacons are executed together, receiving the status
    # beacon event implies the inotify beacon was executed too.
    start_time = setup_beacons
    stop_time = start_time + salt_mm_minion_1.config["loop_interval"] * 2 + 60
    mm_master_1_event = mm_master_2_event = None
    expected_tag = "salt/beacon/{}/status/*".format(salt_mm_minion_1.id)
    mm_master_1_event_pattern = (salt_mm_master_1.id, expected_tag)
    mm_master_2_event_pattern = (salt_mm_master_2.id, expected_tag)
    # Poll both masters' event buses until each has seen a status event.
    while True:
        if time.time() > stop_time:
            pytest.fail(
                "Failed to receive at least one of the status events. "
                "Master 1 Event: {}; Master 2 Event: {}".format(
                    mm_master_1_event, mm_master_2_event
                )
            )
        if not mm_master_1_event:
            events = event_listener.get_events(
                [mm_master_1_event_pattern], after_time=start_time
            )
            for event in events:
                # Keep the first matching event only.
                mm_master_1_event = event
                break
        if not mm_master_2_event:
            events = event_listener.get_events(
                [mm_master_2_event_pattern], after_time=start_time
            )
            for event in events:
                mm_master_2_event = event
                break
        if mm_master_1_event and mm_master_2_event:
            # We got all events back
            break
        time.sleep(0.5)
    log.debug("Status events received: %s, %s", mm_master_1_event, mm_master_2_event)
    # Let's trigger an inotify event by creating a file in the watched path.
    start_time = time.time()
    file_path = inotify_test_path / "tmpfile"
    file_path.write_text("")
    log.warning(
        "Test file to trigger the inotify event has been written to: %s", file_path
    )
    stop_time = start_time + salt_mm_minion_1.config["loop_interval"] * 3 + 60
    # Now in successful case this test will get results at most in 3 loop intervals.
    # Waiting for 3 loops intervals + some seconds to the hardware stupidity.
    mm_master_1_event = mm_master_2_event = None
    expected_tag = "salt/beacon/{}/inotify/{}".format(
        salt_mm_minion_1.id, inotify_test_path
    )
    mm_master_1_event_pattern = (salt_mm_master_1.id, expected_tag)
    mm_master_2_event_pattern = (salt_mm_master_2.id, expected_tag)
    # Same polling loop as above, now for the inotify event.
    while True:
        if time.time() > stop_time:
            pytest.fail(
                "Failed to receive at least one of the inotify events. "
                "Master 1 Event: {}; Master 2 Event: {}".format(
                    mm_master_1_event, mm_master_2_event
                )
            )
        if not mm_master_1_event:
            events = event_listener.get_events(
                [mm_master_1_event_pattern], after_time=start_time
            )
            for event in events:
                mm_master_1_event = event
                break
        if not mm_master_2_event:
            events = event_listener.get_events(
                [mm_master_2_event_pattern], after_time=start_time
            )
            for event in events:
                mm_master_2_event = event
                break
        if mm_master_1_event and mm_master_2_event:
            # We got all events back
            break
        time.sleep(0.5)
    log.debug("Inotify events received: %s, %s", mm_master_1_event, mm_master_2_event)
    # We can't determine the timestamp so remove it from results
    for event in (mm_master_1_event, mm_master_2_event):
        del event.data["_stamp"]
    expected_data = {
        "path": str(file_path),
        "change": "IN_CREATE",
        "id": salt_mm_minion_1.id,
    }
    # It's better to compare both at once to see both responses in the error log.
    assert ((expected_tag, expected_data), (expected_tag, expected_data)) == (
        (mm_master_1_event.tag, mm_master_1_event.data),
        (mm_master_2_event.tag, mm_master_2_event.data),
    )

View file

@ -0,0 +1,114 @@
import logging
import os
import shutil
import pytest
log = logging.getLogger(__name__)
@pytest.fixture(scope="package", autouse=True)
def skip_on_tcp_transport(request):
    """
    Skip every test in this package when running under the TCP transport.

    Multimaster is known to be broken with TCP; see issue #59053.
    """
    if request.config.getoption("--transport") == "tcp":
        # Fixed typo in the user-facing skip reason: "TPC" -> "TCP".
        pytest.skip("Multimaster under the TCP transport is not working. See #59053")
@pytest.fixture(scope="package")
def salt_mm_master_1(request, salt_factories):
    """
    Primary multimaster master daemon.
    """
    factory = salt_factories.get_salt_master_daemon(
        "mm-master-1",
        config_defaults={
            "open_mode": True,
            "transport": request.config.getoption("--transport"),
        },
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started(start_timeout=120):
        yield factory
@pytest.fixture(scope="package")
def mm_master_1_salt_cli(salt_mm_master_1):
    # ``salt`` CLI wired to the first master; generous timeout for CI boxes.
    return salt_mm_master_1.get_salt_cli(default_timeout=120)
@pytest.fixture(scope="package")
def salt_mm_master_2(salt_factories, salt_mm_master_1):
    """
    Secondary master; reuses the primary master's keypair so minions can
    authenticate against either master.
    """
    factory = salt_factories.get_salt_master_daemon(
        "mm-master-2",
        config_defaults={
            "open_mode": True,
            "transport": salt_mm_master_1.config["transport"],
        },
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # The secondary salt master depends on the primarily salt master fixture
    # because we need to clone the keys
    src_pki = salt_mm_master_1.config["pki_dir"]
    dst_pki = factory.config["pki_dir"]
    for keyfile in ("master.pem", "master.pub"):
        shutil.copyfile(
            os.path.join(src_pki, keyfile), os.path.join(dst_pki, keyfile)
        )
    with factory.started(start_timeout=120):
        yield factory
@pytest.fixture(scope="package")
def mm_master_2_salt_cli(salt_mm_master_2):
    # ``salt`` CLI wired to the second master; generous timeout for CI boxes.
    return salt_mm_master_2.get_salt_cli(default_timeout=120)
@pytest.fixture(scope="package")
def salt_mm_minion_1(salt_mm_master_1, salt_mm_master_2):
    """
    First minion, connected to both masters.
    """
    factory = salt_mm_master_1.get_salt_minion_daemon(
        "mm-minion-1",
        config_defaults={"transport": salt_mm_master_1.config["transport"]},
        config_overrides={
            "master": [
                "localhost:{}".format(salt_mm_master_1.config["ret_port"]),
                "localhost:{}".format(salt_mm_master_2.config["ret_port"]),
            ],
            "test.foo": "baz",
        },
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started(start_timeout=120):
        yield factory
@pytest.fixture(scope="package")
def salt_mm_minion_2(salt_mm_master_1, salt_mm_master_2):
    """
    Second minion, also connected to both masters but owned by master 2.
    """
    factory = salt_mm_master_2.get_salt_minion_daemon(
        "mm-minion-2",
        config_defaults={"transport": salt_mm_master_1.config["transport"]},
        config_overrides={
            "master": [
                "localhost:{}".format(salt_mm_master_1.config["ret_port"]),
                "localhost:{}".format(salt_mm_master_2.config["ret_port"]),
            ],
            "test.foo": "baz",
        },
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    with factory.started(start_timeout=120):
        yield factory

View file

@ -0,0 +1,225 @@
import salt.config
import salt.version
def test_ping(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.ping
    """
    # Every minion must answer a ping issued through either master.
    for minion in (salt_mm_minion_1, salt_mm_minion_2):
        for salt_cli in (mm_master_1_salt_cli, mm_master_2_salt_cli):
            ret = salt_cli.run("test.ping", minion_tgt=minion.id)
            assert ret.exitcode == 0
            assert ret.json is True
def test_echo(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.echo
    """
    # Both masters must get the argument echoed back from both minions.
    for minion in (salt_mm_minion_1, salt_mm_minion_2):
        for salt_cli in (mm_master_1_salt_cli, mm_master_2_salt_cli):
            ret = salt_cli.run("test.echo", "text", minion_tgt=minion.id)
            assert ret.exitcode == 0
            assert ret.json == "text"
def test_version(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.version
    """
    # All daemons run from the same checkout, so versions must match exactly.
    expected_version = salt.version.__saltstack_version__.string
    for minion in (salt_mm_minion_1, salt_mm_minion_2):
        for salt_cli in (mm_master_1_salt_cli, mm_master_2_salt_cli):
            ret = salt_cli.run("test.version", minion_tgt=minion.id)
            assert ret.exitcode == 0
            assert ret.json == expected_version
def test_conf_test(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.conf_test
    """
    # "test.foo: baz" is set in both minions' config overrides; conf_test
    # must report it no matter which master issues the command.
    ret = mm_master_1_salt_cli.run("test.conf_test", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json == "baz"
    ret = mm_master_2_salt_cli.run("test.conf_test", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json == "baz"
    ret = mm_master_1_salt_cli.run("test.conf_test", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json == "baz"
    ret = mm_master_2_salt_cli.run("test.conf_test", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json == "baz"
def test_cross_test(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.cross_test
    """
    # cross_test makes the minion publish a job to itself via the master(s).
    ret = mm_master_1_salt_cli.run(
        "test.cross_test", "test.ping", minion_tgt=salt_mm_minion_1.id
    )
    assert ret.exitcode == 0
    assert ret.json is True
    ret = mm_master_2_salt_cli.run(
        "test.cross_test", "test.ping", minion_tgt=salt_mm_minion_1.id
    )
    assert ret.exitcode == 0
    assert ret.json is True
    ret = mm_master_1_salt_cli.run(
        "test.cross_test", "test.ping", minion_tgt=salt_mm_minion_2.id
    )
    assert ret.exitcode == 0
    assert ret.json is True
    ret = mm_master_2_salt_cli.run(
        "test.cross_test", "test.ping", minion_tgt=salt_mm_minion_2.id
    )
    assert ret.exitcode == 0
    assert ret.json is True
def test_outputter(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.outputter
    """
    # The outputter wrapper must still hand the raw value back to the CLI.
    for minion in (salt_mm_minion_1, salt_mm_minion_2):
        for salt_cli in (mm_master_1_salt_cli, mm_master_2_salt_cli):
            ret = salt_cli.run("test.outputter", "text", minion_tgt=minion.id)
            assert ret.exitcode == 0
            assert ret.json == "text"
def test_fib(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.fib
    """
    # test.fib returns (value, duration); fib(20) == 6765.
    for minion in (salt_mm_minion_1, salt_mm_minion_2):
        for salt_cli in (mm_master_1_salt_cli, mm_master_2_salt_cli):
            ret = salt_cli.run("test.fib", "20", minion_tgt=minion.id)
            assert ret.exitcode == 0
            assert ret.json[0] == 6765
def test_collatz(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.collatz
    """
    # The Collatz sequence starting at 40 must terminate at ... 4, 2.
    ret = mm_master_1_salt_cli.run("test.collatz", "40", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json[0][-1] == 2
    ret = mm_master_2_salt_cli.run("test.collatz", "40", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json[0][-1] == 2
    ret = mm_master_1_salt_cli.run("test.collatz", "40", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json[0][-1] == 2
    ret = mm_master_2_salt_cli.run("test.collatz", "40", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json[0][-1] == 2
def test_get_opts(
    mm_master_1_salt_cli, salt_mm_minion_1, mm_master_2_salt_cli, salt_mm_minion_2
):
    """
    test.get_opts
    """
    # Compare a stable option (cachedir) against the fixture's own config
    # instead of diffing the whole opts dictionary.
    ret = mm_master_1_salt_cli.run("test.get_opts", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json["cachedir"] == salt_mm_minion_1.config["cachedir"]
    ret = mm_master_2_salt_cli.run("test.get_opts", minion_tgt=salt_mm_minion_1.id)
    assert ret.exitcode == 0
    assert ret.json["cachedir"] == salt_mm_minion_1.config["cachedir"]
    ret = mm_master_1_salt_cli.run("test.get_opts", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json["cachedir"] == salt_mm_minion_2.config["cachedir"]
    ret = mm_master_2_salt_cli.run("test.get_opts", minion_tgt=salt_mm_minion_2.id)
    assert ret.exitcode == 0
    assert ret.json["cachedir"] == salt_mm_minion_2.config["cachedir"]

View file

@ -0,0 +1,113 @@
import time
import pytest
@pytest.fixture(scope="module")
def event_listener(salt_factories):
    # Shared listener used to collect events published on both masters.
    return salt_factories.event_listener
def test_minion_hangs_on_master_failure_50814(
    event_listener,
    salt_mm_master_1,
    salt_mm_master_2,
    salt_mm_minion_1,
    mm_master_2_salt_cli,
):
    """
    Check minion handling events for the alive master when another master is dead.
    The case being checked here is described in details in issue #50814.
    """
    # Let's make sure everything works with both masters online
    event_count = 3
    while event_count:
        check_event_start_time = time.time()
        stop_time = check_event_start_time + 30
        event_tag = "myco/foo/bar/{}".format(event_count)
        ret = mm_master_2_salt_cli.run(
            "event.send", event_tag, minion_tgt=salt_mm_minion_1.id
        )
        assert ret.exitcode == 0
        assert ret.json is True
        # Let's make sure we get the event back on both masters
        mm_master_1_event_match = mm_master_2_event_match = None
        while True:
            if time.time() > stop_time:
                pytest.fail(
                    "Minion is not responding to the second master after the first "
                    "one has gone. Check #50814 for details."
                )
            if (
                mm_master_1_event_match is not None
                and mm_master_2_event_match is not None
            ):
                # We got the right event back!
                break
            time.sleep(0.5)
            if mm_master_1_event_match is None:
                events = event_listener.get_events(
                    [(salt_mm_master_1.id, event_tag)],
                    after_time=check_event_start_time,
                )
                for event in events:
                    # We got the event back!
                    if event.tag == event_tag:
                        mm_master_1_event_match = True
                        break
            if mm_master_2_event_match is None:
                events = event_listener.get_events(
                    [(salt_mm_master_2.id, event_tag)],
                    after_time=check_event_start_time,
                )
                for event in events:
                    # We got the event back!
                    if event.tag == event_tag:
                        mm_master_2_event_match = True
                        break
        event_count -= 1
        time.sleep(0.5)
    # Now, let's try this with one of the masters offline
    with salt_mm_master_1.stopped():
        assert salt_mm_master_1.is_running() is False
        # Sending one event would be okay. It would hang after the second
        # with one of the masters offline.
        event_count = 1
        while event_count <= 3:
            check_event_start_time = time.time()
            stop_time = check_event_start_time + 30
            event_tag = "myco/foo/bar/{}".format(event_count)
            ret = mm_master_2_salt_cli.run(
                "event.send", event_tag, minion_tgt=salt_mm_minion_1.id
            )
            assert ret.exitcode == 0
            assert ret.json is True
            # Let's make sure we get the event back
            event_match = None
            while True:
                if time.time() > stop_time:
                    # BUG FIX: ``pytest.fail`` does not do printf-style
                    # interpolation; the previous code passed ``event_count``
                    # as the ``pytrace`` argument and left a literal "%s" in
                    # the message. Format the message explicitly instead.
                    pytest.fail(
                        "Minion is not responding to the second master (events "
                        "sent: {}) after the first has gone offline. Check "
                        "#50814 for details.".format(event_count)
                    )
                if event_match is not None:
                    # We got the right event back!
                    break
                time.sleep(0.5)
                events = event_listener.get_events(
                    [(salt_mm_master_2.id, event_tag)],
                    after_time=check_event_start_time,
                )
                for event in events:
                    # We got the event back!
                    if event.tag == event_tag:
                        event_match = True
                        break
            event_count += 1
            time.sleep(0.5)

View file

@ -58,7 +58,6 @@ except ImportError as exc:
raise
from tests.integration import TestDaemon, TestDaemonStartFailed # isort:skip
from tests.multimaster import MultimasterTestDaemon # isort:skip
import salt.utils.platform # isort:skip
if not salt.utils.platform.is_windows():
@ -156,8 +155,8 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
suites -= {"proxy"}
if not include_kitchen:
suites -= {"kitchen"}
if not include_multimaster:
suites -= {"multimaster"}
# Multimaster tests now run under PyTest
suites -= {"multimaster"}
return suites
@ -570,7 +569,6 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
# Transplant configuration
TestDaemon.transplant_configs(transport=self.options.transport)
MultimasterTestDaemon.transplant_configs(transport=self.options.transport)
def post_execution_cleanup(self):
SaltCoverageTestingParser.post_execution_cleanup(self)
@ -671,68 +669,6 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
except TestDaemonStartFailed:
self.exit(status=2)
    def start_multimaster_daemons_only(self):
        """
        Start the multimaster daemons for interactive use and block until
        interrupted, printing each daemon's connection details first.
        """
        # Daemons open many files/sockets; raise the soft limits first.
        if not salt.utils.platform.is_windows():
            self.set_filehandle_limits("integration")
        try:
            print_header(
                " * Setting up Salt daemons for interactive use",
                top=False,
                width=getattr(self.options, "output_columns", PNUM),
            )
        except TypeError:
            # Older print_header signature without the ``width`` argument.
            print_header(" * Setting up Salt daemons for interactive use", top=False)
        try:
            with MultimasterTestDaemon(self):
                print_header(" * Salt daemons started")
                master_conf = MultimasterTestDaemon.config("mm_master")
                sub_master_conf = MultimasterTestDaemon.config("mm_sub_master")
                minion_conf = MultimasterTestDaemon.config("mm_minion")
                sub_minion_conf = MultimasterTestDaemon.config("mm_sub_minion")
                print_header(" * Master configuration values", top=True)
                print("interface: {}".format(master_conf["interface"]))
                print("publish port: {}".format(master_conf["publish_port"]))
                print("return port: {}".format(master_conf["ret_port"]))
                print("\n")
                print_header(" * Second master configuration values", top=True)
                print("interface: {}".format(sub_master_conf["interface"]))
                print("publish port: {}".format(sub_master_conf["publish_port"]))
                print("return port: {}".format(sub_master_conf["ret_port"]))
                print("\n")
                print_header(" * Minion configuration values", top=True)
                print("interface: {}".format(minion_conf["interface"]))
                print("masters: {}".format(", ".join(minion_conf["master"])))
                # TCP ipc_mode (Windows default) uses extra local ports.
                if minion_conf["ipc_mode"] == "tcp":
                    print("tcp pub port: {}".format(minion_conf["tcp_pub_port"]))
                    print("tcp pull port: {}".format(minion_conf["tcp_pull_port"]))
                print("\n")
                print_header(" * Sub Minion configuration values", top=True)
                print("interface: {}".format(sub_minion_conf["interface"]))
                print("masters: {}".format(", ".join(sub_minion_conf["master"])))
                if sub_minion_conf["ipc_mode"] == "tcp":
                    print("tcp pub port: {}".format(sub_minion_conf["tcp_pub_port"]))
                    print("tcp pull port: {}".format(sub_minion_conf["tcp_pull_port"]))
                print("\n")
                print_header(
                    " Your client configurations are at {}".format(
                        ", ".join(MultimasterTestDaemon.config_location())
                    )
                )
                print("To access minions from different masters use:")
                for location in MultimasterTestDaemon.config_location():
                    print(" salt -c {} minion test.ping".format(location))
                # Keep the daemons alive until the user interrupts.
                while True:
                    time.sleep(1)
        except TestDaemonStartFailed:
            self.exit(status=2)
def set_filehandle_limits(self, limits="integration"):
"""
Set soft and hard limits on open file handles at required thresholds
@ -911,73 +847,6 @@ class SaltTestsuiteParser(SaltCoverageTestingParser):
except TestDaemonStartFailed:
self.exit(status=2)
    def run_multimaster_tests(self):
        """
        Execute the multimaster tests suite
        """
        named_tests = []
        # NOTE(review): named_unit_test is collected nowhere below — appears
        # to be dead; kept for interface stability.
        named_unit_test = []
        if self.options.name:
            for test in self.options.name:
                if test.startswith(("tests.multimaster.", "multimaster.")):
                    named_tests.append(test)
        # TODO: check 'from_filenames'
        if not self.options.multimaster and not named_tests:
            # We're not running any multimaster test suites.
            return [True]
        if not salt.utils.platform.is_windows():
            self.set_filehandle_limits("integration")
        try:
            print_header(
                " * Setting up multimaster Salt daemons to execute tests",
                top=False,
                width=getattr(self.options, "output_columns", PNUM),
            )
        except TypeError:
            # Older print_header signature without the ``width`` argument.
            print_header(
                " * Setting up multimaster Salt daemons to execute tests", top=False
            )
        status = []
        try:
            with MultimasterTestDaemon(self):
                if self.options.name:
                    for name in self.options.name:
                        name = name.strip()
                        if not name:
                            continue
                        if os.path.isfile(name):
                            # Named test given as a file path; only accept
                            # python files below tests/multimaster.
                            if not name.endswith(".py"):
                                continue
                            if not name.startswith(
                                os.path.join("tests", "multimaster")
                            ):
                                continue
                            results = self.run_suite(
                                os.path.dirname(name),
                                name,
                                suffix=os.path.basename(name),
                                load_from_name=False,
                            )
                            status.append(results)
                            continue
                        # Named test given as a dotted module path.
                        if not name.startswith(("tests.multimaster.", "multimaster.")):
                            continue
                        results = self.run_suite(
                            "", name, suffix="test_*.py", load_from_name=True
                        )
                        status.append(results)
                    return status
                # No named tests: run the whole multimaster suite.
                status.append(self.run_integration_suite(**TEST_SUITES["multimaster"]))
            return status
        except TestDaemonStartFailed:
            self.exit(status=2)
def run_unit_tests(self):
"""
Execute the unit tests
@ -1082,13 +951,15 @@ def main(**kwargs):
overall_status = []
if parser.options.interactive:
if parser.options.multimaster:
parser.start_multimaster_daemons_only()
print(
"Multimaster tests now run under PyTest",
file=sys.stderr,
flush=True,
)
else:
parser.start_daemons_only()
status = parser.run_integration_tests()
overall_status.extend(status)
status = parser.run_multimaster_tests()
overall_status.extend(status)
status = parser.run_unit_tests()
overall_status.extend(status)
status = parser.run_kitchen_tests()

View file

@ -31,7 +31,6 @@ from tests.support.helpers import SKIP_IF_NOT_RUNNING_PYTEST, RedirectStdStreams
from tests.support.mixins import ( # pylint: disable=unused-import
AdaptedConfigurationTestCaseMixin,
SaltClientTestCaseMixin,
SaltMultimasterClientTestCaseMixin,
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
@ -550,17 +549,6 @@ class ShellCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
pass
class MultiMasterTestShellCase(ShellCase):
    """
    Execute a test for a shell command when running multi-master tests
    """

    @property
    def config_dir(self):
        # Point shell commands at the multimaster master's configuration dir.
        return RUNTIME_VARS.TMP_MM_CONF_DIR
class SPMTestUserInterface:
"""
Test user interface to SPMClient
@ -828,89 +816,6 @@ class ModuleCase(TestCase, SaltClientTestCaseMixin):
return ret
class MultimasterModuleCase(ModuleCase, SaltMultimasterClientTestCaseMixin):
    """
    Execute a module function
    """

    def run_function(
        self,
        function,
        arg=(),
        minion_tgt="mm-minion",
        timeout=300,
        master_tgt="mm-master",
        **kwargs
    ):
        """
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        """
        # Functions that legitimately return None; a None reply from any
        # other function is treated as a failed/missing response.
        known_to_return_none = (
            "data.get",
            "file.chown",
            "file.chgrp",
            "pkg.refresh_db",
            "ssh.recv_known_host_entries",
            "time.sleep",
        )
        if minion_tgt == "mm-sub-minion":
            known_to_return_none += ("mine.update",)
        # f_arg/f_timeout let callers pass 'arg'/'timeout' through to the
        # remote function without clashing with this method's parameters.
        if "f_arg" in kwargs:
            kwargs["arg"] = kwargs.pop("f_arg")
        if "f_timeout" in kwargs:
            kwargs["timeout"] = kwargs.pop("f_timeout")
        # master_tgt may be None (default master), an index into the ordered
        # clients mapping, or a master id string.
        if master_tgt is None:
            client = self.clients["mm-master"]
        elif isinstance(master_tgt, int):
            client = self.clients[list(self.clients)[master_tgt]]
        else:
            client = self.clients[master_tgt]
        orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
        # Under pytest a missing reply is a hard failure; under the legacy
        # runner it only skips the test.
        if RUNTIME_VARS.PYTEST_SESSION:
            fail_or_skip_func = self.fail
        else:
            fail_or_skip_func = self.skipTest
        if minion_tgt not in orig:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
                "from the minion '{}'. Command output: {}".format(minion_tgt, orig)
            )
        elif orig[minion_tgt] is None and function not in known_to_return_none:
            fail_or_skip_func(
                "WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
                "the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
            )
        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
        return orig[minion_tgt]

    def run_function_all_masters(
        self, function, arg=(), minion_tgt="mm-minion", timeout=300, **kwargs
    ):
        """
        Run a single salt function from all the masters in multimaster environment
        and condition the return down to match the behavior of the raw function call
        """
        # One entry per master, in the clients mapping's iteration order.
        ret = []
        for master_id in self.clients:
            ret.append(
                self.run_function(
                    function,
                    arg=arg,
                    minion_tgt=minion_tgt,
                    timeout=timeout,
                    master_tgt=master_id,
                    **kwargs
                )
            )
        return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a syndic based execution test

View file

@ -40,11 +40,6 @@ from tests.support.paths import CODE_DIR
from tests.support.pytest.loader import LoaderModuleMock
from tests.support.runtests import RUNTIME_VARS
try:
from salt.utils.odict import OrderedDict
except ImportError:
from collections import OrderedDict
log = logging.getLogger(__name__)
@ -255,27 +250,6 @@ class AdaptedConfigurationTestCaseMixin:
"""
return self.get_config("sub_minion")
@property
def mm_master_opts(self):
    """
    Return the options used for the multimaster master
    """
    # Looked up through the mixin's get_config() under the "mm_master" key.
    return self.get_config("mm_master")
@property
def mm_sub_master_opts(self):
    """
    Return the options used for the multimaster sub-master
    """
    # Looked up through the mixin's get_config() under the "mm_sub_master" key.
    return self.get_config("mm_sub_master")
@property
def mm_minion_opts(self):
    """
    Return the options used for the multimaster minion
    """
    # Looked up through the mixin's get_config() under the "mm_minion" key.
    return self.get_config("mm_minion")
class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
"""
@ -320,52 +294,6 @@ class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
return RUNTIME_VARS.RUNTIME_CONFIGS["runtime_client"]
class SaltMultimasterClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
    """
    Mix-in class that provides a ``clients`` attribute which returns a list of Salt
    :class:`LocalClient<salt:salt.client.LocalClient>`, one per configured master.

    .. code-block:: python

        class LocalClientTestCase(TestCase, SaltMultimasterClientTestCaseMixin):

            def test_check_pub_data(self):
                just_minions = {'minions': ['m1', 'm2']}
                jid_no_minions = {'jid': '1234', 'minions': []}
                valid_pub_data = {'minions': ['m1', 'm2'], 'jid': '1234'}

                for client in self.clients:
                    self.assertRaises(EauthAuthenticationError,
                                      client._check_pub_data, None)
                    self.assertDictEqual({},
                        client._check_pub_data(just_minions),
                        'Did not handle lack of jid correctly')

                    self.assertDictEqual(
                        {},
                        client._check_pub_data({'jid': '0'}),
                        'Passing JID of zero is not handled gracefully')
    """

    _salt_client_config_file_name_ = "master"

    @property
    def clients(self):
        # Late import
        import salt.client

        # Clients are cached process-wide in RUNTIME_CONFIGS so repeated
        # access does not rebuild them.
        runtime_clients = RUNTIME_VARS.RUNTIME_CONFIGS.setdefault(
            "runtime_clients", OrderedDict()
        )
        for master_id in ("mm-master", "mm-sub-master"):
            if master_id not in runtime_clients:
                mopts = self.get_config(master_id.replace("-", "_"), from_scratch=True)
                runtime_clients[master_id] = salt.client.get_local_client(mopts=mopts)
        return runtime_clients
class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin):
_call_binary_expected_version_ = salt.version.__version__

View file

@ -35,7 +35,6 @@ if sys.platform.startswith("win"):
CODE_DIR = CODE_DIR.replace("\\", "\\\\")
UNIT_TEST_DIR = os.path.join(TESTS_DIR, "unit")
INTEGRATION_TEST_DIR = os.path.join(TESTS_DIR, "integration")
MULTIMASTER_TEST_DIR = os.path.join(TESTS_DIR, "multimaster")
# Let's inject CODE_DIR so salt is importable if not there already
if TESTS_DIR in sys.path:
@ -73,10 +72,6 @@ TMP_CONF_DIR = TMP_MINION_CONF_DIR = os.path.join(TMP, "config")
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "sub-minion")
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-minion")
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-master")
TMP_MM_CONF_DIR = TMP_MM_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "multimaster")
TMP_MM_SUB_CONF_DIR = TMP_MM_SUB_MINION_CONF_DIR = os.path.join(
TMP_CONF_DIR, "sub-multimaster"
)
TMP_PROXY_CONF_DIR = TMP_CONF_DIR
TMP_SSH_CONF_DIR = TMP_MINION_CONF_DIR
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, "files", "conf")
@ -91,7 +86,7 @@ def list_test_mods():
A generator which returns all of the test files
"""
test_re = re.compile(r"^test_.+\.py$")
for dirname in (UNIT_TEST_DIR, INTEGRATION_TEST_DIR, MULTIMASTER_TEST_DIR):
for dirname in (UNIT_TEST_DIR, INTEGRATION_TEST_DIR):
test_type = os.path.basename(dirname)
for root, _, files in salt.utils.path.os_walk(dirname):
parent_mod = root[len(dirname) :].lstrip(os.sep).replace(os.sep, ".")

View file

@ -185,10 +185,6 @@ RUNTIME_VARS = RuntimeVars(
TMP_SYNDIC_MASTER_CONF_DIR=paths.TMP_SYNDIC_MASTER_CONF_DIR,
TMP_SYNDIC_MINION_CONF_DIR=paths.TMP_SYNDIC_MINION_CONF_DIR,
TMP_PROXY_CONF_DIR=paths.TMP_PROXY_CONF_DIR,
TMP_MM_CONF_DIR=paths.TMP_MM_CONF_DIR,
TMP_MM_MINION_CONF_DIR=paths.TMP_MM_MINION_CONF_DIR,
TMP_MM_SUB_CONF_DIR=paths.TMP_MM_SUB_CONF_DIR,
TMP_MM_SUB_MINION_CONF_DIR=paths.TMP_MM_SUB_CONF_DIR,
TMP_SSH_CONF_DIR=paths.TMP_SSH_CONF_DIR,
TMP_SCRIPT_DIR=paths.TMP_SCRIPT_DIR,
TMP_STATE_TREE=paths.TMP_STATE_TREE,

View file

@ -180,7 +180,6 @@ class BadTestModuleNamesTestCase(TestCase):
"integration.states.test_match",
"integration.states.test_renderers",
"integration.wheel.test_client",
"multimaster.minion.test_event",
"unit.cache.test_cache",
"unit.serializers.test_serializers",
"unit.setup.test_install",