Merge pull request #63428 from dwoz/issue/master/62618

Syndics honor MoM acl
Gareth J. Greenaway 2023-01-12 15:07:01 -08:00 committed by GitHub
commit 64fd0cdcb2
11 changed files with 932 additions and 30 deletions

changelog/62618.fixed (new file)

@@ -0,0 +1 @@
Fixed syndic eauth. Jobs are now published when a valid eauth user targets allowed minions and functions.

changelog/62933.fixed (new file)

@@ -0,0 +1 @@
Restored channel for Syndic minions to send job returns to the Salt master.


@@ -150,3 +150,13 @@ Run the following on the Salt minion:
.. _salt-users: https://groups.google.com/forum/#!forum/salt-users
.. _salt-announce: https://groups.google.com/forum/#!forum/salt-announce
Hardening of syndic setups
==========================
Syndics must be run as the same user as their syndic master process. The
master of masters will include publisher ACL information in jobs sent to
downstream masters via syndics. This means that any minions connected directly
to a master of masters will also receive ACL information in jobs being
published. For the most secure setup, only connect syndics directly to the
master of masters.
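
A minimal sketch of the trust rule described above, using simplified payload
shapes (illustrative only, not Salt's actual wire format): a downstream master
honors a forwarded ``auth_list`` only when the job was published under the
same user its own master process runs as::

    def trusted_auth_list(payload, master_user, keys):
        # payload: simplified job, e.g. {"key": ..., "auth_list": [...]}
        # keys: mapping of local users to their publish keys
        forwarded = payload.get("auth_list")
        if forwarded is not None and payload.get("key") == keys.get(master_user):
            return forwarded  # published by our own syndic user: honor the MoM ACL
        return []  # otherwise fall back to locally configured ACLs

    job = {"key": "s3cret", "auth_list": [{"*1": ["test.*"]}]}
    assert trusted_auth_list(job, "salt", {"salt": "s3cret"}) == [{"*1": ["test.*"]}]
    assert trusted_auth_list(job, "salt", {"salt": "other"}) == []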


@@ -21,14 +21,6 @@ node and the local ``salt-master`` daemon. This gives the Master node control
over the Minion nodes attached to the ``salt-master`` daemon running on the
Syndic node.
.. warning::
Salt does not officially support Syndic and :ref:`external auth or
publisher_acl<acl-eauth>`. It's possible that it might work under certain
circumstances, but comprehensive support is lacking. See `issue #62618 on
GitHub <https://github.com/saltstack/salt/issues/62618>`_ for more
information. Currently Syndic is only expected to work when running Salt as
root, though work is scheduled to fix this in Salt 3006 (Sulfur).
Configuring the Syndic
======================
@@ -71,6 +63,10 @@ The :conf_master:`order_masters` option configures the Master node to send
extra information with its publications that is needed by Syndic nodes
connected directly to it.
.. warning::
The syndic process must be run as the same user as the syndic master.
.. note::
Each Syndic must provide its own ``file_roots`` directory. Files will not


@@ -823,7 +823,6 @@ class LocalClient:
listen=True,
**kwargs
)
if not self.pub_data:
yield self.pub_data
else:
@@ -1191,6 +1190,13 @@ class LocalClient:
# if we got None, then there were no events
if raw is None:
break
if "error" in raw.get("data", {}):
yield {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
if "minions" in raw.get("data", {}):
minions.update(raw["data"]["minions"])
if "missing" in raw.get("data", {}):
@@ -1707,6 +1713,8 @@ class LocalClient:
"retcode": salt.defaults.exitcodes.EX_GENERIC,
}
}
elif "error" in min_ret:
raise AuthorizationError("Authorization error occurred")
else:
yield {id_: min_ret}
@@ -1825,11 +1833,7 @@ class LocalClient:
if kwargs:
payload_kwargs["kwargs"] = kwargs
# If we have a salt user, add it to the payload
if self.opts["syndic_master"] and "user" in kwargs:
payload_kwargs["user"] = kwargs["user"]
elif self.salt_user:
payload_kwargs["user"] = self.salt_user
payload_kwargs["user"] = self.salt_user
# If we're a syndication master, pass the timeout
if self.opts["order_masters"]:
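
These two hunks change what callers of ``LocalClient`` see on an authorization
failure: result iterators can now yield an explicit error payload, and the
``get_cli_returns``-style paths raise ``salt.exceptions.AuthorizationError``
instead of waiting out the timeout. A hedged usage sketch, assuming a running
master and Salt's Python API (which path triggers depends on the client call
used):

    import salt.client
    from salt.exceptions import AuthorizationError

    local = salt.client.LocalClient()
    try:
        for ret in local.cmd_iter("*", "test.ping"):
            if "error" in ret:
                # shape yielded by the new code path
                print("denied:", ret["error"]["message"])
                break
            print(ret)
    except AuthorizationError as exc:
        print("authorization failed:", exc)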


@@ -2167,7 +2167,8 @@ class ClearFuncs(TransportMethods):
}
# Retrieve the minions list
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
delimiter = extra.get("delimiter", DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter
)
@@ -2175,6 +2176,8 @@ class ClearFuncs(TransportMethods):
missing = _res.get("missing", list())
ssh_minions = _res.get("ssh_minions", False)
auth_key = clear_load.get("key", None)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == "user":
@@ -2184,20 +2187,36 @@ class ClearFuncs(TransportMethods):
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get("auth_list", [])
# Setup authorization list
syndic_auth_list = None
if "auth_list" in extra:
syndic_auth_list = extra.pop("auth_list", [])
# An auth_list was provided by the syndic and we're running as the same
# user as the salt master process.
if (
syndic_auth_list is not None
and auth_key == key[self.opts.get("user", "root")]
):
auth_list = syndic_auth_list
else:
auth_list = auth_check.get("auth_list", [])
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type)
if auth_check.get("error"):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {
err = {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
if "jid" in clear_load:
self.event.fire_event(
{**clear_load, **err}, tagify([clear_load["jid"], "error"], "job")
)
return err
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != "user" or (auth_type == "user" and auth_list):
# Authorize the request
@@ -2226,12 +2245,18 @@ class ClearFuncs(TransportMethods):
extra["username"],
)
log.warning(err_msg)
return {
err = {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
if "jid" in clear_load:
self.event.fire_event(
{**clear_load, **err},
tagify([clear_load["jid"], "error"], "job"),
)
return err
# Perform some specific auth_type tasks after the authorization check
if auth_type == "token":
@@ -2264,6 +2289,9 @@ class ClearFuncs(TransportMethods):
return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
if self.opts.get("order_masters"):
payload["auth_list"] = auth_list
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
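
Besides returning the error to the caller, ``ClearFuncs.publish`` now also
fires the failure onto the event bus under the job's tag
(``salt/job/<jid>/error`` via ``tagify``), so anything already waiting on the
jid can stop early instead of running out the timeout. A hedged sketch of
watching for that event, assuming Salt's standard event API and a hypothetical
jid:

    import salt.config
    import salt.utils.event

    opts = salt.config.client_config("/etc/salt/master")
    jid = "20230112150701123456"  # hypothetical job id

    bus = salt.utils.event.get_event("master", opts=opts, listen=True)
    evt = bus.get_event(tag=f"salt/job/{jid}/error", wait=10)
    if evt and "error" in evt:
        print(evt["error"]["name"], "-", evt["error"]["message"])
    bus.destroy()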


@@ -3278,7 +3278,8 @@ class Syndic(Minion):
# Set up default tgt_type
if "tgt_type" not in data:
data["tgt_type"] = "glob"
kwargs = {}
kwargs = {"auth_list": data.pop("auth_list", [])}
# optionally add a few fields to the publish data
for field in (
@@ -3306,6 +3307,32 @@ class Syndic(Minion):
**kwargs
)
def _send_req_sync(self, load, timeout):
if self.opts["minion_sign_messages"]:
log.trace("Signing event to be published onto the bus.")
minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
sig = salt.crypt.sign_message(
minion_privkey_path, salt.serializers.msgpack.serialize(load)
)
load["sig"] = sig
return self.req_channel.send(
load, timeout=timeout, tries=self.opts["return_retry_tries"]
)
@salt.ext.tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts["minion_sign_messages"]:
log.trace("Signing event to be published onto the bus.")
minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem")
sig = salt.crypt.sign_message(
minion_privkey_path, salt.serializers.msgpack.serialize(load)
)
load["sig"] = sig
ret = yield self.async_req_channel.send(
load, timeout=timeout, tries=self.opts["return_retry_tries"]
)
return ret
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts["enable_legacy_startup_events"]:
@@ -3335,6 +3362,8 @@ class Syndic(Minion):
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
self.req_channel = salt.channel.client.ReqChannel.factory(self.opts)
self.async_req_channel = salt.channel.client.ReqChannel.factory(self.opts)
def _process_cmd_socket(self, payload):
if payload is not None and payload["enc"] == "aes":
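
A simplified sketch of the forwarding step the first hunk implements (shapes
are illustrative, not Salt's exact internals): the syndic lifts the
MoM-supplied ``auth_list`` out of the incoming job and republishes it to its
local minions, where the downstream master can enforce it:

    def forward_job(data, publish):
        # data: job dict arriving from the master of masters
        data.setdefault("tgt_type", "glob")                # default targeting
        kwargs = {"auth_list": data.pop("auth_list", [])}  # carry the MoM ACL along
        if "user" in data:                                 # optional passthrough field
            kwargs["user"] = data["user"]
        publish(data["tgt"], data["fun"], data.get("arg", []), **kwargs)

    forward_job(
        {"tgt": "*1", "fun": "test.ping", "auth_list": [{"*1": ["test.*"]}]},
        lambda tgt, fun, arg, **kw: print(tgt, fun, kw["auth_list"]),
    )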


@@ -748,16 +748,15 @@ class CkMinions:
"""
v_minions = set(self.check_minions(valid, "compound").get("minions", []))
if not v_minions:
# There are no valid minions, so it doesn't matter what we are
# targeting - this is a fail.
return False
if minions is None:
_res = self.check_minions(expr, tgt_type)
minions = set(_res["minions"])
else:
minions = set(minions)
return minions.issubset(v_minions)
d_bool = not bool(minions.difference(v_minions))
if len(v_minions) == len(minions) and d_bool:
return True
return d_bool
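
The rewritten check is effectively a subset test: publication proceeds only
when every targeted minion is inside the valid set (the explicit equal-length
branch returns the same value ``d_bool`` already holds, so the result is
equivalent to ``return d_bool`` alone). A standalone illustration of the set
arithmetic:

    v_minions = {"minion_a1", "minion_b1"}     # minions the ACL allows

    targeted = {"minion_a1"}                   # inside the allowed set
    assert not targeted.difference(v_minions)  # empty difference: publish proceeds

    targeted = {"minion_a1", "minion_a2"}      # strays outside the ACL
    assert targeted.difference(v_minions)      # non-empty: authorization error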
def match_check(self, regex, fun):
"""


@@ -0,0 +1,813 @@
import json
import pathlib
import tempfile
import time
import pytest
docker = pytest.importorskip("docker")
def json_output_to_dict(output):
"""
Convert a ``salt ... --out=json`` syndic return to a dictionary. Because
--out=json can emit several concatenated JSON documents, e.g. {...}\\n{...},
each document has to be parsed individually.
"""
output = output or ""
results = {}
for line in (
_ for _ in output.replace("\n}", "\n}\x1f").split("\x1f") if _.strip()
):
data = json.loads(line)
if isinstance(data, dict):
for minion in data:
# Filter out syndic minions since they won't show up in the future
if minion not in ("syndic_a", "syndic_b"):
results[minion] = data[minion]
return results
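# Editor's sketch of the parser above on concatenated --out=json documents:
#     json_output_to_dict('{\n  "minion_a1": true\n}\n{\n  "syndic_a": true\n}')
# returns {"minion_a1": True}; the syndic id is filtered out.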
def accept_keys(container, required_minions):
failure_time = time.time() + 20
while time.time() < failure_time:
container.run("salt-key -Ay")
res = container.run("salt-key -L --out=json")
if (
isinstance(res.data, dict)
and set(res.data.get("minions")) == required_minions
):
break
else:
pytest.skip(f"{container} unable to accept keys for {required_minions}")
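# Usage sketch: retries ``salt-key -Ay`` for up to 20 seconds and skips the
# module if the accepted-key list never converges, e.g.
#     accept_keys(docker_master, {"minion", "syndic_a", "syndic_b"})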
@pytest.fixture(scope="module")
def syndic_network():
try:
client = docker.from_env()
except docker.errors.DockerException as e:
# Docker failed, it's gonna be an environment issue, let's just skip
pytest.skip(f"Docker failed with error {e}")
pool = docker.types.IPAMPool(
subnet="172.27.13.0/24",
gateway="172.27.13.1",
)
ipam_config = docker.types.IPAMConfig(
pool_configs=[pool],
)
network = None
try:
network = client.networks.create(name="syndic_test_net", ipam=ipam_config)
yield network.name
finally:
if network is not None:
network.remove()
@pytest.fixture(scope="session")
def source_path():
x = pathlib.Path(__file__).parent.parent.parent.parent.parent / "salt"
return str(x)
@pytest.fixture(scope="module")
def config(source_path):
# Python >= 3.10 would allow the line below:
# with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path:
with tempfile.TemporaryDirectory() as tmp_path:
tmp_path = pathlib.Path(tmp_path)
master_dir = tmp_path / "master"
minion_dir = tmp_path / "minion"
syndic_a_dir = tmp_path / "syndic_a"
syndic_b_dir = tmp_path / "syndic_b"
minion_a1_dir = tmp_path / "minion_a1"
minion_a2_dir = tmp_path / "minion_a2"
minion_b1_dir = tmp_path / "minion_b1"
minion_b2_dir = tmp_path / "minion_b2"
for dir_ in (
master_dir,
minion_dir,
syndic_a_dir,
syndic_b_dir,
minion_a1_dir,
minion_a2_dir,
minion_b1_dir,
minion_b2_dir,
):
dir_.mkdir(parents=True, exist_ok=True)
(dir_ / "master.d").mkdir(exist_ok=True)
# minion.d is probably needed to prevent errors on tempdir cleanup
(dir_ / "minion.d").mkdir(exist_ok=True)
(dir_ / "pki").mkdir(exist_ok=True)
(master_dir / "master.d").mkdir(exist_ok=True)
master_config_path = master_dir / "master"
master_config_path.write_text(
"""
order_masters: True
publisher_acl:
bob:
- '*1':
- test.*
- file.touch
external_auth:
pam:
bob:
- '*1':
- test.*
- file.touch
nodegroups:
second_string: "minion_*2"
b_string: "minion_b*"
"""
)
minion_config_path = minion_dir / "minion"
minion_config_path.write_text("id: minion\nmaster: master")
syndic_a_minion_config_path = syndic_a_dir / "minion"
syndic_a_minion_config_path.write_text("id: syndic_a\nmaster: master")
syndic_a_master_config_path = syndic_a_dir / "master"
syndic_a_master_config_path.write_text(
"""
syndic_master: master
publisher_acl:
bob:
- '*1':
- test.*
- file.touch
external_auth:
pam:
bob:
- '*1':
- test.*
- file.touch
"""
)
minion_a1_config_path = minion_a1_dir / "minion"
minion_a1_config_path.write_text("id: minion_a1\nmaster: syndic_a")
minion_a2_config_path = minion_a2_dir / "minion"
minion_a2_config_path.write_text("id: minion_a2\nmaster: syndic_a")
syndic_b_minion_config_path = syndic_b_dir / "minion"
syndic_b_minion_config_path.write_text("id: syndic_b\nmaster: master")
syndic_b_master_config_path = syndic_b_dir / "master"
syndic_b_master_config_path.write_text("syndic_master: master")
minion_b1_config_path = minion_b1_dir / "minion"
minion_b1_config_path.write_text("id: minion_b1\nmaster: syndic_b")
minion_b2_config_path = minion_b2_dir / "minion"
minion_b2_config_path.write_text("id: minion_b2\nmaster: syndic_b")
yield {
"minion_dir": minion_dir,
"master_dir": master_dir,
"syndic_a_dir": syndic_a_dir,
"syndic_b_dir": syndic_b_dir,
"minion_a1_dir": minion_a1_dir,
"minion_a2_dir": minion_a2_dir,
"minion_b1_dir": minion_b1_dir,
"minion_b2_dir": minion_b2_dir,
}
@pytest.fixture(scope="module")
def docker_master(salt_factories, syndic_network, config, source_path):
config_dir = str(config["master_dir"])
container = salt_factories.get_container(
"master",
image_name="saltstack/salt:3005",
container_run_kwargs={
# "entrypoint": "salt-master -ldebug",
"entrypoint": "python -m http.server",
"network": syndic_network,
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
for user in ("bob", "fnord"):
container.run(f"adduser -D {user}")
container.run(f"passwd -d {user}")
container.run("apk add linux-pam-dev")
yield factory
@pytest.fixture(scope="module")
def docker_minion(salt_factories, syndic_network, config, source_path):
config_dir = str(config["minion_dir"])
container = salt_factories.get_container(
"minion",
image_name="saltstack/salt:3005",
container_run_kwargs={
# "entrypoint": "salt-minion",
"entrypoint": "python -m http.server",
"network": syndic_network,
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_syndic_a(salt_factories, config, syndic_network, source_path):
config_dir = str(config["syndic_a_dir"])
container = salt_factories.get_container(
"syndic_a",
image_name="saltstack/salt:3005",
container_run_kwargs={
# "entrypoint": "salt-master -ldebug",
"entrypoint": "python -m http.server",
"network": syndic_network,
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_syndic_b(salt_factories, config, syndic_network, source_path):
config_dir = str(config["syndic_b_dir"])
container = salt_factories.get_container(
"syndic_b",
image_name="saltstack/salt:3005",
container_run_kwargs={
# "entrypoint": "salt-master -ldebug",
"entrypoint": "python -m http.server",
"network": syndic_network,
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_minion_a1(salt_factories, config, syndic_network, source_path):
config_dir = str(config["minion_a1_dir"])
container = salt_factories.get_container(
"minion_a1",
image_name="saltstack/salt:3005",
container_run_kwargs={
"network": syndic_network,
# "entrypoint": "salt-minion -ldebug",
"entrypoint": "python -m http.server",
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_minion_a2(salt_factories, config, syndic_network, source_path):
config_dir = str(config["minion_a2_dir"])
container = salt_factories.get_container(
"minion_a2",
image_name="saltstack/salt:3005",
container_run_kwargs={
"network": syndic_network,
# "entrypoint": "salt-minion",
"entrypoint": "python -m http.server",
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_minion_b1(salt_factories, config, syndic_network, source_path):
config_dir = str(config["minion_b1_dir"])
container = salt_factories.get_container(
"minion_b1",
image_name="saltstack/salt:3005",
container_run_kwargs={
"network": syndic_network,
# "entrypoint": "salt-minion",
"entrypoint": "python -m http.server",
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def docker_minion_b2(salt_factories, config, syndic_network, source_path):
config_dir = str(config["minion_b2_dir"])
container = salt_factories.get_container(
"minion_b2",
image_name="saltstack/salt:3005",
container_run_kwargs={
"network": syndic_network,
# "entrypoint": "salt-minion",
"entrypoint": "python -m http.server",
"volumes": {
config_dir: {"bind": "/etc/salt", "mode": "z"},
source_path: {
"bind": "/usr/local/lib/python3.7/site-packages/salt/",
"mode": "z",
},
},
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
# container.container_start_check(confirm_container_started, container)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module", autouse=True)
def all_the_docker(
docker_master,
docker_minion,
docker_syndic_a,
docker_syndic_b,
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
):
try:
for s in (
docker_master,
docker_syndic_a,
docker_syndic_b,
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
docker_minion,
):
s.run("python3 -m pip install looseversion packaging")
# WORKAROUND
for s in (docker_master, docker_syndic_a, docker_syndic_b):
s.run("salt-master -d -ldebug")
for s in (
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
docker_minion,
):
s.run("salt-minion -d")
# END WORKAROUND
for s in (docker_syndic_a, docker_syndic_b):
s.run("salt-syndic -d")
failure_time = time.time() + 20
accept_keys(
container=docker_master, required_minions={"minion", "syndic_a", "syndic_b"}
)
accept_keys(
container=docker_syndic_a, required_minions={"minion_a1", "minion_a2"}
)
accept_keys(
container=docker_syndic_b, required_minions={"minion_b1", "minion_b2"}
)
for tries in range(30):
res = docker_master.run(r"salt \* test.ping -t20 --out=json")
results = json_output_to_dict(res.stdout)
if set(results).issuperset(
["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"]
):
break
else:
pytest.skip(f"Missing some minions: {sorted(results)}")
yield
finally:
# looks like we need to do this to actually let the test suite run as non-root.
for container in (
docker_minion,
docker_syndic_a,
docker_syndic_b,
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
):
try:
container.run("rm -rfv /etc/salt/")
# If you need to debug this ^^^^^^^
# use this vvvvvv
# res = container.run('rm -rfv /etc/salt/')
# print(container)
# print(res.stdout)
except docker.errors.APIError as e:
# if the container isn't running, there's nothing we can do
# at this point.
print(f"Docker failed removing /etc/salt: {e}")
@pytest.fixture(
params=[
("*", ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"]),
("minion", ["minion"]),
("minion_*", ["minion_a1", "minion_a2", "minion_b1", "minion_b2"]),
("minion_a*", ["minion_a1", "minion_a2"]),
("minion_b*", ["minion_b1", "minion_b2"]),
("*1", ["minion_a1", "minion_b1"]),
("*2", ["minion_a2", "minion_b2"]),
]
)
def all_the_minions(request):
yield request.param
@pytest.fixture(
params=[
("minion_a1", ["minion_a1"]),
("minion_b1", ["minion_b1"]),
("*1", ["minion_a1", "minion_b1"]),
("minion*1", ["minion_a1", "minion_b1"]),
]
)
def eauth_valid_minions(request):
yield request.param
@pytest.fixture(
params=[
"*",
"minion",
"minion_a2",
"minion_b2",
"syndic_a",
"syndic_b",
"*2",
"minion*",
"minion_a*",
"minion_b*",
]
)
def eauth_blocked_minions(request):
yield request.param
@pytest.fixture
def docker_minions(
docker_minion,
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
):
yield [
docker_minion,
docker_minion_a1,
docker_minion_a2,
docker_minion_b1,
docker_minion_b2,
]
@pytest.fixture(
params=[
"test.arg good_argument",
"test.arg bad_news",
"test.arg not_allowed",
"test.echo very_not_good",
"cmd.run 'touch /tmp/fun.txt'",
"file.touch /tmp/more_fun.txt",
"test.arg_repr this_is_whatever",
"test.arg_repr more whatever",
"test.arg_repr cool guy",
]
)
def all_the_commands(request):
yield request.param
@pytest.fixture(
params=[
"test.arg",
"test.echo",
]
)
def eauth_valid_commands(request):
yield request.param
@pytest.fixture(
params=[
"cmd.run",
"file.manage_file",
"test.arg_repr",
]
)
def eauth_invalid_commands(request):
yield request.param
@pytest.fixture(
params=[
"good_argument",
"good_things",
"good_super_awesome_stuff",
]
)
def eauth_valid_arguments(request):
# TODO: Should these be part of valid commands? I don't know yet! -W. Werner, 2022-12-01
yield request.param
@pytest.fixture(
params=[
"bad_news",
"not_allowed",
"very_not_good",
]
)
def eauth_invalid_arguments(request):
yield request.param
@pytest.fixture(
params=[
"G@id:minion_a1 and minion_b*",
"E@minion_[^b]1 and minion_b2",
"P@id:minion_[^b]. and minion",
# TODO: Do something better with these. Apparently lists work... weirdly -W. Werner, 2022-12-02
# "L@minion_* not L@minion_*2 and minion",
# "I@has_syndic:* not minion_b2 not minion_a2 and minion",
# "J@has_syndic:syndic_(a|b) not *2 and minion",
# TODO: I need to figure out a good way to get IPs -W. Werner, 2022-12-02
# "S@172.13.1.4 and S@172.13.1.5 and minion_*2",
# TODO: I don't have any range servers -W. Werner, 2022-12-02
# "((R@%minion_a1..2 and R@%minion_b1..2) not N@second_string) and minion",
]
)
def invalid_comprehensive_minion_targeting(request):
yield request.param
@pytest.fixture(
params=[
(
"G@id:minion or minion_a1 or E@minion_[^b]2 or L@minion_b1,minion_b2",
["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"],
),
(
"minion or E@minion_a[12] or N@b_string",
["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"],
),
(
"L@minion,minion_a1 or N@second_string or N@b_string",
["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"],
),
# TODO: I don't have pillar setup, nor do I know IPs, and also SECO range servers -W. Werner, 2022-12-02
# (
# "I@my_minion:minion and I@has_syndic:syndic_[^b] and S@172.13.1.4 and S@172.13.1.5",
# ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"],
# ),
# (
# "minion and R@%minion_a1..2 and N@b_string",
# ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"],
# ),
]
)
def comprehensive_minion_targeting(request):
# TODO: include SECO range? -W. Werner, 2022-12-01
yield request.param
@pytest.fixture(
params=[
("G@id:minion_a1 and minion_b1", ["minion_a1", "minion_b1"]),
("E@minion_[^b]1", ["minion_a1"]),
(
"P@id:minion_[^b].",
["minion_a1", "minion_a2"],
), # should this be this thing? or something different?
# TODO: Turns out that it doesn't exclude things? Not sure -W. Werner, 2022-12-02
(
"L@minion_a1,minion_a2,minion_b1 not minion_*2",
["minion_a1", "minion_a2", "minion_b1"],
),
# TODO: I don't have pillar data yet -W. Werner, 2022-12-02
# ("I@has_syndic:* not minion_b2 not minion_a2", ["minion_a1", "minion_b1"]),
# ("J@has_syndic:syndic_(a|b) not *2", ["minion_a1", "minion_b1"]),
# TODO: Need a different way to get IPs -W. Werner, 2022-12-02
# ( "S@172.13.1.4 and S@172.13.1.5", ["minion_a1", "minion_b1"]),
# TODO: Need a range server for these tests (see range targeting docs) -W. Werner, 2022-12-02
# ("(R@%minion_a1..2 and R@%minion_b1..2) not N@second_string", ["minion_a1", "minion_b1"]),
]
)
def valid_comprehensive_minion_targeting(request):
yield request.param
@pytest.fixture(
params=[
# TODO: not sure why this doesn't work. Pretty sure it's just the cli part -W. Werner, 2022-12-02
# ("G@id:minion_a1 and minion_b1", ["minion_a1", "minion_b1"]),
("E@minion_[^b]1", ["minion_a1"]),
(
"P@id:minion_[^a]1",
["minion_b1"],
),
("L@minion_a1,minion_b1 not minion_*2", ["minion_a1", "minion_b1"]),
# TODO: need to add pillars -W. Werner, 2022-12-02
# ("I@has_syndic:* not minion_b2 not minion_a2", ["minion_a1", "minion_b1"]),
# ("J@has_syndic:syndic_(a|b) not *2", ["minion_a1", "minion_b1"]),
# TODO: Need a different way to get IPs -W. Werner, 2022-12-02
# ( "S@172.13.1.4 and S@172.13.1.5", ["minion_a1", "minion_b1"]),
# TODO: Need a range server for these tests (see range targeting docs) -W. Werner, 2022-12-02
# ("(R@%minion_a1..2 and R@%minion_b1..2) not N@second_string", ["minion_a1", "minion_b1"]),
]
)
def valid_eauth_comprehensive_minion_targeting(request):
yield request.param
def test_root_user_should_be_able_to_call_any_and_all_minions_with_any_and_all_commands(
all_the_minions, all_the_commands, docker_master
):
target, expected_minions = all_the_minions
res = docker_master.run(
f"salt {target} {all_the_commands} -t 20 --out=json",
)
if "jid does not exist" in (res.stderr or ""):
# might be flaky, let's retry
res = docker_master.run(
f"salt {target} {all_the_commands} -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert sorted(results) == expected_minions, res.stdout
def test_eauth_user_should_be_able_to_target_valid_minions_with_valid_command(
eauth_valid_minions, eauth_valid_commands, eauth_valid_arguments, docker_master
):
target, expected_minions = eauth_valid_minions
res = docker_master.run(
f"salt -a pam --username bob --password '' {target} {eauth_valid_commands} {eauth_valid_arguments} -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert sorted(results) == expected_minions, res.stdout
def test_eauth_user_should_not_be_able_to_target_invalid_minions(
eauth_blocked_minions, docker_master, docker_minions
):
res = docker_master.run(
f"salt -a pam --username bob --password '' {eauth_blocked_minions} file.touch /tmp/bad_bad_file.txt -t 20 --out=json",
)
assert "Authorization error occurred." == res.data or res.data is None
for minion in docker_minions:
res = minion.run("test -f /tmp/bad_bad_file.txt")
file_exists = res.returncode == 0
assert not file_exists
@pytest.mark.skip(reason="Not sure about blocklist")
def test_eauth_user_should_not_be_able_to_target_valid_minions_with_invalid_commands(
eauth_valid_minions, eauth_invalid_commands, docker_master
):
tgt, _ = eauth_valid_minions
res = docker_master.run(
f"salt -a pam --username bob --password '' {tgt} {eauth_invalid_commands} -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert "Authorization error occurred" in res.stdout
assert sorted(results) == []
@pytest.mark.skip(reason="Not sure about blocklist")
def test_eauth_user_should_not_be_able_to_target_valid_minions_with_valid_commands_and_invalid_arguments(
eauth_valid_minions, eauth_valid_commands, eauth_invalid_arguments, docker_master
):
tgt, _ = eauth_valid_minions
res = docker_master.run(
f"salt -a pam --username bob --password '' -C '{tgt}' {eauth_valid_commands} {eauth_invalid_arguments} -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert "Authorization error occurred" in res.stdout
assert sorted(results) == []
def test_invalid_eauth_user_should_not_be_able_to_do_anything(
eauth_valid_minions, eauth_valid_commands, eauth_valid_arguments, docker_master
):
# TODO: Do we really need to run all of these tests for the invalid user? Maybe not! -W. Werner, 2022-12-01
tgt, _ = eauth_valid_minions
res = docker_master.run(
f"salt -a pam --username badguy --password '' -C '{tgt}' {eauth_valid_commands} {eauth_valid_arguments} -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert sorted(results) == []
def test_root_should_be_able_to_use_comprehensive_targeting(
comprehensive_minion_targeting, docker_master
):
tgt, expected_minions = comprehensive_minion_targeting
res = docker_master.run(
f"salt -C '{tgt}' test.version -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert sorted(results) == expected_minions
def test_eauth_user_should_be_able_to_target_valid_minions_with_valid_commands_comprehensively(
valid_eauth_comprehensive_minion_targeting, docker_master
):
tgt, expected_minions = valid_eauth_comprehensive_minion_targeting
res = docker_master.run(
f"salt -a pam --username bob --password '' -C '{tgt}' test.version -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert sorted(results) == expected_minions
def test_eauth_user_with_invalid_comprehensive_targeting_should_auth_failure(
invalid_comprehensive_minion_targeting, docker_master
):
res = docker_master.run(
f"salt -a pam --username fnord --password '' -C '{invalid_comprehensive_minion_targeting}' test.version -t 20 --out=json",
)
results = json_output_to_dict(res.stdout)
assert "Authorization error occurred" in res.stdout
assert sorted(results) == []


@@ -59,13 +59,17 @@ def test_connected_ids_remote_minions():
# These validate_tgt tests make the assumption that CkMinions.check_minions is
# correct. In other words, these tests are only worthwhile if check_minions is
# also correct.
def test_validate_tgt_should_return_false_when_no_valid_minions_have_been_found():
def test_validate_tgt_returns_true_when_no_valid_minions_have_been_found():
"""
CkMinions is only able to check against minions the master knows about. If
no minion keys have been accepted it will return True.
"""
ckminions = salt.utils.minions.CkMinions(opts={})
with patch(
"salt.utils.minions.CkMinions.check_minions", autospec=True, return_value={}
):
result = ckminions.validate_tgt("fnord", "fnord", "fnord", minions=[])
assert result is False
assert result is True
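# Editor's sketch of the complementary case, assuming the same patching
# approach as above: targeting any minion outside the valid set must fail.
def test_validate_tgt_should_return_false_when_target_is_not_a_subset_of_valid():
    ckminions = salt.utils.minions.CkMinions(opts={})
    with patch(
        "salt.utils.minions.CkMinions.check_minions",
        autospec=True,
        return_value={"minions": ["good_minion"]},
    ):
        result = ckminions.validate_tgt(
            "fnord", "fnord", "fnord", minions=["good_minion", "bad_minion"]
        )
    assert result is False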
@pytest.mark.parametrize(


@@ -272,8 +272,8 @@ class MasterACLTestCase(ModuleCase):
sys_doc_load = self.valid_clear_load
sys_doc_load["fun"] = "sys.doc"
self.clear.publish(sys_doc_load)
self.assertNotEqual(
self.fire_event_mock.call_args[0][0]["fun"], "sys.doc"
self.assertIn(
"error", self.fire_event_mock.call_args[0][0]
) # If sys.doc were to fire, this would match
def test_master_publish_group(self):
@@ -301,6 +301,7 @@ class MasterACLTestCase(ModuleCase):
# Did we fire it?
self.assertNotEqual(self.fire_event_mock.call_args[0][0]["fun"], "sys.doc")
@pytest.mark.skip_on_windows(reason="PAM eauth not available on Windows")
def test_master_publish_some_minions(self):
"""
Tests to ensure we can only target minions for which we
@@ -333,6 +334,7 @@ class MasterACLTestCase(ModuleCase):
self.valid_clear_load["user"] = "NOT_A_VALID_USERNAME"
self.valid_clear_load["fun"] = "test.ping"
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
@pytest.mark.skip_on_windows(reason="PAM eauth not available on Windows")
@@ -501,18 +503,22 @@ class MasterACLTestCase(ModuleCase):
}
)
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Wrong first arg
self.valid_clear_load["arg"] = ["TES", "any", "TEST1234"]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Missing the last arg
self.valid_clear_load["arg"] = ["TEST", "any"]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# No args
self.valid_clear_load["arg"] = []
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
@pytest.mark.skip_on_windows(reason="PAM eauth not available on Windows")
@@ -579,32 +585,39 @@ class MasterACLTestCase(ModuleCase):
}
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Missing kwarg value
self.valid_clear_load["arg"] = [
{"anything": "hello all", "none": "hello none", "__kwarg__": True}
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
self.valid_clear_load["arg"] = [{"__kwarg__": True}]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
self.valid_clear_load["arg"] = [{}]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
self.valid_clear_load["arg"] = []
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Missing kwarg allowing any value
self.valid_clear_load["arg"] = [
{"text": "KWMSG: a message", "none": "hello none", "__kwarg__": True}
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
self.valid_clear_load["arg"] = [
{"text": "KWMSG: a message", "anything": "hello all", "__kwarg__": True}
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
@pytest.mark.skip_on_windows(reason="PAM eauth not available on Windows")
@@ -686,6 +699,7 @@ class MasterACLTestCase(ModuleCase):
},
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Wrong kwarg value
self.valid_clear_load["arg"] = [
@@ -700,6 +714,7 @@ class MasterACLTestCase(ModuleCase):
},
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Missing arg
self.valid_clear_load["arg"] = [
@@ -712,6 +727,7 @@ class MasterACLTestCase(ModuleCase):
},
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])
# Missing kwarg
self.valid_clear_load["arg"] = [
@@ -721,6 +737,7 @@ class MasterACLTestCase(ModuleCase):
{"kwa": "kwarg #1", "one_more": "just one more", "__kwarg__": True},
]
self.clear.publish(self.valid_clear_load)
self.assertIn("error", self.fire_event_mock.mock_calls.pop(-1).args[0])
self.assertEqual(self.fire_event_mock.mock_calls, [])