Merge branch 'master' into fix-pillar-obfuscate-module

Gareth J. Greenaway 2022-10-21 12:23:29 -07:00 committed by GitHub
commit f2c0e14616
13 changed files with 45 additions and 150 deletions

View file

@@ -1149,6 +1149,7 @@ repos:
salt/ext/.*|
tests/.*
)$
additional_dependencies: ['importlib_metadata<5']
- repo: https://github.com/PyCQA/bandit
rev: "1.7.4"
hooks:
@@ -1158,6 +1159,7 @@ repos:
args: [--silent, -lll, --skip, B701]
files: ^tests/.*
exclude: ^tests/minionswarm\.py
additional_dependencies: ['importlib_metadata<5']
# <---- Security ---------------------------------------------------------------------------------------------------
# ----- Pre-Commit ------------------------------------------------------------------------------------------------>

View file

@@ -0,0 +1 @@
Removing all references to napalm-base, which is no longer supported.

View file

@@ -1 +0,0 @@
Pass the context to pillar ext modules so the values in it can be reused.

View file

@@ -168,7 +168,7 @@ module they are using.
Requisites Types
----------------
All requisite types have a corresponding :ref:`<requisite>_in <requisites-in>` form:
All requisite types have a corresponding :ref:`_in <requisites-in>` form:
* :ref:`require <requisites-require>`: Requires that a list of target states succeed before execution
* :ref:`onchanges <requisites-onchanges>`: Execute if any target states succeed with changes
@@ -185,8 +185,10 @@ Several requisite types have a corresponding :ref:`requisite_any <requisites-any
* ``onchanges_any``
* ``onfail_any``
Lastly, onfail has one special ``onfail_all`` form to account for when `AND`
logic is desired instead of the default `OR` logic of onfail/onfail_any (which
There is no combined form of :ref:`_any <requisites-any>` and :ref:`_in <requisites-in>` requisites, such as ``require_any_in``!
Lastly, onfail has one special ``onfail_all`` form to account for when ``AND``
logic is desired instead of the default ``OR`` logic of onfail/onfail_any (which
are equivalent).
All requisites define specific relationships and always work with the dependency
@@ -797,8 +799,8 @@ from ``all()`` to ``any()``.
cmd.run:
- name: /bin/false
In this example `A` will run because at least one of the requirements specified,
`B` or `C`, will succeed.
In this example ``A`` will run because at least one of the requirements specified,
``B`` or ``C``, will succeed.
.. code-block:: yaml

View file

@@ -950,7 +950,6 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = time.time()
self.context = {}
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
@@ -1138,7 +1137,7 @@ class MWorker(salt.utils.process.SignalHandlingProcess):
self.key,
)
self.clear_funcs.connect()
self.aes_funcs = AESFuncs(self.opts, context=self.context)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
@@ -1201,7 +1200,7 @@ class AESFuncs(TransportMethods):
"_file_envs",
)
def __init__(self, opts, context=None):
def __init__(self, opts):
"""
Create a new AESFuncs
@@ -1211,7 +1210,6 @@ class AESFuncs(TransportMethods):
:returns: Instance for handling AES operations
"""
self.opts = opts
self.context = context
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
@@ -1599,7 +1597,6 @@ class AESFuncs(TransportMethods):
pillarenv=load.get("pillarenv"),
extra_minion_data=load.get("extra_minion_data"),
clean_cache=load.get("clean_cache"),
context=self.context,
)
data = pillar.compile_pillar()
self.fs_.update_opts()
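
The hunks above revert the context plumbing on the master side: ``MWorker`` no longer keeps a per-process ``context`` dict, and ``AESFuncs`` no longer accepts or forwards one. A minimal before/after sketch (simplified; only the names shown in the diff are real):

.. code-block:: python

    # Before this merge (simplified): one dict was shared per worker process
    # and handed down so pillar ext modules could see it as __context__.
    context = {}
    aes_funcs = AESFuncs(opts, context=context)

    # After this merge: the keyword is gone and no shared state is threaded.
    aes_funcs = AESFuncs(opts)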

View file

@@ -186,22 +186,6 @@ def send(
salt-call event.send myco/mytag foo=Foo bar=Bar
salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}'
A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The
following rule in sudoers will allow the ``jenkins`` user to run only the
following command.
``/etc/sudoers`` (allow preserving the environment):
.. code-block:: text
jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send*
Call Jenkins via sudo (preserve the environment):
.. code-block:: bash
sudo -E salt-call event.send myco/jenkins/build/success with_env=[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]
"""
data_dict = {}

View file

@@ -7156,6 +7156,8 @@ def grep(path, pattern, *opts):
This function's return value is slated for refinement in future
versions of Salt
Windows does not support the ``grep`` functionality.
path
Path to the file to be searched
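
For context, ``file.grep`` is usually driven from the CLI; a usage sketch (the target, files, and patterns here are examples only, and extra ``grep`` options are passed after ``--``):

.. code-block:: bash

    salt '*' file.grep /etc/hosts '127.0.0.1'
    salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2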

View file

@@ -46,7 +46,6 @@ def get_pillar(
pillarenv=None,
extra_minion_data=None,
clean_cache=False,
context=None,
):
"""
Return the correct pillar driver based on the file_client option
@@ -82,7 +81,6 @@ def get_pillar(
pillar_override=pillar_override,
pillarenv=pillarenv,
clean_cache=clean_cache,
context=context,
)
return ptype(
opts,
@@ -94,7 +92,6 @@ def get_pillar(
pillar_override=pillar_override,
pillarenv=pillarenv,
extra_minion_data=extra_minion_data,
context=context,
)
@@ -312,7 +309,6 @@ class RemotePillar(RemotePillarMixin):
pillar_override=None,
pillarenv=None,
extra_minion_data=None,
context=None,
):
self.opts = opts
self.opts["saltenv"] = saltenv
@@ -337,7 +333,6 @@ class RemotePillar(RemotePillarMixin):
merge_lists=True,
)
self._closing = False
self.context = context
def compile_pillar(self):
"""
@@ -411,7 +406,6 @@ class PillarCache:
pillarenv=None,
extra_minion_data=None,
clean_cache=False,
context=None,
):
# Yes, we need all of these because we need to route to the Pillar object
# if we have no cache. This is another refactor target.
@@ -438,8 +432,6 @@ class PillarCache:
minion_cache_path=self._minion_cache_path(minion_id),
)
self.context = context
def _minion_cache_path(self, minion_id):
"""
Return the path to the cache file for the minion.
@@ -463,7 +455,6 @@ class PillarCache:
functions=self.functions,
pillar_override=self.pillar_override,
pillarenv=self.pillarenv,
context=self.context,
)
return fresh_pillar.compile_pillar()
@@ -539,7 +530,6 @@ class Pillar:
pillar_override=None,
pillarenv=None,
extra_minion_data=None,
context=None,
):
self.minion_id = minion_id
self.ext = ext
@@ -578,9 +568,7 @@ class Pillar:
if opts.get("pillar_source_merging_strategy"):
self.merge_strategy = opts["pillar_source_merging_strategy"]
self.ext_pillars = salt.loader.pillars(
ext_pillar_opts, self.functions, context=context
)
self.ext_pillars = salt.loader.pillars(ext_pillar_opts, self.functions)
self.ignored_pillars = {}
self.pillar_override = pillar_override or {}
if not isinstance(self.pillar_override, dict):
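
These hunks drop the ``context`` argument all the way down to ``salt.loader.pillars``, which packed it into ext pillar modules as ``__context__`` (the removed test further below asserts exactly that). A hypothetical ext pillar module relying on the removed wiring (the module's cache key and values are invented):

.. code-block:: python

    # Hypothetical ext_pillar: with the removed wiring, __context__ persisted
    # across pillar compilations inside one master worker, so expensive
    # lookups could be cached between calls.
    def ext_pillar(minion_id, pillar, *args, **kwargs):
        cache = __context__.setdefault("my_ext_pillar_cache", {})
        if minion_id not in cache:
            cache[minion_id] = {"role": "web"}  # stand-in for a real lookup
        return {"cached_role": cache[minion_id]["role"]}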

View file

@@ -30,7 +30,6 @@ try:
# https://github.com/napalm-automation/napalm
# pylint: disable=unused-import,no-name-in-module
import napalm
import napalm.base as napalm_base
# pylint: enable=unused-import,no-name-in-module
HAS_NAPALM = True
@@ -315,16 +314,8 @@ def get_device(opts, salt_obj=None):
"""
log.debug("Setting up NAPALM connection")
network_device = get_device_opts(opts, salt_obj=salt_obj)
provider_lib = napalm_base
provider_lib = napalm.base
if network_device.get("PROVIDER"):
# In case the user requires a different provider library,
# other than napalm-base.
# For example, if napalm-base does not satisfy the requirements
# and needs to be enhanced with more specific features,
# we may need to define a custom library on top of napalm-base
# with the constraint that it still needs to provide the
# `get_network_driver` function. However, even this can be
# extended later, if really needed.
# Configuration example:
# provider: napalm_base_example
try:
@@ -333,7 +324,6 @@ def get_device(opts, salt_obj=None):
log.error(
"Unable to import %s", network_device.get("PROVIDER"), exc_info=True
)
log.error("Falling back to napalm-base")
_driver_ = provider_lib.get_network_driver(network_device.get("DRIVER_NAME"))
try:
network_device["DRIVER"] = _driver_(
@@ -346,7 +336,7 @@ def get_device(opts, salt_obj=None):
network_device.get("DRIVER").open()
# no exception raised here, means connection established
network_device["UP"] = True
except napalm_base.exceptions.ConnectionException as error:
except napalm.base.exceptions.ConnectionException as error:
base_err_msg = "Cannot connect to {hostname}{port} as {username}.".format(
hostname=network_device.get("HOSTNAME", "[unspecified hostname]"),
port=(
@@ -360,7 +350,7 @@ def get_device(opts, salt_obj=None):
)
log.error(base_err_msg)
log.error("Please check error: %s", error)
raise napalm_base.exceptions.ConnectionException(base_err_msg)
raise napalm.base.exceptions.ConnectionException(base_err_msg)
return network_device
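
Since the ``napalm-base`` fallback is gone, driver resolution goes through ``napalm.base`` directly. A standalone sketch of that call path (the driver name, host, and credentials are placeholders):

.. code-block:: python

    import napalm.base

    # Resolve a driver class by name and open a connection, mirroring
    # what get_device() does internally.
    driver_cls = napalm.base.get_network_driver("eos")  # example driver name
    device = driver_cls(
        hostname="198.51.100.1",
        username="admin",
        password="secret",
        timeout=60,
    )
    try:
        device.open()
        print(device.get_facts())  # any getter supported by the driver
    finally:
        device.close()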
@@ -420,7 +410,7 @@ def proxy_napalm_wrap(func):
# in order to make sure we are editing the same session.
try:
wrapped_global_namespace["napalm_device"] = get_device(opts)
except napalm_base.exceptions.ConnectionException as nce:
except napalm.base.exceptions.ConnectionException as nce:
log.error(nce)
return "{base_msg}. See log for details.".format(
base_msg=str(nce.msg)
@@ -490,7 +480,7 @@ def proxy_napalm_wrap(func):
wrapped_global_namespace["napalm_device"] = get_device(
opts, salt_obj=_salt_obj
)
except napalm_base.exceptions.ConnectionException as nce:
except napalm.base.exceptions.ConnectionException as nce:
log.error(nce)
return "{base_msg}. See log for details.".format(
base_msg=str(nce.msg)

View file

@@ -124,9 +124,11 @@ class MacUserModuleTest(ModuleCase):
self.assertEqual(fullname_info["fullname"], "Foo Bar")
# Test mac_user.chgroups
pre_info = self.run_function("user.info", [CHANGE_USER])["groups"]
expected = pre_info + ["wheel"]
self.run_function("user.chgroups", [CHANGE_USER, "wheel"])
groups_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(groups_info["groups"], ["wheel"])
self.assertEqual(groups_info["groups"], expected)
except AssertionError:
self.run_function("user.delete", [CHANGE_USER])

View file

@@ -1,19 +1,38 @@
import random

import pytest
from pytestshellutils.exceptions import FactoryTimeout

from salt.utils.platform import spawning_platform

pytestmark = [pytest.mark.slow_test]


def run_salt_cmd(salt_cli, *args, **kwargs):
    # Spawning platforms (e.g. Windows/macOS) start child processes more
    # slowly, so allow the CLI double its normal timeout there.
    timeout = salt_cli.timeout
    if spawning_platform():
        timeout = salt_cli.timeout * 2
    kwargs["_timeout"] = timeout
    try:
        return salt_cli.run(*args, **kwargs)
    except FactoryTimeout:
        if spawning_platform():
            pytest.skip("Salt command timed out, skipping on spawning platform")
        raise  # on other platforms a timeout is still a hard failure


def test_ping(minion_swarm, salt_cli):
    ret = salt_cli.run("test.ping", minion_tgt="*")
    ret = run_salt_cmd(salt_cli, "test.ping", minion_tgt="*")
    assert ret.data
    for minion in minion_swarm:
        assert minion.id in ret.data
        minion_ret = ret.data[minion.id]
        # Sometimes the command times out but doesn't fail, so we catch it
        if isinstance(minion_ret, str) and "Minion did not return" in minion_ret:
            continue
        assert ret.data[minion.id] is True


def test_ping_one(minion_swarm, salt_cli):
    minion = random.choice(minion_swarm)
    ret = salt_cli.run("test.ping", minion_tgt=minion.id)
    ret = run_salt_cmd(salt_cli, "test.ping", minion_tgt=minion.id)
    assert ret.data is True

View file

@@ -1,7 +1,7 @@
import time
import salt.master
from tests.support.mock import MagicMock, patch
from tests.support.mock import patch
def test_fileserver_duration():
@@ -14,90 +14,3 @@ def test_fileserver_duration():
    update.called_once()
    # Timeout is 1 second
    assert 2 > end - start > 1


def test_mworker_pass_context():
    """
    Test that __context__ is passed through to the pillar ext module loader.
    """
    req_channel_mock = MagicMock()
    local_client_mock = MagicMock()

    opts = {
        "req_server_niceness": None,
        "mworker_niceness": None,
        "sock_dir": "/tmp",
        "conf_file": "/tmp/fake_conf",
        "transport": "zeromq",
        "fileserver_backend": ["roots"],
        "file_client": "local",
        "pillar_cache": False,
        "state_top": "top.sls",
        "pillar_roots": {},
    }

    data = {
        "id": "MINION_ID",
        "grains": {},
        "saltenv": None,
        "pillarenv": None,
        "pillar_override": {},
        "extra_minion_data": {},
        "ver": "2",
        "cmd": "_pillar",
    }

    test_context = {"testing": 123}

    def mworker_bind_mock():
        mworker.aes_funcs.run_func(data["cmd"], data)

    with patch("salt.client.get_local_client", local_client_mock), patch(
        "salt.master.ClearFuncs", MagicMock()
    ), patch("salt.minion.MasterMinion", MagicMock()), patch(
        "salt.utils.verify.valid_id", return_value=True
    ), patch(
        "salt.loader.matchers", MagicMock()
    ), patch(
        "salt.loader.render", MagicMock()
    ), patch(
        "salt.loader.utils", MagicMock()
    ), patch(
        "salt.loader.fileserver", MagicMock()
    ), patch(
        "salt.loader.minion_mods", MagicMock()
    ), patch(
        "salt.loader.LazyLoader", MagicMock()
    ) as loader_pillars_mock:
        mworker = salt.master.MWorker(opts, {}, {}, [req_channel_mock])

        with patch.object(mworker, "_MWorker__bind", mworker_bind_mock), patch.dict(
            mworker.context, test_context
        ):
            mworker.run()
            assert (
                loader_pillars_mock.call_args_list[0][1].get("pack").get("__context__")
                == test_context
            )

        loader_pillars_mock.reset_mock()

        # Repeat with the pillar cache enabled to cover the PillarCache path.
        opts.update(
            {
                "pillar_cache": True,
                "pillar_cache_backend": "file",
                "pillar_cache_ttl": 1000,
                "cachedir": "/tmp",
            }
        )
        mworker = salt.master.MWorker(opts, {}, {}, [req_channel_mock])
        with patch.object(mworker, "_MWorker__bind", mworker_bind_mock), patch.dict(
            mworker.context, test_context
        ), patch("salt.utils.cache.CacheFactory.factory", MagicMock()):
            mworker.run()
            assert (
                loader_pillars_mock.call_args_list[0][1].get("pack").get("__context__")
                == test_context
            )

View file

@@ -200,7 +200,6 @@ class BuildoutTestCase(Base):
def test_get_bootstrap_url(self):
for path in [
os.path.join(self.tdir, "var/ver/1/dumppicked"),
os.path.join(self.tdir, "var/ver/1/bootstrap"),
os.path.join(self.tdir, "var/ver/1/versions"),
]:
self.assertEqual(
@@ -211,7 +210,6 @@ class BuildoutTestCase(Base):
for path in [
os.path.join(self.tdir, "/non/existing"),
os.path.join(self.tdir, "var/ver/2/versions"),
os.path.join(self.tdir, "var/ver/2/bootstrap"),
os.path.join(self.tdir, "var/ver/2/default"),
]:
self.assertEqual(
@@ -224,7 +222,6 @@ class BuildoutTestCase(Base):
def test_get_buildout_ver(self):
for path in [
os.path.join(self.tdir, "var/ver/1/dumppicked"),
os.path.join(self.tdir, "var/ver/1/bootstrap"),
os.path.join(self.tdir, "var/ver/1/versions"),
]:
self.assertEqual(
@@ -233,7 +230,6 @@ class BuildoutTestCase(Base):
for path in [
os.path.join(self.tdir, "/non/existing"),
os.path.join(self.tdir, "var/ver/2/versions"),
os.path.join(self.tdir, "var/ver/2/bootstrap"),
os.path.join(self.tdir, "var/ver/2/default"),
]:
self.assertEqual(