Mirror of https://github.com/saltstack/salt.git
Merge branch '2017.7' into module_run_compare
This commit is contained in: commit aefc773c2f
82 changed files with 6816 additions and 866 deletions
@@ -10795,6 +10795,7 @@ cmd_whitelist_glob:
 .UNINDENT
 .UNINDENT
 .SS Thread Settings
 .SS \fBmultiprocessing\fP
 .sp
 Default: \fBTrue\fP
 .sp
@@ -2337,11 +2337,14 @@ Thread Settings

+.. conf_minion:: multiprocessing
+
``multiprocessing``
-------------------

Default: ``True``

-If `multiprocessing` is enabled when a minion receives a
+If ``multiprocessing`` is enabled when a minion receives a
publication a new process is spawned and the command is executed therein.
-Conversely, if `multiprocessing` is disabled the new publication will be
+Conversely, if ``multiprocessing`` is disabled the new publication will be
executed in a thread.
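For reference, a minimal minion-config sketch (values hypothetical; the default is ``True``) that disables ``multiprocessing`` so publications run in threads:

.. code-block:: yaml

    # /etc/salt/minion
    multiprocessing: False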
|
@ -25,6 +25,9 @@ configuration:
|
|||
- web*:
|
||||
- test.*
|
||||
- pkg.*
|
||||
# Allow managers to use saltutil module functions
|
||||
manager_.*:
|
||||
- saltutil.*
|
||||
|
||||
Permission Issues
|
||||
-----------------
|
||||
|
|
|
@@ -1,5 +1,5 @@
-salt.runners.auth module
-========================
+salt.runners.auth
+=================

.. automodule:: salt.runners.auth
    :members:

@@ -1,5 +1,5 @@
-salt.runners.digicertapi module
-===============================
+salt.runners.digicertapi
+========================

.. automodule:: salt.runners.digicertapi
    :members:

@@ -1,5 +1,5 @@
-salt.runners.event module
-=========================
+salt.runners.event
+==================

.. automodule:: salt.runners.event
    :members:

@@ -1,5 +1,5 @@
-salt.runners.mattermost module
-==============================
+salt.runners.mattermost
+=======================

**Note for 2017.7 releases!**

@@ -1,5 +1,5 @@
-salt.runners.smartos_vmadm module
-=================================
+salt.runners.smartos_vmadm
+==========================

.. automodule:: salt.runners.smartos_vmadm
    :members:

@@ -1,5 +1,5 @@
-salt.runners.vault module
-=========================
+salt.runners.vault
+==================

.. automodule:: salt.runners.vault
    :members:

@@ -1,5 +1,5 @@
-salt.runners.venafiapi module
-=============================
+salt.runners.venafiapi
+======================

.. automodule:: salt.runners.venafiapi
    :members:

@@ -1,5 +1,5 @@
-salt.runners.vistara module
-===========================
+salt.runners.vistara
+====================

.. automodule:: salt.runners.vistara
    :members:
@@ -31,7 +31,7 @@ documentation for more information.
.. _github-pull-request:

Sending a GitHub pull request
-=============================
+-----------------------------

Sending pull requests on GitHub is the preferred method for receiving
contributions. The workflow advice below mirrors `GitHub's own guide <GitHub

@@ -66,7 +66,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
   .. code-block:: bash

       git fetch upstream
-       git checkout -b fix-broken-thing upstream/2016.3
+       git checkout -b fix-broken-thing upstream/2016.11

   If you're working on a feature, create your branch from the develop branch.

@@ -130,7 +130,7 @@ Fork a Repo Guide_>`_ and is well worth reading.
   .. code-block:: bash

       git fetch upstream
-       git rebase upstream/2016.3 fix-broken-thing
+       git rebase upstream/2016.11 fix-broken-thing
       git push -u origin fix-broken-thing

   or

@@ -170,9 +170,9 @@ Fork a Repo Guide_>`_ and is well worth reading.
       https://github.com/my-account/salt/pull/new/fix-broken-thing

#. If your branch is a fix for a release branch, choose that as the base
-   branch (e.g. ``2016.3``),
+   branch (e.g. ``2016.11``),

-       https://github.com/my-account/salt/compare/saltstack:2016.3...fix-broken-thing
+       https://github.com/my-account/salt/compare/saltstack:2016.11...fix-broken-thing

   If your branch is a feature, choose ``develop`` as the base branch,
@@ -205,80 +205,206 @@ Fork a Repo Guide_>`_ and is well worth reading.

.. _which-salt-branch:

Which Salt branch?
==================

-GitHub will open pull requests against Salt's main branch, ``develop``, by
-default. Ideally, features should go into ``develop`` and bug fixes and
-documentation changes should go into the oldest supported release branch
-affected by the bug or documentation update. See
-:ref:`Sending a GitHub pull request <github-pull-request>`.
-
-If you have a bug fix or doc change and have already forked your working
-branch from ``develop`` and do not know how to rebase your commits against
-another branch, then submit it to ``develop`` anyway and we'll be sure to
-back-port it to the correct place.
-
-The current release branch
---------------------------
-
-The current release branch is the most recent stable release. Pull requests
-containing bug fixes should be made against the release branch.
-
-The branch name will be a date-based name such as ``2016.3``.
-
-Bug fixes are made on this branch so that minor releases can be cut from this
-branch without introducing surprises and new features. This approach maximizes
-stability.
-
-The Salt development team will "merge-forward" any fixes made on the release
-branch to the ``develop`` branch once the pull request has been accepted. This
-keeps the fix in isolation on the release branch and also keeps the ``develop``
-branch up-to-date.
-
-.. note:: Closing GitHub issues from commits
-
-   This "merge-forward" strategy requires that `the magic keywords to close a
-   GitHub issue <Closing issues via commit message_>`_ appear in the commit
-   message text directly. Only including the text in a pull request will not
-   close the issue.
-
-   GitHub will close the referenced issue once the *commit* containing the
-   magic text is merged into the default branch (``develop``). Any magic text
-   input only into the pull request description will not be seen at the
-   Git-level when those commits are merged-forward. In other words, only the
-   commits are merged-forward and not the pull request.
-
-The ``develop`` branch
+Salt's Branch Topology
----------------------

+There are three different kinds of branches in use: develop, main release
+branches, and dot release branches.
+
+- All feature work should go into the ``develop`` branch.
+- Bug fixes and documentation changes should go into the oldest supported
+  **main** release branch affected by the bug or documentation change.
+  Main release branches are named after a year and month, such as
+  ``2016.11`` and ``2017.7``.
+- Hot fixes, as determined by SaltStack's release team, should be submitted
+  against **dot** release branches. Dot release branches are named after a
+  year, month, and version. Examples include ``2016.11.8`` and ``2017.7.2``.
+
+.. note::
+
+   GitHub will open pull requests against Salt's main branch, ``develop``,
+   by default. Be sure to check which branch is selected when creating the
+   pull request.
+
+The Develop Branch
+==================

The ``develop`` branch is unstable and bleeding-edge. Pull requests containing
feature additions or non-bug-fix changes should be made against the ``develop``
branch.

-The Salt development team will back-port bug fixes made to ``develop`` to the
-current release branch if the contributor cannot create the pull request
-against that branch.
-
-Release Branches
-----------------
-
-For each release, a branch will be created when the SaltStack release team is
-ready to tag. The release branch is created from the parent branch and will be
-the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release
-branch was created from the ``2017.7`` parent branch and the ``v2017.7.1``
-release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching
-strategy will allow for more stability when there is a need for a re-tag during
-the testing phase of the release process.
-
-Once the release branch is created, the fixes required for a given release, as
-determined by the SaltStack release team, will be added to this branch. All
-commits in this branch will be merged forward into the parent branch as well.
+.. note::
+
+   If you have a bug fix or documentation change and have already forked your
+   working branch from ``develop`` and do not know how to rebase your commits
+   against another branch, then submit it to ``develop`` anyway. SaltStack's
+   development team will be happy to back-port it to the correct branch.
+
+   **Please make sure you let the maintainers know that the pull request needs
+   to be back-ported.**
+
+Main Release Branches
+=====================
+
+The current release branch is the most recent stable release. Pull requests
+containing bug fixes or documentation changes should be made against the main
+release branch that is affected.
+
+The branch name will be a date-based name such as ``2016.11``.
+
+Bug fixes are made on this branch so that dot release branches can be cut from
+the main release branch without introducing surprises and new features. This
+approach maximizes stability.
+
+Dot Release Branches
+====================
+
+Prior to tagging an official release, a branch will be created when the SaltStack
+release team is ready to tag. The dot release branch is created from a main release
+branch. The dot release branch will be the same name as the tag minus the ``v``.
+For example, the ``2017.7.1`` dot release branch was created from the ``2017.7``
+main release branch. The ``v2017.7.1`` release was tagged at the ``HEAD`` of the
+``2017.7.1`` branch.
+
+This branching strategy will allow for more stability when there is a need for
+a re-tag during the testing phase of the release process and further increases
+stability.
+
+Once the dot release branch is created, the fixes required for a given release,
+as determined by the SaltStack release team, will be added to this branch. All
+commits in this branch will be merged forward into the main release branch as
+well.
+
+Merge Forward Process
+=====================
+
+The Salt repository follows a "Merge Forward" policy. The merge-forward
+behavior means that changes submitted to older main release branches will
+automatically be "merged-forward" into the newer branches.
+
+For example, a pull request is merged into ``2016.11``. Then, the entire
+``2016.11`` branch is merged-forward into the ``2017.7`` branch, and the
+``2017.7`` branch is merged-forward into the ``develop`` branch.
+
+This process makes it easy for contributors to make only one pull request
+against an older branch, but allows the change to propagate to all **main**
+release branches.
+
+The merge-forward work-flow applies to all main release branches and the
+operation runs continuously.
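In practice, a merge-forward is an ordinary Git merge between branches; a minimal sketch of the equivalent manual operation, using the branch names from the example above:

.. code-block:: bash

    # Merge an older main release branch forward into the newer one
    git checkout 2017.7
    git merge 2016.11    # resolve any conflicts, then commit
    # ...and continue up the chain
    git checkout develop
    git merge 2017.7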
+Merge-Forwards for Dot Release Branches
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The merge-forward policy applies to dot release branches as well, but has a
+slightly different behavior. If a change is submitted to a **dot** release
+branch, the dot release branch will be merged into its parent **main**
+release branch.
+
+For example, a pull request is merged into the ``2017.7.2`` release branch.
+Then, the entire ``2017.7.2`` branch is merged-forward into the ``2017.7``
+branch. From there, the merge-forward process continues as normal.
+
+The only way in which dot release branches differ from main release branches
+in regard to merge-forwards is that once a dot release branch is created
+from the main release branch, the dot release branch does not receive
+merge-forwards.
+
+.. note::
+
+   The merge forward process for dot release branches is one-way:
+   dot release branch --> main release branch.
+
+Closing GitHub issues from commits
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This "merge-forward" strategy requires that `the magic keywords to close a
+GitHub issue <Closing issues via commit message_>`_ appear in the commit
+message text directly. Only including the text in a pull request will not
+close the issue.
+
+GitHub will close the referenced issue once the *commit* containing the
+magic text is merged into the default branch (``develop``). Any magic text
+input only into the pull request description will not be seen at the
+Git-level when those commits are merged-forward. In other words, only the
+commits are merged-forward and not the pull request text.
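For example, a commit message such as the following (issue number hypothetical) will close the referenced issue once the commit lands in ``develop``:

.. code-block:: bash

    # "Fixes #12345" is one of GitHub's issue-closing keywords
    git commit -m "Fix fileserver symlink handling

    Fixes #12345"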
+.. _backporting-pull-requests:
+
+Backporting Pull Requests
+=========================
+
+If a bug is fixed on ``develop`` and the bug is also present on a
+currently-supported release branch, it will need to be back-ported to an
+applicable branch.
+
+.. note:: Most Salt contributors can skip these instructions
+
+   These instructions do not need to be read in order to contribute to the
+   Salt project! The SaltStack team will back-port fixes on behalf of
+   contributors in order to keep the contribution process easy.
+
+   These instructions are intended for frequent Salt contributors, advanced
+   Git users, SaltStack employees, or independent souls who wish to back-port
+   changes themselves.
+
+It is often easiest to fix a bug on the oldest supported release branch and
+then merge that branch forward into ``develop`` (as described earlier in this
+document). When that is not possible the fix must be back-ported, or copied,
+into any other affected branches.
+
+These steps assume a pull request ``#1234`` has been merged into ``develop``,
+and ``upstream`` is the name of the remote pointing to the main Salt repo.
+
+#. Identify the oldest supported release branch that is affected by the bug.
+
+#. Create a new branch for the back-port by reusing the same branch from the
+   original pull request.
+
+   Name the branch ``bp-<NNNN>`` and use the number of the original pull
+   request.
+
+   .. code-block:: bash
+
+       git fetch upstream refs/pull/1234/head:bp-1234
+       git checkout bp-1234
+
+#. Find the parent commit of the original pull request.
+
+   The parent commit of the original pull request must be known in order to
+   rebase onto a release branch. The easiest way to find this is on GitHub.
+
+   Open the original pull request on GitHub and find the first commit in the
+   list of commits. Select and copy the SHA for that commit. The parent of
+   that commit can be specified by appending ``~1`` to the end (a
+   command-line alternative is sketched after this list).
+
+#. Rebase the new branch on top of the release branch.
+
+   * ``<release-branch>`` is the branch identified in step #1.
+
+   * ``<orig-base>`` is the SHA identified in step #3 -- don't forget to add
+     ``~1`` to the end!
+
+   .. code-block:: bash
+
+       git rebase --onto <release-branch> <orig-base> bp-1234
+
+   Note, release branches prior to ``2016.11`` will not be able to make use of
+   rebase and must use cherry-picking instead.
+
+#. Push the back-port branch to GitHub and open a new pull request.
+
+   Opening a pull request for the back-port allows for the test suite and
+   normal code-review process.
+
+   .. code-block:: bash
+
+       git push -u origin bp-1234
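The parent commit from step 3 can also be found from the command line; a small sketch, where ``<first-pr-commit-sha>`` is a placeholder for the SHA copied from the pull request:

.. code-block:: bash

    # Print the SHA of the commit immediately preceding the pull request
    git rev-parse <first-pr-commit-sha>~1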
Keeping Salt Forks in Sync
-==========================
+--------------------------

-Salt is advancing quickly. It is therefore critical to pull upstream changes
+Salt advances quickly. It is therefore critical to pull upstream changes
from upstream into your fork on a regular basis. Nothing is worse than putting
hard work into a pull request only to see bunches of merge conflicts because it
has diverged too far from upstream.

@@ -340,92 +466,53 @@ the name of the main `saltstack/salt`_ repository.
the current release branch.

Posting patches to the mailing list
-===================================
+-----------------------------------

Patches will also be accepted by email. Format patches using `git
format-patch`_ and send them to the `salt-users`_ mailing list. The contributor
will then get credit for the patch, and the Salt community will have an archive
of the patch and a place for discussion.

-.. _backporting-pull-requests:
-
-Backporting Pull Requests
-=========================
-
-If a bug is fixed on ``develop`` and the bug is also present on a
-currently-supported release branch it will need to be back-ported to all
-applicable branches.
-
-.. note:: Most Salt contributors can skip these instructions
-
-   These instructions do not need to be read in order to contribute to the
-   Salt project! The SaltStack team will back-port fixes on behalf of
-   contributors in order to keep the contribution process easy.
-
-   These instructions are intended for frequent Salt contributors, advanced
-   Git users, SaltStack employees, or independent souls who wish to back-port
-   changes themselves.
-
-It is often easiest to fix a bug on the oldest supported release branch and
-then merge that branch forward into ``develop`` (as described earlier in this
-document). When that is not possible the fix must be back-ported, or copied,
-into any other affected branches.
-
-These steps assume a pull request ``#1234`` has been merged into ``develop``.
-And ``upstream`` is the name of the remote pointing to the main Salt repo.
-
-1. Identify the oldest supported release branch that is affected by the bug.
-
-2. Create a new branch for the back-port by reusing the same branch from the
-   original pull request.
-
-   Name the branch ``bp-<NNNN>`` and use the number of the original pull
-   request.
-
-   .. code-block:: bash
-
-       git fetch upstream refs/pull/1234/head:bp-1234
-       git checkout bp-1234
-
-3. Find the parent commit of the original pull request.
-
-   The parent commit of the original pull request must be known in order to
-   rebase onto a release branch. The easiest way to find this is on GitHub.
-
-   Open the original pull request on GitHub and find the first commit in the
-   list of commits. Select and copy the SHA for that commit. The parent of
-   that commit can be specified by appending ``~1`` to the end.
-
-4. Rebase the new branch on top of the release branch.
-
-   * ``<release-branch>`` is the branch identified in step #1.
-
-   * ``<orig-base>`` is the SHA identified in step #3 -- don't forget to add
-     ``~1`` to the end!
-
-   .. code-block:: bash
-
-       git rebase --onto <release-branch> <orig-base> bp-1234
-
-   Note, release branches prior to ``2016.3`` will not be able to make use of
-   rebase and must use cherry-picking instead.
-
-5. Push the back-port branch to GitHub and open a new pull request.
-
-   Opening a pull request for the back-port allows for the test suite and
-   normal code-review process.
-
-   .. code-block:: bash
-
-       git push -u origin bp-1234
-
Issue and Pull Request Labeling System
-======================================
+--------------------------------------

SaltStack uses several labeling schemes to help facilitate code contributions
and bug resolution. See the :ref:`Labels and Milestones
<labels-and-milestones>` documentation for more information.

+Mentionbot
+----------
+
+SaltStack runs a mention-bot which notifies contributors who might be able
+to help review incoming pull-requests based on their past contribution to
+files which are being changed.
+
+If you do not wish to receive these notifications, please add your GitHub
+handle to the blacklist line in the ``.mention-bot`` file located in the
+root of the Salt repository.
+
+.. _probot-gpg-verification:
+
+GPG Verification
+----------------
+
+SaltStack has enabled `GPG Probot`_ to enforce GPG signatures for all
+commits included in a Pull Request.
+
+In order for the GPG verification status check to pass, *every* contributor in
+the pull request must:
+
+- Set up a GPG key on local machine
+- Sign all commits in the pull request with key
+- Link key with GitHub account
+
+This applies to all commits in the pull request.
+
+GitHub hosts a number of `help articles`_ for creating a GPG key, using the
+GPG key with ``git`` locally, and linking the GPG key to your GitHub account.
+Once these steps are completed, the commit signing verification will look like
+the example in GitHub's `GPG Signature Verification feature announcement`_.
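As an illustration, a minimal local signing setup might look like the following sketch (the key ID is a placeholder; see the linked help articles for full instructions):

.. code-block:: bash

    # Tell git which GPG key to sign with (placeholder key ID)
    git config --global user.signingkey 3AA5C34371567BD2
    # Sign a single commit
    git commit -S -m "Fix broken thing"
    # Or sign every commit by default
    git config --global commit.gpgsign true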
.. _`saltstack/salt`: https://github.com/saltstack/salt
.. _`GitHub Fork a Repo Guide`: https://help.github.com/articles/fork-a-repo
.. _`GitHub issue tracker`: https://github.com/saltstack/salt/issues

@@ -434,14 +521,6 @@ and bug resolution. See the :ref:`Labels and Milestones
.. _`Closing issues via commit message`: https://help.github.com/articles/closing-issues-via-commit-messages
.. _`git format-patch`: https://www.kernel.org/pub/software/scm/git/docs/git-format-patch.html
.. _salt-users: https://groups.google.com/forum/#!forum/salt-users

-Mentionbot
-==========
-
-SaltStack runs a mention-bot which notifies contributors who might be able
-to help review incoming pull-requests based on their past contribution to
-files which are being changed.
-
-If you do not wish to receive these notifications, please add your GitHub
-handle to the blacklist line in the `.mention-bot` file located in the
-root of the Salt repository.
+.. _GPG Probot: https://probot.github.io/apps/gpg/
+.. _help articles: https://help.github.com/articles/signing-commits-with-gpg/
+.. _GPG Signature Verification feature announcement: https://github.com/blog/2144-gpg-signature-verification
@@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt
          port: 5959
          proto: tcp

-Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines.
+Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. This option should be formatted as a list of directories to search, such as:
+
+.. code-block:: yaml
+
+    engines_dirs:
+      - /home/bob/engines

Writing an Engine
=================
@@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described
Installation from the Official SaltStack Repository
===================================================

-Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the
+Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the
Official SaltStack repository.

Instructions are at https://repo.saltstack.com/#debian.

doc/topics/releases/2016.11.8.rst (new file, 1719 lines)
File diff suppressed because it is too large.
@@ -21,6 +21,9 @@ Salt will no longer support Python 2.6. We will provide python2.7 packages on ou
.. _repo: https://repo.saltstack.com/

+As this will impact the installation of additional dependencies for Salt modules, please use pip packages if there is not a package available in a repository. You will need to install the python27-pip package to get access to the correct pip2.7 executable: ``yum install python27-pip``
+
============
Known Issues
============

doc/topics/releases/2017.7.2.rst (new file, 3160 lines)
File diff suppressed because it is too large.
@@ -132,7 +132,7 @@ fi
###############################################################################
# Remove the salt from the paths.d
###############################################################################
-if [ ! -f "/etc/paths.d/salt" ]; then
+if [ -f "/etc/paths.d/salt" ]; then
    echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt"
    rm "/etc/paths.d/salt"
    echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt"
@@ -377,46 +377,13 @@ class LoadAuth(object):
        eauth_config = self.opts['external_auth'][eauth]
        if not groups:
            groups = []
-        group_perm_keys = [item for item in eauth_config if item.endswith('%')]  # The configured auth groups
-
-        # First we need to know if the user is allowed to proceed via any of their group memberships.
-        group_auth_match = False
-        for group_config in group_perm_keys:
-            if group_config.rstrip('%') in groups:
-                group_auth_match = True
-                break
-        # If a group_auth_match is set it means only that we have a
-        # user which matches at least one or more of the groups defined
-        # in the configuration file.
-
-        external_auth_in_db = False
-        for entry in eauth_config:
-            if entry.startswith('^'):
-                external_auth_in_db = True
-                break
-
-        # If neither a catchall, a named membership or a group
-        # membership is found, there is no need to continue. Simply
-        # deny the user access.
-        if not ((name in eauth_config) |
-                ('*' in eauth_config) |
-                group_auth_match | external_auth_in_db):
-            # Auth successful, but no matching user found in config
-            log.warning('Authorization failure occurred.')
-            return None
-
-        # We now have an authenticated session and it is time to determine
-        # what the user has access to.
-        auth_list = []
-        if name in eauth_config:
-            auth_list = eauth_config[name]
-        elif '*' in eauth_config:
-            auth_list = eauth_config['*']
-        if group_auth_match:
-            auth_list = self.ckminions.fill_auth_list_from_groups(
-                eauth_config,
-                groups,
-                auth_list)
+        auth_list = self.ckminions.fill_auth_list(
+            eauth_config,
+            name,
+            groups)

        auth_list = self.__process_acl(load, auth_list)
salt/cache/__init__.py (vendored, 2 lines changed)

@@ -73,7 +73,7 @@ class Cache(object):
            self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR)
        else:
            self.cachedir = cachedir
-        self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS)
+        self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache'])
        self.serial = Serial(opts)
        self._modules = None
        self._kwargs = kwargs
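The fix above is the classic ``dict.get`` default pitfall: the second argument must be the default *value*, not the whole defaults mapping, or the option silently becomes a dict. A standalone sketch (names hypothetical):

.. code-block:: python

    DEFAULTS = {'cache': 'localfs', 'timeout': 60}
    opts = {}  # user config with no 'cache' key set

    # Buggy: falls back to the entire defaults dict
    driver = opts.get('cache', DEFAULTS)            # {'cache': 'localfs', 'timeout': 60}

    # Fixed: fall back to the specific default value
    driver = opts.get('cache', DEFAULTS['cache'])   # 'localfs'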
salt/cache/redis_cache.py (vendored, 21 lines changed)

@@ -421,18 +421,17 @@ def list_(bank):
    Lists entries stored in the specified bank.
    '''
    redis_server = _get_redis_server()
-    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
-    bank_keys = None
+    bank_redis_key = _get_bank_redis_key(bank)
    try:
-        bank_keys = redis_server.smembers(bank_keys_redis_key)
+        banks = redis_server.smembers(bank_redis_key)
    except (RedisConnectionError, RedisResponseError) as rerr:
-        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
+        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                       rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
-    if not bank_keys:
+    if not banks:
        return []
-    return list(bank_keys)
+    return list(banks)


def contains(bank, key):

@@ -440,15 +439,11 @@ def contains(bank, key):
    Checks if the specified bank contains the specified key.
    '''
    redis_server = _get_redis_server()
-    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
-    bank_keys = None
+    bank_redis_key = _get_bank_redis_key(bank)
    try:
-        bank_keys = redis_server.smembers(bank_keys_redis_key)
+        return redis_server.sismember(bank_redis_key, key)
    except (RedisConnectionError, RedisResponseError) as rerr:
-        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
+        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                           rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
-    if not bank_keys:
-        return False
-    return key in bank_keys
@@ -140,10 +140,11 @@ class Batch(object):
        # sure that the main while loop finishes even with unresp minions
        minion_tracker = {}

-        # We already know some minions didn't respond to the ping, so inform
-        # the user we won't be attempting to run a job on them
-        for down_minion in self.down_minions:
-            print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))
+        if not self.quiet:
+            # We already know some minions didn't respond to the ping, so inform
+            # the user we won't be attempting to run a job on them
+            for down_minion in self.down_minions:
+                print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))

        # Iterate while we still have things to execute
        while len(ret) < len(self.minions):
@@ -3539,16 +3539,15 @@ def list_nodes_min(location=None, call=None):

    for instance in instances:
        if isinstance(instance['instancesSet']['item'], list):
-            for item in instance['instancesSet']['item']:
-                state = item['instanceState']['name']
-                name = _extract_name_tag(item)
-                id = item['instanceId']
+            items = instance['instancesSet']['item']
        else:
-            item = instance['instancesSet']['item']
+            items = [instance['instancesSet']['item']]
+
+        for item in items:
            state = item['instanceState']['name']
            name = _extract_name_tag(item)
            id = item['instanceId']
-        ret[name] = {'state': state, 'id': id}
+            ret[name] = {'state': state, 'id': id}
    return ret
@@ -41,6 +41,7 @@ Example profile:
        master_port: 5506

    Tested on:
+    - Fedora 26 (libvirt 3.2.1, qemu 2.9.1)
    - Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1)
    - Fedora 23 (libvirt 1.2.18, qemu 2.4.1)
    - Centos 7 (libvirt 1.2.17, qemu 1.5.3)

@@ -82,9 +83,6 @@ from salt.exceptions import (
    SaltCloudSystemExit
)

-# Get logging started
-log = logging.getLogger(__name__)
-
VIRT_STATE_NAME_MAP = {0: 'running',
                       1: 'running',
                       2: 'running',

@@ -99,6 +97,20 @@ IP_LEARNING_XML = """<filterref filter='clean-traffic'>

__virtualname__ = 'libvirt'

+# Set up logging
+log = logging.getLogger(__name__)
+
+
+def libvirt_error_handler(ctx, error):  # pylint: disable=unused-argument
+    '''
+    Redirect stderr prints from libvirt to salt logging.
+    '''
+    log.debug("libvirt error {0}".format(error))
+
+
+if HAS_LIBVIRT:
+    libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None)
+

def __virtual__():
    '''
@@ -717,6 +717,10 @@ VALID_OPTS = {
    'fileserver_limit_traversal': bool,
    'fileserver_verify_config': bool,

+    # Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
+    # applied only if the user isn't matched by any other matcher.
+    'permissive_acl': bool,
+
    # Optionally enables keeping the calculated user's auth list in the token file.
    'keep_acl_in_token': bool,

@@ -1466,6 +1470,7 @@ DEFAULT_MASTER_OPTS = {
    'external_auth': {},
    'token_expire': 43200,
    'token_expire_user_override': False,
+    'permissive_acl': False,
    'keep_acl_in_token': False,
    'eauth_acl_module': '',
    'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
@@ -204,6 +204,14 @@ def clean_old_jobs(opts):


def mk_key(opts, user):
+    if HAS_PWD:
+        uid = None
+        try:
+            uid = pwd.getpwnam(user).pw_uid
+        except KeyError:
+            # User doesn't exist in the system
+            if opts['client_acl_verify']:
+                return None
    if salt.utils.is_windows():
        # The username may contain '\' if it is in Windows
        # 'DOMAIN\username' format. Fix this for the keyfile path.

@@ -231,9 +239,9 @@ def mk_key(opts, user):
    # Write access is necessary since on subsequent runs, if the file
    # exists, it needs to be written to again. Windows enforces this.
    os.chmod(keyfile, 0o600)
-    if HAS_PWD:
+    if HAS_PWD and uid is not None:
        try:
-            os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
+            os.chown(keyfile, uid, -1)
        except OSError:
            # The master is not being run as root and can therefore not
            # chown the key file

@@ -248,27 +256,26 @@ def access_keys(opts):
    '''
    # TODO: Need a way to get all available users for systems not supported by pwd module.
    #       For now users pattern matching will not work for publisher_acl.
-    users = []
    keys = {}
    publisher_acl = opts['publisher_acl']
    acl_users = set(publisher_acl.keys())
    if opts.get('user'):
        acl_users.add(opts['user'])
    acl_users.add(salt.utils.get_user())
-    if opts['client_acl_verify'] and HAS_PWD:
-        log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function')
-        for user in pwd.getpwall():
-            users.append(user.pw_name)
-        log.profile('End pwd.getpwall() call in masterarpi access_keys function')
    for user in acl_users:
        log.info('Preparing the %s key for local communication', user)
-        keys[user] = mk_key(opts, user)
+        key = mk_key(opts, user)
+        if key is not None:
+            keys[user] = key

    # Check other users matching ACL patterns
-    if HAS_PWD:
-        for user in users:
+    if opts['client_acl_verify'] and HAS_PWD:
+        log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
+        for user in pwd.getpwall():
+            user = user.pw_name
            if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users):
                keys[user] = mk_key(opts, user)
+        log.profile('End pwd.getpwall() call in masterapi access_keys function')

    return keys
@@ -74,8 +74,12 @@ def start(docker_url='unix://var/run/docker.sock',
    else:
        __salt__['event.send'](tag, msg)

-    client = docker.Client(base_url=docker_url,
-                           timeout=timeout)
+    try:
+        # docker-py 2.0 renamed this client attribute
+        client = docker.APIClient(base_url=docker_url, timeout=timeout)
+    except AttributeError:
+        client = docker.Client(base_url=docker_url, timeout=timeout)

    try:
        events = client.events()
        for event in events:
@@ -265,6 +265,12 @@ class SaltCacheError(SaltException):
    '''


+class TimeoutError(SaltException):
+    '''
+    Thrown when an operation cannot be completed within a given time limit.
+    '''
+
+
class SaltReqTimeoutError(SaltException):
    '''
    Thrown when a salt master request call fails to return within the timeout
@@ -367,6 +367,16 @@ def _file_lists(load, form):
                        'roots: %s symlink destination is %s',
                        abs_path, link_dest
                    )
+                    if salt.utils.is_windows() \
+                            and link_dest.startswith('\\\\'):
+                        # Symlink points to a network path. Since you can't
+                        # join UNC and non-UNC paths, just assume the original
+                        # path.
+                        log.trace(
+                            'roots: %s is a UNC path, using %s instead',
+                            link_dest, abs_path
+                        )
+                        link_dest = abs_path
                    if link_dest.startswith('..'):
                        joined = os.path.join(abs_path, link_dest)
                    else:
@@ -1175,6 +1175,10 @@ _OS_FAMILY_MAP = {
    'Raspbian': 'Debian',
    'Devuan': 'Debian',
    'antiX': 'Debian',
+    'Kali': 'Debian',
+    'neon': 'Debian',
+    'Cumulus': 'Debian',
+    'Deepin': 'Debian',
    'NILinuxRT': 'NILinuxRT',
    'NILinuxRT-XFCE': 'NILinuxRT',
    'Void': 'Void',
@@ -60,7 +60,6 @@ import salt.search
import salt.key
import salt.acl
import salt.engines
-import salt.fileserver
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server

@@ -181,7 +180,8 @@ class Maintenance(SignalHandlingMultiprocessingProcess):
        in the parent process, then once the fork happens you'll start getting
        errors like "WARNING: Mixing fork() and threads detected; memory leaked."
        '''
-        # Init fileserver manager
+        # Avoid circular import
+        import salt.fileserver
        self.fileserver = salt.fileserver.Fileserver(self.opts)
        # Load Runners
        ropts = dict(self.opts)

@@ -459,6 +459,8 @@ class Master(SMaster):
            )

        if self.opts.get('fileserver_verify_config', True):
+            # Avoid circular import
+            import salt.fileserver
            fileserver = salt.fileserver.Fileserver(self.opts)
            if not fileserver.servers:
                errors.append(

@@ -494,16 +496,15 @@ class Master(SMaster):
        if non_legacy_git_pillars:
            try:
                new_opts = copy.deepcopy(self.opts)
-                from salt.pillar.git_pillar \
-                    import PER_REMOTE_OVERRIDES as per_remote_overrides, \
-                    PER_REMOTE_ONLY as per_remote_only
+                import salt.pillar.git_pillar
                for repo in non_legacy_git_pillars:
                    new_opts['ext_pillar'] = [repo]
                    try:
                        git_pillar = salt.utils.gitfs.GitPillar(new_opts)
-                        git_pillar.init_remotes(repo['git'],
-                                                per_remote_overrides,
-                                                per_remote_only)
+                        git_pillar.init_remotes(
+                            repo['git'],
+                            salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                            salt.pillar.git_pillar.PER_REMOTE_ONLY)
                    except FileserverConfigError as exc:
                        critical_errors.append(exc.strerror)
                    finally:

@@ -973,6 +974,8 @@ class AESFuncs(object):
        '''
        Set the local file objects from the file server interface
        '''
+        # Avoid circular import
+        import salt.fileserver
        self.fs_ = salt.fileserver.Fileserver(self.opts)
        self._serve_file = self.fs_.serve_file
        self._file_find = self.fs_._find_file
@@ -1600,13 +1600,24 @@ class Minion(MinionBase):
        minion side execution.
        '''
        salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
-        ret = {
-            'return': {},
-            'retcode': {},
-            'success': {}
-        }
-        for ind in range(0, len(data['fun'])):
-            ret['success'][data['fun'][ind]] = False
+        multifunc_ordered = opts.get('multifunc_ordered', False)
+        num_funcs = len(data['fun'])
+        if multifunc_ordered:
+            ret = {
+                'return': [None] * num_funcs,
+                'retcode': [None] * num_funcs,
+                'success': [False] * num_funcs
+            }
+        else:
+            ret = {
+                'return': {},
+                'retcode': {},
+                'success': {}
+            }
+
+        for ind in range(0, num_funcs):
+            if not multifunc_ordered:
+                ret['success'][data['fun'][ind]] = False
            try:
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    # this minion is blacked out. Only allow saltutil.refresh_pillar

@@ -1621,12 +1632,20 @@ class Minion(MinionBase):
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
-                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
-                ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
-                    'retcode',
-                    0
-                )
-                ret['success'][data['fun'][ind]] = True
+                if multifunc_ordered:
+                    ret['return'][ind] = func(*args, **kwargs)
+                    ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get(
+                        'retcode',
+                        0
+                    )
+                    ret['success'][ind] = True
+                else:
+                    ret['return'][data['fun'][ind]] = func(*args, **kwargs)
+                    ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
+                        'retcode',
+                        0
+                    )
+                    ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning(

@@ -1634,7 +1653,10 @@ class Minion(MinionBase):
                        exc
                    )
                )
-                ret['return'][data['fun'][ind]] = trb
+                if multifunc_ordered:
+                    ret['return'][ind] = trb
+                else:
+                    ret['return'][data['fun'][ind]] = trb
            ret['jid'] = data['jid']
            ret['fun'] = data['fun']
            ret['fun_args'] = data['arg']
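This behavior is gated on a minion option named ``multifunc_ordered`` in the diff; a minimal config sketch enabling it so multi-function returns come back as position-ordered lists rather than dicts keyed by function name:

.. code-block:: yaml

    # /etc/salt/minion
    multifunc_ordered: True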
@@ -93,11 +93,15 @@ __virtualname__ = 'pkg'

def __virtual__():
    '''
-    Confirm this module is on a Debian based system
+    Confirm this module is on a Debian-based system
    '''
-    if __grains__.get('os_family') in ('Kali', 'Debian', 'neon'):
-        return __virtualname__
-    elif __grains__.get('os_family', False) == 'Cumulus':
+    # If your minion is running an OS which is Debian-based but does not have
+    # an "os_family" grain of Debian, then the proper fix is NOT to check for
+    # the minion's "os_family" grain here in the __virtual__. The correct fix
+    # is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
+    # dict in salt/grains/core.py, so that we assign the correct "os_family"
+    # grain to the minion.
+    if __grains__.get('os_family') == 'Debian':
        return __virtualname__
    return (False, 'The pkg module could not be loaded: unsupported OS family')
@@ -66,15 +66,17 @@ except ImportError:

log = logging.getLogger(__name__)

+__virtualname__ = 'boto_kinesis'
+

def __virtual__():
    '''
    Only load if boto3 libraries exist.
    '''
    if not HAS_BOTO:
-        return False
+        return False, 'The boto_kinesis module could not be loaded: boto libraries not found.'
    __utils__['boto3.assign_funcs'](__name__, 'kinesis')
-    return True
+    return __virtualname__


def _get_basic_stream(stream_name, conn):
@@ -598,9 +598,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
    try:
        vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
                             region=region, key=key, keyid=keyid, profile=profile)
-        return {'exists': bool(vpc_ids)}
-    except BotoServerError as e:
-        return {'error': salt.utils.boto.get_error(e)}
+    except BotoServerError as err:
+        boto_err = salt.utils.boto.get_error(err)
+        if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
+            # VPC was not found: handle the error and return False.
+            return {'exists': False}
+        return {'error': boto_err}
+
+    return {'exists': bool(vpc_ids)}


def create(cidr_block, instance_tenancy=None, vpc_name=None,

@@ -722,27 +727,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
-        if not vpc_id:
-            return {'vpc': None}
+    except BotoServerError as err:
+        boto_err = salt.utils.boto.get_error(err)
+        if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
+            # VPC was not found: handle the error and return None.
+            return {'vpc': None}
+        return {'error': boto_err}

-        filter_parameters = {'vpc_ids': vpc_id}
+    if not vpc_id:
+        return {'vpc': None}

+    filter_parameters = {'vpc_ids': vpc_id}
+
+    try:
        vpcs = conn.get_all_vpcs(**filter_parameters)
+    except BotoServerError as err:
+        return {'error': salt.utils.boto.get_error(err)}

-        if vpcs:
-            vpc = vpcs[0]  # Found!
-            log.debug('Found VPC: {0}'.format(vpc.id))
+    if vpcs:
+        vpc = vpcs[0]  # Found!
+        log.debug('Found VPC: {0}'.format(vpc.id))

-            keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
-                    'dhcp_options_id', 'instance_tenancy')
-            _r = dict([(k, getattr(vpc, k)) for k in keys])
-            _r.update({'region': getattr(vpc, 'region').name})
-            return {'vpc': _r}
-        else:
-            return {'vpc': None}
-
-    except BotoServerError as e:
-        return {'error': salt.utils.boto.get_error(e)}
+        keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
+                'dhcp_options_id', 'instance_tenancy')
+        _r = dict([(k, getattr(vpc, k)) for k in keys])
+        _r.update({'region': getattr(vpc, 'region').name})
+        return {'vpc': _r}
+    else:
+        return {'vpc': None}


def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
@@ -808,7 +820,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
    Given subnet properties, find and return matching subnet ids
    '''

-    if not any(subnet_name, tags, cidr):
+    if not any([subnet_name, tags, cidr]):
        raise SaltInvocationError('At least one of the following must be '
                                  'specified: subnet_name, cidr or tags.')
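The bug fixed here is that the built-in ``any()`` takes a single iterable, not several positional arguments; a standalone sketch:

.. code-block:: python

    subnet_name, tags, cidr = None, None, None

    # Buggy: raises TypeError: any() takes exactly one argument (3 given)
    # any(subnet_name, tags, cidr)

    # Fixed: wrap the values in a list so any() receives one iterable
    print(any([subnet_name, tags, cidr]))  # False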
@@ -926,34 +938,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,

    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
-        filter_parameters = {'filters': {}}
+    except BotoServerError as err:
+        return {'error': salt.utils.boto.get_error(err)}

-        if subnet_id:
-            filter_parameters['subnet_ids'] = [subnet_id]
-
-        if subnet_name:
-            filter_parameters['filters']['tag:Name'] = subnet_name
-
-        if cidr:
-            filter_parameters['filters']['cidr'] = cidr
-
-        if tags:
-            for tag_name, tag_value in six.iteritems(tags):
-                filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
-
-        if zones:
-            filter_parameters['filters']['availability_zone'] = zones
+    filter_parameters = {'filters': {}}
+    if subnet_id:
+        filter_parameters['subnet_ids'] = [subnet_id]
+    if subnet_name:
+        filter_parameters['filters']['tag:Name'] = subnet_name
+    if cidr:
+        filter_parameters['filters']['cidr'] = cidr
+    if tags:
+        for tag_name, tag_value in six.iteritems(tags):
+            filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
+    if zones:
+        filter_parameters['filters']['availability_zone'] = zones

+    try:
        subnets = conn.get_all_subnets(**filter_parameters)
-        log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
-        if subnets:
-            log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
-            return {'exists': True}
-        else:
-            log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
+    except BotoServerError as err:
+        boto_err = salt.utils.boto.get_error(err)
+        if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
+            # Subnet was not found: handle the error and return False.
            return {'exists': False}
-    except BotoServerError as e:
-        return {'error': salt.utils.boto.get_error(e)}
+        return {'error': boto_err}
+
+    log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets))
+    if subnets:
+        log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
+        return {'exists': True}
+    else:
+        log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
+        return {'exists': False}


def get_subnet_association(subnets, region=None, key=None, keyid=None,
@@ -54,7 +54,8 @@ import salt.utils.files
import salt.utils.locales
import salt.utils.templates
import salt.utils.url
-from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
+from salt.exceptions import CommandExecutionError, SaltInvocationError, get_error_message as _get_error_message
+from salt.utils.files import HASHES, HASHES_REVMAP

log = logging.getLogger(__name__)

@@ -62,16 +63,6 @@ __func_alias__ = {
    'makedirs_': 'makedirs'
}

-HASHES = {
-    'sha512': 128,
-    'sha384': 96,
-    'sha256': 64,
-    'sha224': 56,
-    'sha1': 40,
-    'md5': 32,
-}
-HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)])
-

def __virtual__():
    '''
@@ -2188,14 +2179,14 @@ def replace(path,
            if not_found_content is None:
                not_found_content = repl
            if prepend_if_not_found:
-                new_file.insert(0, not_found_content + b'\n')
+                new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep))
            else:
                # append_if_not_found
                # Make sure we have a newline at the end of the file
                if 0 != len(new_file):
-                    if not new_file[-1].endswith(b'\n'):
-                        new_file[-1] += b'\n'
-                new_file.append(not_found_content + b'\n')
+                    if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)):
+                        new_file[-1] += salt.utils.to_bytes(os.linesep)
+                new_file.append(not_found_content + salt.utils.to_bytes(os.linesep))
            has_changes = True
            if not dry_run:
                try:

@@ -2206,9 +2197,9 @@ def replace(path,
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
-                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
+                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
-                    fh_.write(salt.utils.to_str(line))
+                    fh_.write(salt.utils.to_bytes(line))
            finally:
                fh_.close()
@@ -2378,9 +2369,10 @@ def blockreplace(path,
    try:
        fi_file = fileinput.input(path,
                                  inplace=False, backup=False,
-                                 bufsize=1, mode='r')
+                                 bufsize=1, mode='rb')
        for line in fi_file:
+            line = salt.utils.to_str(line)
            result = line

            if marker_start in line:

@@ -2393,14 +2385,24 @@ def blockreplace(path,
                    # end of block detected
                    in_block = False

-                    # Check for multi-line '\n' terminated content as split will
-                    # introduce an unwanted additional new line.
-                    if content and content[-1] == '\n':
-                        content = content[:-1]
+                    # Handle situations where there may be multiple types
+                    # of line endings in the same file. Separate the content
+                    # into lines. Account for Windows-style line endings
+                    # using os.linesep, then by linux-style line endings
+                    # using '\n'
+                    split_content = []
+                    for linesep_line in content.split(os.linesep):
+                        for content_line in linesep_line.split('\n'):
+                            split_content.append(content_line)
+
+                    # Trim any trailing new lines to avoid unwanted
+                    # additional new lines
+                    while not split_content[-1]:
+                        split_content.pop()

                    # push new block content in file
-                    for cline in content.split('\n'):
-                        new_file.append(cline + '\n')
+                    for content_line in split_content:
+                        new_file.append(content_line + os.linesep)

                    done = True

@@ -2428,25 +2430,25 @@ def blockreplace(path,
    if not done:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
-            new_file.insert(0, marker_end + '\n')
+            new_file.insert(0, marker_end + os.linesep)
            if append_newline is True:
-                new_file.insert(0, content + '\n')
+                new_file.insert(0, content + os.linesep)
            else:
                new_file.insert(0, content)
-            new_file.insert(0, marker_start + '\n')
+            new_file.insert(0, marker_start + os.linesep)
            done = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
-                if not new_file[-1].endswith('\n'):
-                    new_file[-1] += '\n'
+                if not new_file[-1].endswith(os.linesep):
+                    new_file[-1] += os.linesep
            # add the markers and content at the end of file
-            new_file.append(marker_start + '\n')
+            new_file.append(marker_start + os.linesep)
            if append_newline is True:
-                new_file.append(content + '\n')
+                new_file.append(content + os.linesep)
            else:
                new_file.append(content)
-            new_file.append(marker_end + '\n')
+            new_file.append(marker_end + os.linesep)
            done = True
        else:
            raise CommandExecutionError(

@@ -2477,9 +2479,9 @@ def blockreplace(path,

    # write new content in the file while avoiding partial reads
    try:
-        fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
+        fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
        for line in new_file:
-            fh_.write(line)
+            fh_.write(salt.utils.to_bytes(line))
    finally:
        fh_.close()
@@ -3618,6 +3620,14 @@ def source_list(source, source_hash, saltenv):
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
+                # Fix this for Windows
+                if salt.utils.is_windows():
+                    # urlparse doesn't handle a local Windows path without the
+                    # protocol indicator (file://). The scheme will be the
+                    # drive letter instead of the protocol. So, we'll add the
+                    # protocol and re-parse
+                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
+                        urlparsed_single_src = _urlparse('file://' + single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)

@@ -3627,18 +3637,17 @@ def source_list(source, source_hash, saltenv):
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
-                    try:
-                        if __salt__['cp.cache_file'](single_src):
-                            ret = (single_src, single_hash)
-                            break
-                    except MinionError as exc:
-                        # Error downloading file. Log the caught exception and
-                        # continue on to the next source.
-                        log.exception(exc)
-                elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
-                    ret = (single_src, single_hash)
-                    break
-                elif single_src.startswith('/') and os.path.exists(single_src):
+                    if __salt__['cp.cache_file'](single_src):
+                        ret = (single_src, single_hash)
+                        break
+                elif proto == 'file' and (
+                        os.path.exists(urlparsed_single_src.netloc) or
+                        os.path.exists(urlparsed_single_src.path) or
+                        os.path.exists(os.path.join(
+                            urlparsed_single_src.netloc,
+                            urlparsed_single_src.path))):
+                    ret = (single_src, single_hash)
+                    break
+                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):

@@ -3649,15 +3658,26 @@ def source_list(source, source_hash, saltenv):
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
+                if salt.utils.is_windows():
+                    # urlparse doesn't handle a local Windows path without the
+                    # protocol indicator (file://). The scheme will be the
+                    # drive letter instead of the protocol. So, we'll add the
+                    # protocol and re-parse
+                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
+                        urlparsed_src = _urlparse('file://' + single)
                proto = urlparsed_src.scheme
-                if proto == 'file' and os.path.exists(urlparsed_src.path):
+                if proto == 'file' and (
+                        os.path.exists(urlparsed_src.netloc) or
+                        os.path.exists(urlparsed_src.path) or
+                        os.path.exists(os.path.join(
+                            urlparsed_src.netloc,
+                            urlparsed_src.path))):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    if __salt__['cp.cache_file'](single):
                        ret = (single, source_hash)
                        break
-                elif single.startswith('/') and os.path.exists(single):
+                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
            if ret is None:
@ -4478,7 +4498,7 @@ def check_file_meta(
|
|||
'''
|
||||
changes = {}
|
||||
if not source_sum:
|
||||
source_sum = dict()
|
||||
source_sum = {}
|
||||
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
|
||||
if not lstats:
|
||||
changes['newfile'] = name
|
||||
|
|
|
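A note on the first hunk above: the fix normalizes block content that mixes Windows (``\r\n``) and Unix (``\n``) line endings by splitting on ``os.linesep`` first and then on bare ``'\n'``. A minimal standalone sketch of the same technique (illustrative names, not Salt's API):

.. code-block:: python

    import os

    def split_mixed_lines(content):
        # Split on the platform separator first, then on bare '\n', so
        # both ending styles end up as individual lines.
        split_content = []
        for linesep_line in content.split(os.linesep):
            split_content.extend(linesep_line.split('\n'))
        # Trim trailing empties so no spurious newlines get appended later.
        while split_content and not split_content[-1]:
            split_content.pop()
        return split_content

    # On Windows, 'a\r\nb\nc\r\n' -> ['a', 'b', 'c']; on POSIX os.linesep
    # is '\n', so the first split already handles everything.
    print(split_mixed_lines('a' + os.linesep + 'b\nc' + os.linesep))
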
@@ -102,8 +102,6 @@ def _construct_yaml_str(self, node):
     Construct for yaml
     '''
     return self.construct_scalar(node)
 YamlLoader.add_constructor(u'tag:yaml.org,2002:str',
                            _construct_yaml_str)
-YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp',
-                           _construct_yaml_str)

@@ -373,10 +373,11 @@ class _Ini(_Section):
             with salt.utils.fopen(self.name) as rfh:
                 inicontents = rfh.read()
         except (OSError, IOError) as exc:
-            raise CommandExecutionError(
-                "Unable to open file '{0}'. "
-                "Exception: {1}".format(self.name, exc)
-            )
+            if __opts__['test'] is False:
+                raise CommandExecutionError(
+                    "Unable to open file '{0}'. "
+                    "Exception: {1}".format(self.name, exc)
+                )
         if not inicontents:
             return
         # Remove anything left behind from a previous run.

@@ -40,11 +40,16 @@ import base64
 import logging
 import yaml
 import tempfile
+import signal
+from time import sleep
+from contextlib import contextmanager
 
 from salt.exceptions import CommandExecutionError
 from salt.ext.six import iteritems
 import salt.utils
 import salt.utils.templates
+from salt.exceptions import TimeoutError
 from salt.ext.six.moves import range  # pylint: disable=import-error
 
 try:
     import kubernetes  # pylint: disable=import-self
@@ -78,6 +83,21 @@ def __virtual__():
     return False, 'python kubernetes library not found'
 
 
+if not salt.utils.is_windows():
+    @contextmanager
+    def _time_limit(seconds):
+        def signal_handler(signum, frame):
+            raise TimeoutError
+        signal.signal(signal.SIGALRM, signal_handler)
+        signal.alarm(seconds)
+        try:
+            yield
+        finally:
+            signal.alarm(0)
+
+    POLLING_TIME_LIMIT = 30
+
+
 # pylint: disable=no-member
 def _setup_conn(**kwargs):
     '''
@@ -692,7 +712,30 @@ def delete_deployment(name, namespace='default', **kwargs):
             name=name,
             namespace=namespace,
             body=body)
-        return api_response.to_dict()
+        mutable_api_response = api_response.to_dict()
+        if not salt.utils.is_windows():
+            try:
+                with _time_limit(POLLING_TIME_LIMIT):
+                    while show_deployment(name, namespace) is not None:
+                        sleep(1)
+                    else:  # pylint: disable=useless-else-on-loop
+                        mutable_api_response['code'] = 200
+            except TimeoutError:
+                pass
+        else:
+            # Windows has not signal.alarm implementation, so we are just falling
+            # back to loop-counting.
+            for i in range(60):
+                if show_deployment(name, namespace) is None:
+                    mutable_api_response['code'] = 200
+                    break
+                else:
+                    sleep(1)
+        if mutable_api_response['code'] != 200:
+            log.warning('Reached polling time limit. Deployment is not yet '
+                        'deleted, but we are backing off. Sorry, but you\'ll '
+                        'have to check manually.')
+        return mutable_api_response
     except (ApiException, HTTPError) as exc:
         if isinstance(exc, ApiException) and exc.status == 404:
             return None

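For orientation, the ``_time_limit`` context manager introduced above bounds the deletion-polling loop with ``SIGALRM``, while ``delete_deployment()`` falls back to plain loop-counting on Windows. A minimal sketch of the same SIGALRM pattern (POSIX-only; ``resource_still_exists`` is a hypothetical stand-in for ``show_deployment``):

.. code-block:: python

    import signal
    from contextlib import contextmanager
    from time import sleep

    class PollTimeout(Exception):
        pass

    @contextmanager
    def time_limit(seconds):
        def handler(signum, frame):
            raise PollTimeout
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(seconds)      # deliver SIGALRM after `seconds`
        try:
            yield
        finally:
            signal.alarm(0)        # always cancel the pending alarm

    def resource_still_exists():   # hypothetical poll function
        return False

    try:
        with time_limit(30):
            while resource_still_exists():
                sleep(1)
    except PollTimeout:
        pass  # back off instead of hanging forever
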
@@ -1,6 +1,9 @@
 # -*- coding: utf-8 -*-
 '''
 Support for Linux File Access Control Lists
+
+The Linux ACL module requires the `getfacl` and `setfacl` binaries.
+
 '''
 from __future__ import absolute_import

@@ -687,11 +687,20 @@ def file_query(database, file_name, **connection_args):
 
     .. versionadded:: 2017.7.0
 
+    database
+
+        database to run script inside
+
+    file_name
+
+        File name of the script. This can be on the minion, or a file that is reachable by the fileserver
+
     CLI Example:
 
     .. code-block:: bash
 
         salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql
+        salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql
 
     Return data:
 
@@ -700,6 +709,9 @@ def file_query(database, file_name, **connection_args):
         {'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L}
 
     '''
+    if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')):
+        file_name = __salt__['cp.cache_file'](file_name)
+
     if os.path.exists(file_name):
         with salt.utils.fopen(file_name, 'r') as ifile:
             contents = ifile.read()
@@ -708,7 +720,7 @@ def file_query(database, file_name, **connection_args):
         return False
 
     query_string = ""
-    ret = {'rows returned': 0, 'columns': 0, 'results': 0, 'rows affected': 0, 'query time': {'raw': 0}}
+    ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}}
     for line in contents.splitlines():
         if re.match(r'--', line):  # ignore sql comments
             continue
@@ -728,16 +740,16 @@ def file_query(database, file_name, **connection_args):
             if 'rows returned' in query_result:
                 ret['rows returned'] += query_result['rows returned']
             if 'columns' in query_result:
-                ret['columns'] += query_result['columns']
+                ret['columns'].append(query_result['columns'])
             if 'results' in query_result:
-                ret['results'] += query_result['results']
+                ret['results'].append(query_result['results'])
             if 'rows affected' in query_result:
                 ret['rows affected'] += query_result['rows affected']
     ret['query time']['human'] = str(round(float(ret['query time']['raw']), 2)) + 's'
     ret['query time']['raw'] = round(float(ret['query time']['raw']), 5)
 
     # Remove empty keys in ret
-    ret = dict((k, v) for k, v in six.iteritems(ret) if v)
+    ret = {k: v for k, v in six.iteritems(ret) if v}
 
     return ret

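The ``+=`` to ``.append()`` switch above is a behavior fix, not a style change: with ``+=``, the rows of every statement in the script were flattened into one list, losing query boundaries. A toy illustration (not Salt code):

.. code-block:: python

    q1 = {'columns': ['id'], 'results': [(1,), (2,)]}
    q2 = {'columns': ['name'], 'results': [('a',)]}

    flat, grouped = [], []
    for q in (q1, q2):
        flat += q['results']          # old behavior: one merged list
        grouped.append(q['results'])  # new behavior: one entry per query

    print(flat)     # [(1,), (2,), ('a',)] -- query boundaries lost
    print(grouped)  # [[(1,), (2,)], [('a',)]] -- boundaries kept
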
@@ -374,8 +374,10 @@ def list_semod():
 
 def _validate_filetype(filetype):
     '''
-    Checks if the given filetype is a valid SELinux filetype specification.
-    Throws an SaltInvocationError if it isn't.
+    .. versionadded:: 2017.7.0
+
+    Checks if the given filetype is a valid SELinux filetype
+    specification. Throws an SaltInvocationError if it isn't.
     '''
     if filetype not in _SELINUX_FILETYPES.keys():
         raise SaltInvocationError('Invalid filetype given: {0}'.format(filetype))
@@ -384,6 +386,8 @@ def _validate_filetype(filetype):
 
 def _context_dict_to_string(context):
     '''
+    .. versionadded:: 2017.7.0
+
     Converts an SELinux file context from a dict to a string.
     '''
     return '{sel_user}:{sel_role}:{sel_type}:{sel_level}'.format(**context)
@@ -391,6 +395,8 @@ def _context_dict_to_string(context):
 
 def _context_string_to_dict(context):
     '''
+    .. versionadded:: 2017.7.0
+
     Converts an SELinux file context from string to dict.
     '''
     if not re.match('[^:]+:[^:]+:[^:]+:[^:]+$', context):
@@ -405,8 +411,11 @@ def _context_string_to_dict(context):
 
 def filetype_id_to_string(filetype='a'):
     '''
-    Translates SELinux filetype single-letter representation
-    to a more human-readable version (which is also used in `semanage fcontext -l`).
+    .. versionadded:: 2017.7.0
+
+    Translates SELinux filetype single-letter representation to a more
+    human-readable version (which is also used in `semanage fcontext
+    -l`).
     '''
     _validate_filetype(filetype)
     return _SELINUX_FILETYPES.get(filetype, 'error')
@@ -414,20 +423,27 @@ def filetype_id_to_string(filetype='a'):
 
 def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
     '''
-    Returns the current entry in the SELinux policy list as a dictionary.
-    Returns None if no exact match was found
+    .. versionadded:: 2017.7.0
+
+    Returns the current entry in the SELinux policy list as a
+    dictionary. Returns None if no exact match was found.
 
     Returned keys are:
-    - filespec (the name supplied and matched)
-    - filetype (the descriptive name of the filetype supplied)
-    - sel_user, sel_role, sel_type, sel_level (the selinux context)
+
+    * filespec (the name supplied and matched)
+    * filetype (the descriptive name of the filetype supplied)
+    * sel_user, sel_role, sel_type, sel_level (the selinux context)
 
     For a more in-depth explanation of the selinux context, go to
     https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Security-Enhanced_Linux/chap-Security-Enhanced_Linux-SELinux_Contexts.html
 
-    name: filespec of the file or directory. Regex syntax is allowed.
-    filetype: The SELinux filetype specification.
-        Use one of [a, f, d, c, b, s, l, p].
-        See also `man semanage-fcontext`.
-        Defaults to 'a' (all files)
+    name
+        filespec of the file or directory. Regex syntax is allowed.
+
+    filetype
+        The SELinux filetype specification. Use one of [a, f, d, c, b,
+        s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
+        (all files).
 
     CLI Example:
 
@@ -447,7 +463,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
     cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype)
     cmd = 'semanage fcontext -l | egrep ' + \
           "'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs)
-    current_entry_text = __salt__['cmd.shell'](cmd)
+    current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True)
     if current_entry_text == '':
         return None
     ret = {}
@@ -460,20 +476,34 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
 
 def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None):
     '''
-    Sets or deletes the SELinux policy for a given filespec and other optional parameters.
-    Returns the result of the call to semanage.
-    Note that you don't have to remove an entry before setting a new one for a given
-    filespec and filetype, as adding one with semanage automatically overwrites a
-    previously configured SELinux context.
+    .. versionadded:: 2017.7.0
 
-    name: filespec of the file or directory. Regex syntax is allowed.
-    file_type: The SELinux filetype specification.
-        Use one of [a, f, d, c, b, s, l, p].
-        See also ``man semanage-fcontext``.
-        Defaults to 'a' (all files)
-    sel_type: SELinux context type. There are many.
-    sel_user: SELinux user. Use ``semanage login -l`` to determine which ones are available to you
-    sel_level: The MLS range of the SELinux context.
+    Sets or deletes the SELinux policy for a given filespec and other
+    optional parameters.
+
+    Returns the result of the call to semanage.
+
+    Note that you don't have to remove an entry before setting a new
+    one for a given filespec and filetype, as adding one with semanage
+    automatically overwrites a previously configured SELinux context.
+
+    name
+        filespec of the file or directory. Regex syntax is allowed.
+
+    file_type
+        The SELinux filetype specification. Use one of [a, f, d, c, b,
+        s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a'
+        (all files).
+
+    sel_type
+        SELinux context type. There are many.
+
+    sel_user
+        SELinux user. Use ``semanage login -l`` to determine which ones
+        are available to you.
+
+    sel_level
+        The MLS range of the SELinux context.
 
     CLI Example:
 
@@ -499,10 +529,14 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
 
 def fcontext_policy_is_applied(name, recursive=False):
     '''
-    Returns an empty string if the SELinux policy for a given filespec is applied,
-    returns string with differences in policy and actual situation otherwise.
+    .. versionadded:: 2017.7.0
 
-    name: filespec of the file or directory. Regex syntax is allowed.
+    Returns an empty string if the SELinux policy for a given filespec
+    is applied, returns string with differences in policy and actual
+    situation otherwise.
+
+    name
+        filespec of the file or directory. Regex syntax is allowed.
 
     CLI Example:
 
@@ -519,11 +553,17 @@ def fcontext_policy_is_applied(name, recursive=False):
 
 def fcontext_apply_policy(name, recursive=False):
     '''
-    Applies SElinux policies to filespec using `restorecon [-R] filespec`.
-    Returns dict with changes if succesful, the output of the restorecon command otherwise.
+    .. versionadded:: 2017.7.0
 
-    name: filespec of the file or directory. Regex syntax is allowed.
-    recursive: Recursively apply SELinux policies.
+    Applies SElinux policies to filespec using `restorecon [-R]
+    filespec`. Returns dict with changes if succesful, the output of
+    the restorecon command otherwise.
+
+    name
+        filespec of the file or directory. Regex syntax is allowed.
+
+    recursive
+        Recursively apply SELinux policies.
 
     CLI Example:
 

@@ -1276,10 +1276,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
             arguments = ['/i', cached_pkg]
             if pkginfo[version_num].get('allusers', True):
                 arguments.append('ALLUSERS="1"')
-            arguments.extend(salt.utils.shlex_split(install_flags))
+            arguments.extend(salt.utils.shlex_split(install_flags, posix=False))
         else:
             cmd = cached_pkg
-            arguments = salt.utils.shlex_split(install_flags)
+            arguments = salt.utils.shlex_split(install_flags, posix=False)
 
         # Install the software
         # Check Use Scheduler Option
@@ -1341,7 +1341,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
             # Launch the command
             result = __salt__['cmd.run_all'](cmd,
-                                             cache_path,
                                              output_loglevel='quiet',
                                              python_shell=False,
                                              redirect_stderr=True)
             if not result['retcode']:
@@ -1600,19 +1599,20 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
         #Compute msiexec string
         use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
 
+        # Build cmd and arguments
+        # cmd and arguments must be separated for use with the task scheduler
+        if use_msiexec:
+            cmd = msiexec
+            arguments = ['/x']
+            arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False))
+        else:
+            cmd = expanded_cached_pkg
+            arguments = salt.utils.shlex_split(uninstall_flags, posix=False)
+
         # Uninstall the software
         # Check Use Scheduler Option
         if pkginfo[target].get('use_scheduler', False):
 
-            # Build Scheduled Task Parameters
-            if use_msiexec:
-                cmd = msiexec
-                arguments = ['/x']
-                arguments.extend(salt.utils.shlex_split(uninstall_flags))
-            else:
-                cmd = expanded_cached_pkg
-                arguments = salt.utils.shlex_split(uninstall_flags)
 
             # Create Scheduled Task
             __salt__['task.create_task'](name='update-salt-software',
                                          user_name='System',
@@ -1633,16 +1633,12 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
                 ret[pkgname] = {'uninstall status': 'failed'}
         else:
             # Build the install command
-            cmd = []
-            if use_msiexec:
-                cmd.extend([msiexec, '/x', expanded_cached_pkg])
-            else:
-                cmd.append(expanded_cached_pkg)
-            cmd.extend(salt.utils.shlex_split(uninstall_flags))
+            cmd = [cmd]
+            cmd.extend(arguments)
 
             # Launch the command
             result = __salt__['cmd.run_all'](
                 cmd,
                 output_loglevel='trace',
                 python_shell=False,
                 redirect_stderr=True)
             if not result['retcode']:

@@ -25,7 +25,8 @@ from salt.exceptions import CommandExecutionError, SaltRenderError
 from salt.runners.winrepo import (
     genrepo as _genrepo,
     update_git_repos as _update_git_repos,
-    PER_REMOTE_OVERRIDES
+    PER_REMOTE_OVERRIDES,
+    PER_REMOTE_ONLY
 )
 from salt.ext import six
 try:

@@ -110,7 +110,7 @@ def available(software=True,
         Include software updates in the results (default is True)
 
     drivers (bool):
-        Include driver updates in the results (default is False)
+        Include driver updates in the results (default is True)
 
     summary (bool):
         - True: Return a summary of updates available for each category.

@@ -1262,6 +1262,7 @@ def install(name=None,
     to_install = []
     to_downgrade = []
     to_reinstall = []
+    _available = {}
     # The above three lists will be populated with tuples containing the
     # package name and the string being used for this particular package
     # modification. The reason for this method is that the string we use for
@@ -1281,7 +1282,8 @@ def install(name=None,
     if pkg_type == 'repository':
         has_wildcards = [x for x, y in six.iteritems(pkg_params)
                          if y is not None and '*' in y]
-        _available = list_repo_pkgs(*has_wildcards, byrepo=False, **kwargs)
+        if has_wildcards:
+            _available = list_repo_pkgs(*has_wildcards, byrepo=False, **kwargs)
         pkg_params_items = six.iteritems(pkg_params)
     elif pkg_type == 'advisory':
         pkg_params_items = []

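The new ``if has_wildcards:`` guard (together with the ``_available = {}`` default added in the previous hunk) avoids calling ``list_repo_pkgs()`` with an empty argument list, which would otherwise enumerate every package in every enabled repo. A rough sketch of the guarded pattern (the function below is a stand-in, not yum's real query):

.. code-block:: python

    def list_repo_pkgs(*names, **kwargs):
        # With no names this would scan all repos -- the expensive case.
        return {n: ['1.0'] for n in names}

    pkg_params = {'vim': '8.*', 'bash': None}
    _available = {}
    has_wildcards = [x for x, y in pkg_params.items()
                     if y is not None and '*' in y]
    if has_wildcards:  # only pay for the repo query when it is needed
        _available = list_repo_pkgs(*has_wildcards, byrepo=False)
    print(_available)  # {'vim': ['1.0']}
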
@@ -778,9 +778,12 @@ class Pillar(object):
                     and self.opts.get('__role') != 'minion':
                 # Avoid circular import
                 import salt.utils.gitfs
-                from salt.pillar.git_pillar import PER_REMOTE_OVERRIDES
+                import salt.pillar.git_pillar
                 git_pillar = salt.utils.gitfs.GitPillar(self.opts)
-                git_pillar.init_remotes(self.ext['git'], PER_REMOTE_OVERRIDES)
+                git_pillar.init_remotes(
+                    self.ext['git'],
+                    salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                    salt.pillar.git_pillar.PER_REMOTE_ONLY)
                 git_pillar.fetch_remotes()
         except TypeError:
             # Handle malformed ext_pillar

@@ -492,6 +492,12 @@ except ImportError:
 PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
-PER_REMOTE_ONLY = ('name', 'mountpoint')
+
+# Fall back to default per-remote-only. This isn't technically needed since
+# salt.utils.gitfs.GitBase.init_remotes() will default to
+# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
+# runners and other modules that import salt.pillar.git_pillar.
+PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
 
 # Set up logging
 log = logging.getLogger(__name__)

@@ -90,7 +90,8 @@ class POSTGRESExtPillar(SqlBaseExtPillar):
         conn = psycopg2.connect(host=_options['host'],
                                 user=_options['user'],
                                 password=_options['pass'],
-                                dbname=_options['db'])
+                                dbname=_options['db'],
+                                port=_options['port'])
         cursor = conn.cursor()
         try:
             yield cursor

@@ -16,12 +16,11 @@ import salt.utils
 import salt.utils.master
 import salt.payload
 import salt.cache
+import salt.fileserver.gitfs
+import salt.pillar.git_pillar
+import salt.runners.winrepo
 from salt.exceptions import SaltInvocationError
 from salt.fileserver import clear_lock as _clear_lock
-from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES as __GITFS_OVERRIDES
-from salt.pillar.git_pillar \
-    import PER_REMOTE_OVERRIDES as __GIT_PILLAR_OVERRIDES
-from salt.runners.winrepo import PER_REMOTE_OVERRIDES as __WINREPO_OVERRIDES
 
 log = logging.getLogger(__name__)
 
@@ -328,8 +327,10 @@ def clear_git_lock(role, remote=None, **kwargs):
 
     if role == 'gitfs':
         git_objects = [salt.utils.gitfs.GitFS(__opts__)]
-        git_objects[0].init_remotes(__opts__['gitfs_remotes'],
-                                    __GITFS_OVERRIDES)
+        git_objects[0].init_remotes(
+            __opts__['gitfs_remotes'],
+            salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
+            salt.fileserver.gitfs.PER_REMOTE_ONLY)
     elif role == 'git_pillar':
         git_objects = []
         for ext_pillar in __opts__['ext_pillar']:
@@ -338,7 +339,10 @@ def clear_git_lock(role, remote=None, **kwargs):
             if not isinstance(ext_pillar['git'], list):
                 continue
             obj = salt.utils.gitfs.GitPillar(__opts__)
-            obj.init_remotes(ext_pillar['git'], __GIT_PILLAR_OVERRIDES)
+            obj.init_remotes(
+                ext_pillar['git'],
+                salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
+                salt.pillar.git_pillar.PER_REMOTE_ONLY)
             git_objects.append(obj)
     elif role == 'winrepo':
         winrepo_dir = __opts__['winrepo_dir']
@@ -350,7 +354,10 @@ def clear_git_lock(role, remote=None, **kwargs):
             (__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
         ):
             obj = salt.utils.gitfs.WinRepo(__opts__, base_dir)
-            obj.init_remotes(remotes, __WINREPO_OVERRIDES)
+            obj.init_remotes(
+                remotes,
+                salt.runners.winrepo.PER_REMOTE_OVERRIDES,
+                salt.runners.winrepo.PER_REMOTE_ONLY)
             git_objects.append(obj)
     else:
         raise SaltInvocationError('Invalid role \'{0}\''.format(role))

@@ -6,9 +6,10 @@ Module for sending messages to Mattermost
 
 :configuration: This module can be used by either passing an api_url and hook
     directly or by specifying both in a configuration profile in the salt
-    master/minion config.
-    For example:
+    master/minion config. For example:
 
 .. code-block:: yaml
 
     mattermost:
       hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
      api_url: https://example.com

@@ -31,6 +31,12 @@ log = logging.getLogger(__name__)
 # Global parameters which can be overridden on a per-remote basis
 PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')
 
+# Fall back to default per-remote-only. This isn't technically needed since
+# salt.utils.gitfs.GitBase.init_remotes() will default to
+# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
+# runners and other modules that import salt.runners.winrepo.
+PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
+
 
 def genrepo(opts=None, fire_event=True):
     '''
@@ -211,7 +217,8 @@ def update_git_repos(opts=None, clean=False, masterless=False):
             # New winrepo code utilizing salt.utils.gitfs
             try:
                 winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
-                winrepo.init_remotes(remotes, PER_REMOTE_OVERRIDES)
+                winrepo.init_remotes(
+                    remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
                 winrepo.fetch_remotes()
                 # Since we're not running update(), we need to manually call
                 # clear_old_remotes() to remove directories from remotes that

@@ -61,16 +61,30 @@ def _gen_checksum(path):
             'hash_type': __opts__['hash_type']}
 
 
-def _update_checksum(cached_source):
-    cached_source_sum = '.'.join((cached_source, 'hash'))
-    source_sum = _gen_checksum(cached_source)
+def _checksum_file_path(path):
+    relpath = '.'.join((os.path.relpath(path, __opts__['cachedir']), 'hash'))
+    if re.match(r'..[/\\]', relpath):
+        # path is a local file
+        relpath = salt.utils.path_join(
+            'local',
+            os.path.splitdrive(path)[-1].lstrip('/\\'),
+        )
+    return salt.utils.path_join(__opts__['cachedir'], 'archive_hash', relpath)
+
+
+def _update_checksum(path):
+    checksum_file = _checksum_file_path(path)
+    checksum_dir = os.path.dirname(checksum_file)
+    if not os.path.isdir(checksum_dir):
+        os.makedirs(checksum_dir)
+    source_sum = _gen_checksum(path)
     hash_type = source_sum.get('hash_type')
     hsum = source_sum.get('hsum')
     if hash_type and hsum:
         lines = []
         try:
             try:
-                with salt.utils.fopen(cached_source_sum, 'r') as fp_:
+                with salt.utils.fopen(checksum_file, 'r') as fp_:
                     for line in fp_:
                         try:
                             lines.append(line.rstrip('\n').split(':', 1))
@@ -80,7 +94,7 @@ def _update_checksum(cached_source):
             if exc.errno != errno.ENOENT:
                 raise
 
-        with salt.utils.fopen(cached_source_sum, 'w') as fp_:
+        with salt.utils.fopen(checksum_file, 'w') as fp_:
             for line in lines:
                 if line[0] == hash_type:
                     line[1] = hsum
@@ -90,16 +104,16 @@ def _update_checksum(cached_source):
         except (IOError, OSError) as exc:
             log.warning(
                 'Failed to update checksum for %s: %s',
-                cached_source, exc.__str__()
+                path, exc.__str__(), exc_info=True
             )
 
 
-def _read_cached_checksum(cached_source, form=None):
+def _read_cached_checksum(path, form=None):
     if form is None:
         form = __opts__['hash_type']
-    path = '.'.join((cached_source, 'hash'))
+    checksum_file = _checksum_file_path(path)
     try:
-        with salt.utils.fopen(path, 'r') as fp_:
+        with salt.utils.fopen(checksum_file, 'r') as fp_:
             for line in fp_:
                 # Should only be one line in this file but just in case it
                 # isn't, read only a single line to avoid overuse of memory.
@@ -114,9 +128,9 @@ def _read_cached_checksum(cached_source, form=None):
     return {'hash_type': hash_type, 'hsum': hsum}
 
 
-def _compare_checksum(cached_source, source_sum):
+def _compare_checksum(cached, source_sum):
     cached_sum = _read_cached_checksum(
-        cached_source,
+        cached,
         form=source_sum.get('hash_type', __opts__['hash_type'])
     )
     return source_sum == cached_sum
|
|||
user=None,
|
||||
group=None,
|
||||
if_missing=None,
|
||||
keep=False,
|
||||
trim_output=False,
|
||||
use_cmd_unzip=None,
|
||||
extract_perms=True,
|
||||
|
@ -389,6 +402,22 @@ def extracted(name,
|
|||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
keep_source : True
|
||||
For ``source`` archives not local to the minion (i.e. from the Salt
|
||||
fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt
|
||||
will need to download the archive to the minion cache before they can
|
||||
be extracted. To remove the downloaded archive after extraction, set
|
||||
this argument to ``False``.
|
||||
|
||||
.. versionadded:: 2017.7.3
|
||||
|
||||
keep : True
|
||||
Same as ``keep_source``, kept for backward-compatibility.
|
||||
|
||||
.. note::
|
||||
If both ``keep_source`` and ``keep`` are used, ``keep`` will be
|
||||
ignored.
|
||||
|
||||
password
|
||||
**For ZIP archives only.** Password used for extraction.
|
||||
|
||||
|
@ -527,13 +556,6 @@ def extracted(name,
|
|||
simply checked for existence and extraction will be skipped if
|
||||
if is present.
|
||||
|
||||
keep : False
|
||||
For ``source`` archives not local to the minion (i.e. from the Salt
|
||||
fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt
|
||||
will need to download the archive to the minion cache before they can
|
||||
be extracted. After extraction, these source archives will be removed
|
||||
unless this argument is set to ``True``.
|
||||
|
||||
trim_output : False
|
||||
Useful for archives with many files in them. This can either be set to
|
||||
``True`` (in which case only the first 100 files extracted will be
|
||||
|
@ -635,6 +657,21 @@ def extracted(name,
|
|||
# Remove pub kwargs as they're irrelevant here.
|
||||
kwargs = salt.utils.clean_kwargs(**kwargs)
|
||||
|
||||
if 'keep_source' in kwargs and 'keep' in kwargs:
|
||||
ret.setdefault('warnings', []).append(
|
||||
'Both \'keep_source\' and \'keep\' were used. Since these both '
|
||||
'do the same thing, \'keep\' was ignored.'
|
||||
)
|
||||
keep_source = bool(kwargs.pop('keep_source'))
|
||||
kwargs.pop('keep')
|
||||
elif 'keep_source' in kwargs:
|
||||
keep_source = bool(kwargs.pop('keep_source'))
|
||||
elif 'keep' in kwargs:
|
||||
keep_source = bool(kwargs.pop('keep'))
|
||||
else:
|
||||
# Neither was passed, default is True
|
||||
keep_source = True
|
||||
|
||||
if not _path_is_abs(name):
|
||||
ret['comment'] = '{0} is not an absolute path'.format(name)
|
||||
return ret
|
||||
|
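Because ``keep`` survives only as a legacy kwarg, the four-way branch above boils down to a precedence rule: ``keep_source`` wins, ``keep`` is the fallback, and the default is ``True``. A condensed equivalent (sketch only, warning handling omitted):

.. code-block:: python

    def resolve_keep_source(kwargs):
        if 'keep_source' in kwargs:
            kwargs.pop('keep', None)  # the legacy alias is ignored
            return bool(kwargs.pop('keep_source'))
        if 'keep' in kwargs:
            return bool(kwargs.pop('keep'))
        return True  # neither passed

    print(resolve_keep_source({'keep_source': False, 'keep': True}))  # False
    print(resolve_keep_source({'keep': False}))                       # False
    print(resolve_keep_source({}))                                    # True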
@@ -730,10 +767,10 @@ def extracted(name,
     urlparsed_source = _urlparse(source_match)
     source_hash_basename = urlparsed_source.path or urlparsed_source.netloc
 
-    source_is_local = urlparsed_source.scheme in ('', 'file')
+    source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS
     if source_is_local:
         # Get rid of "file://" from start of source_match
-        source_match = urlparsed_source.path
+        source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path))
         if not os.path.isfile(source_match):
             ret['comment'] = 'Source file \'{0}\' does not exist'.format(source_match)
             return ret
@@ -882,95 +919,59 @@ def extracted(name,
         source_sum = {}
 
     if source_is_local:
-        cached_source = source_match
+        cached = source_match
     else:
-        cached_source = os.path.join(
-            __opts__['cachedir'],
-            'files',
-            __env__,
-            re.sub(r'[:/\\]', '_', source_hash_basename),
-        )
-
-        if os.path.isdir(cached_source):
-            # Prevent a traceback from attempting to read from a directory path
-            salt.utils.rm_rf(cached_source)
-
-        existing_cached_source_sum = _read_cached_checksum(cached_source)
-
-    if source_is_local:
-        # No need to download archive, it's local to the minion
-        update_source = False
-    else:
-        if not os.path.isfile(cached_source):
-            # Archive not cached, we need to download it
-            update_source = True
-        else:
-            # Archive is cached, keep=True likely used in prior run. If we need
-            # to verify the hash, then we *have* to update the source archive
-            # to know whether or not the hash changed. Hence the below
-            # statement. bool(source_hash) will be True if source_hash was
-            # passed, and otherwise False.
-            update_source = bool(source_hash)
-
-    if update_source:
         if __opts__['test']:
             ret['result'] = None
             ret['comment'] = (
-                'Archive {0} would be downloaded to cache and checked to '
-                'discover if extraction is necessary'.format(
+                'Archive {0} would be cached (if necessary) and checked to '
+                'discover if extraction is needed'.format(
                     salt.utils.url.redact_http_basic_auth(source_match)
                 )
             )
             return ret
 
-        # NOTE: This will result in more than one copy of the source archive on
-        # the minion. The reason this is necessary is because if we are
-        # tracking the checksum using source_hash_update, we need a location
-        # where we can place the checksum file alongside the cached source
-        # file, where it won't be overwritten by caching a file with the same
-        # name in the same parent dir as the source file. Long term, we should
-        # come up with a better solution for this.
-        file_result = __states__['file.managed'](cached_source,
-                                                 source=source_match,
-                                                 source_hash=source_hash,
-                                                 source_hash_name=source_hash_name,
-                                                 makedirs=True,
-                                                 skip_verify=skip_verify)
-        log.debug('file.managed: {0}'.format(file_result))
-
-        # Prevent a traceback if errors prevented the above state from getting
-        # off the ground.
-        if isinstance(file_result, list):
-            try:
-                ret['comment'] = '\n'.join(file_result)
-            except TypeError:
-                ret['comment'] = '\n'.join([str(x) for x in file_result])
+        if 'file.cached' not in __states__:
+            # Shouldn't happen unless there is a traceback keeping
+            # salt/states/file.py from being processed through the loader. If
+            # that is the case, we have much more important problems as _all_
+            # file states would be unavailable.
+            ret['comment'] = (
+                'Unable to cache {0}, file.cached state not available'.format(
+                    source_match
+                )
+            )
             return ret
 
-        try:
-            if not file_result['result']:
-                log.debug(
-                    'failed to download %s',
-                    salt.utils.url.redact_http_basic_auth(source_match)
-                )
-                return file_result
-        except TypeError:
-            if not file_result:
-                log.debug(
-                    'failed to download %s',
-                    salt.utils.url.redact_http_basic_auth(source_match)
-                )
-                return file_result
-    else:
-        log.debug(
-            'Archive %s is already in cache',
-            salt.utils.url.redact_http_basic_auth(source_match)
-        )
+        try:
+            result = __states__['file.cached'](source_match,
+                                               source_hash=source_hash,
+                                               source_hash_name=source_hash_name,
+                                               skip_verify=skip_verify,
+                                               saltenv=__env__)
+        except Exception as exc:
+            msg = 'Failed to cache {0}: {1}'.format(source_match, exc.__str__())
+            log.exception(msg)
+            ret['comment'] = msg
+            return ret
+        else:
+            log.debug('file.cached: {0}'.format(result))
+
+        if result['result']:
+            # Get the path of the file in the minion cache
+            cached = __salt__['cp.is_cached'](source_match)
+        else:
+            log.debug(
+                'failed to download %s',
+                salt.utils.url.redact_http_basic_auth(source_match)
+            )
+            return result
+
+    existing_cached_source_sum = _read_cached_checksum(cached)
 
     if source_hash and source_hash_update and not skip_verify:
         # Create local hash sum file if we're going to track sum update
-        _update_checksum(cached_source)
+        _update_checksum(cached)
 
     if archive_format == 'zip' and not password:
         log.debug('Checking %s to see if it is password-protected',
@@ -979,7 +980,7 @@ def extracted(name,
         # implicitly enabled by setting the "options" argument.
         try:
             encrypted_zip = __salt__['archive.is_encrypted'](
-                cached_source,
+                cached,
                 clean=False,
                 saltenv=__env__)
         except CommandExecutionError:
@@ -997,7 +998,7 @@ def extracted(name,
             return ret
 
     try:
-        contents = __salt__['archive.list'](cached_source,
+        contents = __salt__['archive.list'](cached,
                                             archive_format=archive_format,
                                             options=list_options,
                                             strip_components=strip_components,
@@ -1166,7 +1167,7 @@ def extracted(name,
     if not extraction_needed \
             and source_hash_update \
             and existing_cached_source_sum is not None \
-            and not _compare_checksum(cached_source, existing_cached_source_sum):
+            and not _compare_checksum(cached, existing_cached_source_sum):
         extraction_needed = True
         source_hash_trigger = True
     else:
@@ -1224,13 +1225,13 @@ def extracted(name,
             __states__['file.directory'](name, user=user, makedirs=True)
             created_destdir = True
 
-        log.debug('Extracting {0} to {1}'.format(cached_source, name))
+        log.debug('Extracting {0} to {1}'.format(cached, name))
         try:
             if archive_format == 'zip':
                 if use_cmd_unzip:
                     try:
                         files = __salt__['archive.cmd_unzip'](
-                            cached_source,
+                            cached,
                             name,
                             options=options,
                             trim_output=trim_output,
@@ -1240,7 +1241,7 @@ def extracted(name,
                         ret['comment'] = exc.strerror
                         return ret
                 else:
-                    files = __salt__['archive.unzip'](cached_source,
+                    files = __salt__['archive.unzip'](cached,
                                                       name,
                                                       options=options,
                                                       trim_output=trim_output,
@@ -1248,7 +1249,7 @@ def extracted(name,
                                                       **kwargs)
             elif archive_format == 'rar':
                 try:
-                    files = __salt__['archive.unrar'](cached_source,
+                    files = __salt__['archive.unrar'](cached,
                                                       name,
                                                       trim_output=trim_output,
                                                       **kwargs)
@@ -1258,7 +1259,7 @@ def extracted(name,
             else:
                 if options is None:
                     try:
-                        with closing(tarfile.open(cached_source, 'r')) as tar:
+                        with closing(tarfile.open(cached, 'r')) as tar:
                             tar.extractall(name)
                             files = tar.getnames()
                             if trim_output:
@@ -1266,7 +1267,7 @@ def extracted(name,
                     except tarfile.ReadError:
                         if salt.utils.which('xz'):
                             if __salt__['cmd.retcode'](
-                                    ['xz', '-t', cached_source],
+                                    ['xz', '-t', cached],
                                     python_shell=False,
                                     ignore_retcode=True) == 0:
                                 # XZ-compressed data
@@ -1282,7 +1283,7 @@ def extracted(name,
                                 # pipe it to tar for extraction.
                                 cmd = 'xz --decompress --stdout {0} | tar xvf -'
                                 results = __salt__['cmd.run_all'](
-                                    cmd.format(_cmd_quote(cached_source)),
+                                    cmd.format(_cmd_quote(cached)),
                                     cwd=name,
                                     python_shell=True)
                                 if results['retcode'] != 0:
@@ -1352,7 +1353,7 @@ def extracted(name,
 
                     tar_cmd.append(tar_shortopts)
                     tar_cmd.extend(tar_longopts)
-                    tar_cmd.extend(['-f', cached_source])
+                    tar_cmd.extend(['-f', cached])
 
                     results = __salt__['cmd.run_all'](tar_cmd,
                                                       cwd=name,
@@ -1523,18 +1524,15 @@ def extracted(name,
         for item in enforce_failed:
             ret['comment'] += '\n- {0}'.format(item)
 
-    if not source_is_local and not keep:
-        for path in (cached_source, __salt__['cp.is_cached'](source_match)):
-            if not path:
-                continue
-            log.debug('Cleaning cached source file %s', path)
-            try:
-                os.remove(path)
-            except OSError as exc:
-                if exc.errno != errno.ENOENT:
-                    log.error(
-                        'Failed to clean cached source file %s: %s',
-                        cached_source, exc.__str__()
-                    )
+    if not source_is_local:
+        if keep_source:
+            log.debug('Keeping cached source file %s', cached)
+        else:
+            log.debug('Cleaning cached source file %s', cached)
+            result = __states__['file.not_cached'](source_match, saltenv=__env__)
+            if not result['result']:
+                # Don't let failure to delete cached file cause the state
+                # itself to fail, just drop it in the warnings.
+                ret.setdefault('warnings', []).append(result['comment'])
 
     return ret

@@ -63,13 +63,16 @@ import logging
 
 log = logging.getLogger(__name__)
 
+__virtualname__ = 'boto_kinesis'
+
 
 def __virtual__():
     '''
     Only load if boto_kinesis is available.
     '''
-    ret = 'boto_kinesis' if 'boto_kinesis.exists' in __salt__ else False
-    return ret
+    if 'boto_kinesis.exists' in __salt__:
+        return __virtualname__
+    return False, 'The boto_kinesis module could not be loaded: boto libraries not found.'
 
 
 def present(name,

@@ -97,29 +97,61 @@ def installed(name, version=None, source=None, force=False, pre_versions=False,
             ret['changes'] = {name: 'Version {0} will be installed'
                                     ''.format(version)}
         else:
-            ret['changes'] = {name: 'Will be installed'}
+            ret['changes'] = {name: 'Latest version will be installed'}
 
     # Package installed
     else:
         version_info = __salt__['chocolatey.version'](name, check_remote=True)
 
         full_name = name
-        lower_name = name.lower()
         for pkg in version_info:
-            if lower_name == pkg.lower():
+            if name.lower() == pkg.lower():
                 full_name = pkg
 
         available_version = version_info[full_name]['available'][0]
-        version = version if version else available_version
         installed_version = version_info[full_name]['installed'][0]
 
-        if force:
-            ret['changes'] = {name: 'Version {0} will be forcibly installed'
-                                    ''.format(version)}
-        elif allow_multiple:
-            ret['changes'] = {name: 'Version {0} will be installed side by side'
-                                    ''.format(version)}
+        if version:
+            if salt.utils.compare_versions(
+                    ver1=installed_version, oper="==", ver2=version):
+                if force:
+                    ret['changes'] = {
+                        name: 'Version {0} will be reinstalled'.format(version)}
+                    ret['comment'] = 'Reinstall {0} {1}' \
+                                     ''.format(full_name, version)
+                else:
+                    ret['comment'] = '{0} {1} is already installed' \
+                                     ''.format(name, version)
+                    if __opts__['test']:
+                        ret['result'] = None
+                    return ret
+            else:
+                if allow_multiple:
+                    ret['changes'] = {
+                        name: 'Version {0} will be installed side by side with '
+                              'Version {1} if supported'
+                              ''.format(version, installed_version)}
+                    ret['comment'] = 'Install {0} {1} side-by-side with {0} {2}' \
+                                     ''.format(full_name, version, installed_version)
+                else:
+                    ret['changes'] = {
+                        name: 'Version {0} will be installed over Version {1} '
+                              ''.format(version, installed_version)}
+                    ret['comment'] = 'Install {0} {1} over {0} {2}' \
+                                     ''.format(full_name, version, installed_version)
+                    force = True
         else:
-            ret['comment'] = 'The Package {0} is already installed'.format(name)
-            return ret
+            version = installed_version
+            if force:
+                ret['changes'] = {
+                    name: 'Version {0} will be reinstalled'.format(version)}
+                ret['comment'] = 'Reinstall {0} {1}' \
+                                 ''.format(full_name, version)
+            else:
+                ret['comment'] = '{0} {1} is already installed' \
+                                 ''.format(name, version)
+                if __opts__['test']:
+                    ret['result'] = None
+                return ret
 
     if __opts__['test']:
         ret['result'] = None

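The restructured branch above is easier to follow as a decision table: pinned-and-equal means reinstall only under ``force``; pinned-and-different means side-by-side when ``allow_multiple``, else a forced install over the old version; unpinned means reinstall only under ``force``. A condensed sketch (plain equality stands in for ``salt.utils.compare_versions``):

.. code-block:: python

    def plan(requested, installed, force=False, allow_multiple=False):
        if requested is None or requested == installed:
            return 'reinstall' if force else 'already installed'
        if allow_multiple:
            return 'install side-by-side'
        return 'install over existing (force implied)'

    for case in ((None, '1.0'), ('1.0', '1.0'), ('2.0', '1.0')):
        print(case, '->', plan(*case))
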
@@ -294,6 +294,7 @@ if salt.utils.is_windows():
 # Import 3rd-party libs
 import salt.ext.six as six
 from salt.ext.six.moves import zip_longest
+from salt.ext.six.moves.urllib.parse import urlparse as _urlparse  # pylint: disable=no-name-in-module
 if salt.utils.is_windows():
     import pywintypes
     import win32com.client
@@ -1519,6 +1520,7 @@ def managed(name,
             source=None,
             source_hash='',
             source_hash_name=None,
+            keep_source=True,
             user=None,
             group=None,
             mode=None,
@@ -1717,6 +1719,15 @@ def managed(name,
 
         .. versionadded:: 2016.3.5
 
+    keep_source : True
+        Set to ``False`` to discard the cached copy of the source file once the
+        state completes. This can be useful for larger files to keep them from
+        taking up space in minion cache. However, keep in mind that discarding
+        the source file will result in the state needing to re-download the
+        source file if the state is run again.
+
+        .. versionadded:: 2017.7.3
+
     user
         The user to own the file, this defaults to the user salt is running as
         on the minion
@@ -2415,8 +2426,9 @@ def managed(name,
         except Exception as exc:
             ret['changes'] = {}
             log.debug(traceback.format_exc())
-            if os.path.isfile(tmp_filename):
-                os.remove(tmp_filename)
+            salt.utils.files.remove(tmp_filename)
+            if not keep_source and sfn:
+                salt.utils.files.remove(sfn)
             return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
 
         # file being updated to verify using check_cmd
@@ -2434,15 +2446,9 @@ def managed(name,
         cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
         if isinstance(cret, dict):
             ret.update(cret)
-            if os.path.isfile(tmp_filename):
-                os.remove(tmp_filename)
-            if sfn and os.path.isfile(sfn):
-                os.remove(sfn)
+            salt.utils.files.remove(tmp_filename)
             return ret
 
-        if sfn and os.path.isfile(sfn):
-            os.remove(sfn)
-
         # Since we generated a new tempfile and we are not returning here
         # lets change the original sfn to the new tempfile or else we will
         # get file not found
@@ -2490,10 +2496,10 @@ def managed(name,
         log.debug(traceback.format_exc())
         return _error(ret, 'Unable to manage file: {0}'.format(exc))
     finally:
-        if tmp_filename and os.path.isfile(tmp_filename):
-            os.remove(tmp_filename)
-        if sfn and os.path.isfile(sfn):
-            os.remove(sfn)
+        if tmp_filename:
+            salt.utils.files.remove(tmp_filename)
+        if not keep_source and sfn:
+            salt.utils.files.remove(sfn)
 
 
 _RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs']
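The cleanup hunks above swap the repeated ``os.path.isfile()``/``os.remove()`` dance for ``salt.utils.files.remove()``. Judging by how it is called here, that helper behaves like an ENOENT-tolerant unlink (an assumption about its semantics, not a quote of its source):

.. code-block:: python

    import errno
    import os

    def remove(path):
        # Unlink `path`, tolerating a file that is already gone -- the
        # behavior the calling code relies on (assumed, not Salt's source).
        try:
            os.remove(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

    remove('/tmp/nonexistent-tempfile')  # no error even if already gone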
@@ -3022,6 +3028,7 @@ def directory(name,
 
 def recurse(name,
             source,
+            keep_source=True,
             clean=False,
             require=None,
             user=None,
@@ -3053,6 +3060,15 @@ def recurse(name,
         located on the master in the directory named spam, and is called eggs,
         the source string is salt://spam/eggs
 
+    keep_source : True
+        Set to ``False`` to discard the cached copy of the source file once the
+        state completes. This can be useful for larger files to keep them from
+        taking up space in minion cache. However, keep in mind that discarding
+        the source file will result in the state needing to re-download the
+        source file if the state is run again.
+
+        .. versionadded:: 2017.7.3
+
     clean
         Make sure that only files that are set up by salt and required by this
         function are kept. If this option is set then everything in this
@@ -3333,6 +3349,7 @@ def recurse(name,
         _ret = managed(
             path,
             source=source,
+            keep_source=keep_source,
             user=user,
             group=group,
             mode='keep' if keep_mode else file_mode,
@@ -6423,3 +6440,376 @@ def shortcut(
         ret['comment'] += (', but was unable to set ownership to '
                            '{0}'.format(user))
     return ret
+
+
+def cached(name,
+           source_hash='',
+           source_hash_name=None,
+           skip_verify=False,
+           saltenv='base'):
+    '''
+    .. versionadded:: 2017.7.3
+
+    Ensures that a file is saved to the minion's cache. This state is primarily
+    invoked by other states to ensure that we do not re-download a source file
+    if we do not need to.
+
+    name
+        The URL of the file to be cached. To cache a file from an environment
+        other than ``base``, either use the ``saltenv`` argument or include the
+        saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
+
+        .. note::
+            A list of URLs is not supported, this must be a single URL. If a
+            local file is passed here, then the state will obviously not try to
+            download anything, but it will compare a hash if one is specified.
+
+    source_hash
+        See the documentation for this same argument in the
+        :py:func:`file.managed <salt.states.file.managed>` state.
+
+        .. note::
+            For remote files not originating from the ``salt://`` fileserver,
+            such as http(s) or ftp servers, this state will not re-download the
+            file if the locally-cached copy matches this hash. This is done to
+            prevent unnecessary downloading on repeated runs of this state. To
+            update the cached copy of a file, it is necessary to update this
+            hash.
+
+    source_hash_name
+        See the documentation for this same argument in the
+        :py:func:`file.managed <salt.states.file.managed>` state.
+
+    skip_verify
+        See the documentation for this same argument in the
+        :py:func:`file.managed <salt.states.file.managed>` state.
+
+        .. note::
+            Setting this to ``True`` will result in a copy of the file being
+            downloaded from a remote (http(s), ftp, etc.) source each time the
+            state is run.
+
+    saltenv
+        Used to specify the environment from which to download a file from the
+        Salt fileserver (i.e. those with ``salt://`` URL).
+
+
+    This state will in most cases not be useful in SLS files, but it is useful
+    when writing a state or remote-execution module that needs to make sure
+    that a file at a given URL has been downloaded to the cachedir. One example
+    of this is in the :py:func:`archive.extracted <salt.states.file.extracted>`
+    state:
+
+    .. code-block:: python
+
+        result = __states__['file.cached'](source_match,
+                                           source_hash=source_hash,
+                                           source_hash_name=source_hash_name,
+                                           skip_verify=skip_verify,
+                                           saltenv=__env__)
+
+    This will return a dictionary containing the state's return data, including
+    a ``result`` key which will state whether or not the state was successful.
+    Note that this will not catch exceptions, so it is best used within a
+    try/except.
+
+    Once this state has been run from within another state or remote-execution
+    module, the actual location of the cached file can be obtained using
+    :py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
+
+    .. code-block:: python
+
+        cached = __salt__['cp.is_cached'](source_match)
+
+    This function will return the cached path of the file, or an empty string
+    if the file is not present in the minion cache.
'''
|
||||
ret = {'changes': {},
|
||||
'comment': '',
|
||||
'name': name,
|
||||
'result': False}
|
||||
|
||||
try:
|
||||
parsed = _urlparse(name)
|
||||
except Exception:
|
||||
ret['comment'] = 'Only URLs or local file paths are valid input'
|
||||
return ret
|
||||
|
||||
# This if statement will keep the state from proceeding if a remote source
|
||||
# is specified and no source_hash is presented (unless we're skipping hash
|
||||
# verification).
|
||||
if not skip_verify \
|
||||
and not source_hash \
|
||||
and parsed.scheme in salt.utils.files.REMOTE_PROTOS:
|
||||
ret['comment'] = (
|
||||
'Unable to verify upstream hash of source file {0}, please set '
|
||||
'source_hash or set skip_verify to True'.format(name)
|
||||
)
|
||||
return ret
|
||||
|
||||
if source_hash:
|
||||
# Get the hash and hash type from the input. This takes care of parsing
|
||||
# the hash out of a file containing checksums, if that is how the
|
||||
# source_hash was specified.
|
||||
try:
|
||||
source_sum = __salt__['file.get_source_sum'](
|
||||
source=name,
|
||||
source_hash=source_hash,
|
||||
source_hash_name=source_hash_name,
|
||||
saltenv=saltenv)
|
||||
except CommandExecutionError as exc:
|
||||
ret['comment'] = exc.strerror
|
||||
return ret
|
||||
else:
|
||||
if not source_sum:
|
||||
# We shouldn't get here, problems in retrieving the hash in
|
||||
# file.get_source_sum should result in a CommandExecutionError
|
||||
# being raised, which we catch above. Nevertheless, we should
|
||||
# provide useful information in the event that
|
||||
# file.get_source_sum regresses.
|
||||
ret['comment'] = (
|
||||
'Failed to get source hash from {0}. This may be a bug. '
|
||||
'If this error persists, please report it and set '
|
||||
'skip_verify to True to work around it.'.format(source_hash)
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
source_sum = {}
|
||||
|
||||
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
|
||||
# Source is a local file path
|
||||
full_path = os.path.realpath(os.path.expanduser(parsed.path))
|
||||
if os.path.exists(full_path):
|
||||
if not skip_verify and source_sum:
|
||||
# Enforce the hash
|
||||
local_hash = __salt__['file.get_hash'](
|
||||
full_path,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
if local_hash == source_sum['hsum']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File {0} is present on the minion and has hash '
|
||||
'{1}'.format(full_path, local_hash)
|
||||
)
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'File {0} is present on the minion, but the hash ({1}) '
|
||||
'does not match the specified hash ({2})'.format(
|
||||
full_path, local_hash, source_sum['hsum']
|
||||
)
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'File {0} is present on the minion'.format(
|
||||
full_path
|
||||
)
|
||||
return ret
|
||||
else:
|
||||
ret['comment'] = 'File {0} is not present on the minion'.format(
|
||||
full_path
|
||||
)
|
||||
return ret
|
||||
|
||||
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
|
||||
|
||||
if local_copy:
|
||||
# File is already cached
|
||||
pre_hash = __salt__['file.get_hash'](
|
||||
local_copy,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
|
||||
if not skip_verify and source_sum:
|
||||
# Get the local copy's hash to compare with the hash that was
|
||||
# specified via source_hash. If it matches, we can exit early from
|
||||
# the state without going any further, because the file is cached
|
||||
# with the correct hash.
|
||||
if pre_hash == source_sum['hsum']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = (
|
||||
'File is already cached to {0} with hash {1}'.format(
|
||||
local_copy, pre_hash
|
||||
)
|
||||
)
|
||||
else:
|
||||
pre_hash = None
|
||||
|
||||
def _try_cache(path, checksum):
|
||||
'''
|
||||
This helper is not needed anymore in develop as the fileclient in the
|
||||
develop branch now has means of skipping a download if the existing
|
||||
hash matches one passed to cp.cache_file. Remove this helper and the
|
||||
code that invokes it, once we have merged forward into develop.
|
||||
'''
|
||||
if not path or not checksum:
|
||||
return True
|
||||
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
|
||||
if form is None:
|
||||
# Shouldn't happen, an invalid checksum length should be caught
|
||||
# before we get here. But in the event this gets through, don't let
|
||||
# it cause any trouble, and just return True.
|
||||
return True
|
||||
try:
|
||||
return salt.utils.get_hash(path, form=form) != checksum
|
||||
except (IOError, OSError, ValueError):
|
||||
# Again, shouldn't happen, but don't let invalid input/permissions
|
||||
# in the call to get_hash blow this up.
|
||||
return True
|
||||
|
||||
# Cache the file. Note that this will not actually download the file if
|
||||
# either of the following is true:
|
||||
# 1. source is a salt:// URL and the fileserver determines that the hash
|
||||
# of the minion's copy matches that of the fileserver.
|
||||
# 2. File is remote (http(s), ftp, etc.) and the specified source_hash
|
||||
# matches the cached copy.
|
||||
# Remote, non salt:// sources _will_ download if a copy of the file was
|
||||
# not already present in the minion cache.
|
||||
if _try_cache(local_copy, source_sum.get('hsum')):
|
||||
# The _try_cache helper is obsolete in the develop branch. Once merged
|
||||
# forward, remove the helper as well as this if statement, and dedent
|
||||
# the below block.
|
||||
try:
|
||||
local_copy = __salt__['cp.cache_file'](
|
||||
name,
|
||||
saltenv=saltenv)
|
||||
# Once this is merged into develop, uncomment the source_hash
|
||||
# line below and add it to the list of arguments to
|
||||
# cp.cache_file (note that this also means removing the
|
||||
# close-parenthesis above and replacing it with a comma). The
|
||||
# develop branch has modifications to the fileclient which will
|
||||
# allow it to skip the download if the source_hash matches what
|
||||
# is passed to cp.cache_file, so the helper is just a stopgap
|
||||
# for the 2017.7 release cycle.
|
||||
#source_hash=source_sum.get('hsum'))
|
||||
except Exception as exc:
|
||||
ret['comment'] = exc.__str__()
|
||||
return ret
|
||||
|
||||
if not local_copy:
|
||||
ret['comment'] = (
|
||||
'Failed to cache {0}, check minion log for more '
|
||||
'information'.format(name)
|
||||
)
|
||||
return ret
|
||||
|
||||
post_hash = __salt__['file.get_hash'](
|
||||
local_copy,
|
||||
source_sum.get('hash_type', __opts__['hash_type']))
|
||||
|
||||
if pre_hash != post_hash:
|
||||
ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash}
|
||||
|
||||
    # Check the hash, if we're enforcing one. Note that this will be the first
    # hash check if the file was not previously cached, and the 2nd hash check
    # if it was cached and then re-downloaded above.
    if not skip_verify and source_sum:
        if post_hash == source_sum['hsum']:
            ret['result'] = True
            ret['comment'] = (
                'File is already cached to {0} with hash {1}'.format(
                    local_copy, post_hash
                )
            )
        else:
            ret['comment'] = (
                'File is cached to {0}, but the hash ({1}) does not match '
                'the specified hash ({2})'.format(
                    local_copy, post_hash, source_sum['hsum']
                )
            )
        return ret

    # We're not enforcing a hash, and we already know that the file was
    # successfully cached, so we know the state was successful.
    ret['result'] = True
    ret['comment'] = 'File is cached to {0}'.format(local_copy)
    return ret


def not_cached(name, saltenv='base'):
    '''
    Ensures that a file is not present in the minion's cache, deleting it
    if found. This state is primarily invoked by other states to ensure
    that a stale source file is not reused when a fresh copy is needed.

    name
        The URL of the file to be cached. To cache a file from an environment
        other than ``base``, either use the ``saltenv`` argument or include the
        saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).

        .. note::
            A list of URLs is not supported, this must be a single URL. If a
            local file is passed here, the state will take no action.

    saltenv
        Used to specify the environment from which to download a file from the
        Salt fileserver (i.e. those with ``salt://`` URL).
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': False}

    try:
        parsed = _urlparse(name)
    except Exception:
        ret['comment'] = 'Only URLs or local file paths are valid input'
        return ret
    else:
        if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
            full_path = os.path.realpath(os.path.expanduser(parsed.path))
            ret['result'] = True
            ret['comment'] = (
                'File {0} is a local path, no action taken'.format(
                    full_path
                )
            )
            return ret

    local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)

    if local_copy:
        try:
            os.remove(local_copy)
        except Exception as exc:
            ret['comment'] = 'Failed to delete {0}: {1}'.format(
                local_copy, exc.__str__()
            )
        else:
            ret['result'] = True
            ret['changes']['deleted'] = True
            ret['comment'] = '{0} was deleted'.format(local_copy)
    else:
        ret['result'] = True
        ret['comment'] = '{0} is not cached'.format(name)
    return ret
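As an illustration (not part of this diff), other state modules can delegate
download and hash verification to the ``file.cached`` state above via
``__states__``; the ``source_match``, ``source_hash``, and ``skip_verify``
names below are assumptions standing in for the caller's own inputs:

.. code-block:: python

    # Hypothetical caller: let file.cached handle caching and verification.
    result = __states__['file.cached'](source_match,
                                       source_hash=source_hash,
                                       skip_verify=skip_verify,
                                       saltenv=__env__)
    if not result['result']:
        # Caching failed or the hash did not match; surface the comment.
        return result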
@ -80,8 +80,6 @@ def _construct_yaml_str(self, node):
    Construct for yaml
    '''
    return self.construct_scalar(node)

YamlLoader.add_constructor(u'tag:yaml.org,2002:str',
                           _construct_yaml_str)
YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp',
                           _construct_yaml_str)
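For context, those ``add_constructor`` calls route the given scalar tags
through a plain-string constructor so that, for example, timestamps are not
converted to datetime objects. A minimal standalone sketch of the same PyYAML
mechanism (independent of Salt's ``YamlLoader``; the ``StrLoader`` name is
invented for the example):

.. code-block:: python

    import yaml

    class StrLoader(yaml.SafeLoader):
        '''Loader that keeps timestamp scalars as raw strings.'''

    def _construct_yaml_str(loader, node):
        return loader.construct_scalar(node)

    # Route timestamp scalars through the plain-string constructor.
    StrLoader.add_constructor(u'tag:yaml.org,2002:timestamp',
                              _construct_yaml_str)

    print(yaml.load('date: 2017-07-01', Loader=StrLoader))
    # {'date': '2017-07-01'}  -- a string, not datetime.date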
@ -93,7 +93,7 @@ def options_present(name, sections=None, separator='=', strict=False):
                    del changes[section_name]
        else:
            changes = __salt__['ini.set_option'](name, sections, separator)
    except IOError as err:
    except (IOError, KeyError) as err:
        ret['comment'] = "{0}".format(err)
        ret['result'] = False
        return ret

@ -102,12 +102,10 @@ def options_present(name, sections=None, separator='=', strict=False):
        ret['comment'] = 'Errors encountered. {0}'.format(changes['error'])
        ret['changes'] = {}
    else:
        if changes:
            ret['changes'] = changes
            ret['comment'] = 'Changes take effect'
        else:
            ret['changes'] = {}
            ret['comment'] = 'No changes take effect'
        for name, body in changes.items():
            if body:
                ret['comment'] = 'Changes take effect'
                ret['changes'].update({name: changes[name]})
    return ret
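The reworked loop above reports only sections that actually changed, instead
of the earlier all-or-nothing check. A small standalone sketch of that
filtering logic (the sample ``changes`` dict is made up for illustration):

.. code-block:: python

    changes = {'section_a': {'key': 'new_value'}, 'section_b': {}}
    ret = {'changes': {}, 'comment': 'No changes take effect'}
    for name, body in changes.items():
        if body:  # skip sections whose diff came back empty
            ret['comment'] = 'Changes take effect'
            ret['changes'].update({name: body})
    # ret['changes'] now contains only section_a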
@ -2,6 +2,8 @@
'''
Linux File Access Control Lists

The Linux ACL state module requires the `getfacl` and `setfacl` binaries.

Ensure a Linux ACL is present

.. code-block:: yaml

@ -47,7 +49,7 @@ def __virtual__():
    if salt.utils.which('getfacl') and salt.utils.which('setfacl'):
        return __virtualname__

    return False
    return False, 'The linux_acl state cannot be loaded: the getfacl or setfacl binary is not in the path.'


def present(name, acl_type, acl_name='', perms='', recurse=False):

@ -81,11 +83,12 @@ def present(name, acl_type, acl_name='', perms='', recurse=False):
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still use '' for the other ACL types

    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_name == '':
        _search_name = __current_perms[name].get('comment').get(_acl_type)
        _search_name = __current_perms[name].get('comment').get(_acl_type, '')
    else:
        _search_name = acl_name

@ -150,11 +153,12 @@ def absent(name, acl_type, acl_name='', perms='', recurse=False):
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still use '' for the other ACL types

    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_name == '':
        _search_name = __current_perms[name].get('comment').get(_acl_type)
        _search_name = __current_perms[name].get('comment').get(_acl_type, '')
    else:
        _search_name = acl_name
@ -310,17 +310,27 @@ def module_remove(name):

def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_level=None):
    '''
    Makes sure a SELinux policy for a given filespec (name),
    filetype and SELinux context type is present.
    .. versionadded:: 2017.7.0

    name: filespec of the file or directory. Regex syntax is allowed.
    sel_type: SELinux context type. There are many.
    filetype: The SELinux filetype specification.
        Use one of [a, f, d, c, b, s, l, p].
        See also `man semanage-fcontext`.
        Defaults to 'a' (all files)
    sel_user: The SELinux user.
    sel_level: The SELinux MLS range
    Makes sure a SELinux policy for a given filespec (name), filetype
    and SELinux context type is present.

    name
        filespec of the file or directory. Regex syntax is allowed.

    sel_type
        SELinux context type. There are many.

    filetype
        The SELinux filetype specification. Use one of [a, f, d, c, b,
        s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
        (all files).

    sel_user
        The SELinux user.

    sel_level
        The SELinux MLS range.
    '''
    ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
    new_state = {}

@ -383,17 +393,27 @@ def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_lev

def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel_level=None):
    '''
    Makes sure an SELinux file context policy for a given filespec (name),
    filetype and SELinux context type is absent.
    .. versionadded:: 2017.7.0

    name: filespec of the file or directory. Regex syntax is allowed.
    filetype: The SELinux filetype specification.
        Use one of [a, f, d, c, b, s, l, p].
        See also `man semanage-fcontext`.
        Defaults to 'a' (all files).
    sel_type: The SELinux context type. There are many.
    sel_user: The SELinux user.
    sel_level: The SELinux MLS range
    Makes sure an SELinux file context policy for a given filespec
    (name), filetype and SELinux context type is absent.

    name
        filespec of the file or directory. Regex syntax is allowed.

    filetype
        The SELinux filetype specification. Use one of [a, f, d, c, b,
        s, l, p]. See also `man semanage-fcontext`. Defaults to 'a'
        (all files).

    sel_type
        The SELinux context type. There are many.

    sel_user
        The SELinux user.

    sel_level
        The SELinux MLS range.
    '''
    ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
    new_state = {}

@ -433,7 +453,10 @@ def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel

def fcontext_policy_applied(name, recursive=False):
    '''
    Checks and makes sure the SELinux policies for a given filespec are applied.
    .. versionadded:: 2017.7.0

    Checks and makes sure the SELinux policies for a given filespec are
    applied.
    '''
    ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''}
@ -23,10 +23,21 @@ from salt.ext import six

log = logging.getLogger(__name__)

LOCAL_PROTOS = ('', 'file')
REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3')
VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS
TEMPFILE_PREFIX = '__salt.tmp.'

HASHES = {
    'sha512': 128,
    'sha384': 96,
    'sha256': 64,
    'sha224': 56,
    'sha1': 40,
    'md5': 32,
}
HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)])


def guess_archive_type(name):
    '''

@ -271,6 +282,8 @@ def safe_filename_leaf(file_basename):
    windows is \\ / : * ? " < > | posix is /

    .. versionadded:: 2017.7.2

    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    def _replace(re_obj):
        return urllib.quote(re_obj.group(0), safe=u'')

@ -283,16 +296,35 @@ def safe_filename_leaf(file_basename):
    return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE)


def safe_filepath(file_path_name):
def safe_filepath(file_path_name, dir_sep=None):
    '''
    Input the full path and filename, splits on directory separator and calls safe_filename_leaf for
    each part of the path.
    each part of the path. dir_sep allows the caller to force a particular directory separator character

    .. versionadded:: 2017.7.2

    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    if not dir_sep:
        dir_sep = os.sep
    # Normally if file_path_name or dir_sep is Unicode then the output will be Unicode
    # This code ensures the output type is the same as file_path_name
    if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type):
        dir_sep = dir_sep.encode('ascii')  # This should not be executed under PY3
    # splitdrive only sets the drive on the windows platform
    (drive, path) = os.path.splitdrive(file_path_name)
    path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)])
    path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)])
    if drive:
        return os.sep.join([drive, path])
    else:
        return path
        path = dir_sep.join([drive, path])
    return path


def remove(path):
    '''
    Runs os.remove(path) and suppresses the OSError if the file doesn't exist
    '''
    try:
        os.remove(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
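Because each supported digest has a unique hex length, the ``HASHES_REVMAP``
table added above lets callers infer the hash type from a bare checksum
(this is exactly what the ``_try_cache`` helper in the ``cached`` state relies
on). A small self-contained sketch of that lookup:

.. code-block:: python

    import hashlib

    # Same mapping as the diff above: digest name -> hex-string length.
    HASHES = {'sha512': 128, 'sha384': 96, 'sha256': 64,
              'sha224': 56, 'sha1': 40, 'md5': 32}
    HASHES_REVMAP = dict([(y, x) for x, y in HASHES.items()])

    checksum = hashlib.sha256(b'example').hexdigest()
    print(HASHES_REVMAP[len(checksum)])  # -> sha256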
@ -985,10 +985,37 @@ class CkMinions(object):
            auth_list.append(matcher)
        return auth_list

    def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
        '''
        Returns a list of authorisation matchers that a user is eligible for.
        This list is a combination of the provided personal matchers plus the
        matchers of any group the user is in.
        '''
        if auth_list is None:
            auth_list = []
        if permissive is None:
            permissive = self.opts.get('permissive_acl')
        name_matched = False
        for match in auth_provider:
            if match == '*' and not permissive:
                continue
            if match.endswith('%'):
                if match.rstrip('%') in groups:
                    auth_list.extend(auth_provider[match])
            else:
                if salt.utils.expr_match(match, name):
                    name_matched = True
                    auth_list.extend(auth_provider[match])
        if not permissive and not name_matched and '*' in auth_provider:
            auth_list.extend(auth_provider['*'])
        return auth_list

    def wheel_check(self, auth_list, fun):
        '''
        Check special API permissions
        '''
        if not auth_list:
            return False
        comps = fun.split('.')
        if len(comps) != 2:
            return False

@ -1020,6 +1047,8 @@ class CkMinions(object):
        '''
        Check special API permissions
        '''
        if not auth_list:
            return False
        comps = fun.split('.')
        if len(comps) != 2:
            return False

@ -1051,6 +1080,8 @@ class CkMinions(object):
        '''
        Check special API permissions
        '''
        if not auth_list:
            return False
        if form != 'cloud':
            comps = fun.split('.')
            if len(comps) != 2:
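To make the matcher semantics of ``fill_auth_list`` concrete: a ``%`` suffix
marks a group entry, a bare ``*`` is only honored as a fallback when nothing
else matched the user's name (unless ``permissive_acl`` is set). Below is a
standalone approximation of that loop, using ``fnmatch`` in place of
``salt.utils.expr_match``; the sample ACL dictionary is invented:

.. code-block:: python

    import fnmatch

    auth_provider = {
        'fred': ['test.ping'],      # direct user match
        'admins%': ['.*'],          # '%' suffix marks a group entry
        '*': ['grains.items'],      # fallback when no name matched
    }

    def fill_auth_list(name, groups, permissive=False):
        auth_list = []
        name_matched = False
        for match in auth_provider:
            if match == '*' and not permissive:
                continue
            if match.endswith('%'):
                if match.rstrip('%') in groups:
                    auth_list.extend(auth_provider[match])
            elif fnmatch.fnmatch(name, match):
                name_matched = True
                auth_list.extend(auth_provider[match])
        if not permissive and not name_matched and '*' in auth_provider:
            auth_list.extend(auth_provider['*'])
        return auth_list

    print(fill_auth_list('fred', ['admins']))  # ['test.ping', '.*']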
@ -9,6 +9,7 @@ from __future__ import absolute_import
import errno
import logging
import os
import re
import struct

# Import 3rd-party libs

@ -110,6 +111,11 @@ def readlink(path):
        # comes out in 8.3 form; convert it to LFN to make it look nicer
        target = win32file.GetLongPathName(target)
    except pywinerror as exc:
        # If target is on a UNC share, the decoded target will be in the format
        # "UNC\hostname\sharename\additional\subdirs\under\share". So, in
        # these cases, return the target path in the proper UNC path format.
        if target.startswith('UNC\\'):
            return re.sub(r'^UNC\\+', r'\\\\', target)
        # if file is not found (i.e. bad symlink), return it anyway like on *nix
        if exc.winerror == 2:
            return target
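The UNC rewrite above is a plain regex substitution; a quick standalone check
of its behavior (the share path is a made-up example):

.. code-block:: python

    import re

    target = r'UNC\fileserver\share\some\subdir'
    # Collapse the leading 'UNC\' prefix into the usual '\\host\share' form.
    print(re.sub(r'^UNC\\+', r'\\\\', target))
    # \\fileserver\share\some\subdir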
tests/integration/files/file/base/mysql/select_query.sql (new file)

@ -0,0 +1,7 @@
CREATE TABLE test_select (a INT);
insert into test_select values (1);
insert into test_select values (3);
insert into test_select values (4);
insert into test_select values (5);
update test_select set a=2 where a=1;
select * from test_select;

tests/integration/files/file/base/mysql/update_query.sql (new file)

@ -0,0 +1,3 @@
CREATE TABLE test_update (a INT);
insert into test_update values (1);
update test_update set a=2 where a=1;
@ -1280,6 +1280,7 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin):
    testdb1 = 'tes.t\'"saltdb'
    testdb2 = 't_st `(:=salt%b)'
    testdb3 = 'test `(:=salteeb)'
    test_file_query_db = 'test_query'
    table1 = 'foo'
    table2 = "foo `\'%_bar"
    users = {

@ -1391,13 +1392,19 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin):
            name=self.testdb1,
            connection_user=self.user,
            connection_pass=self.password,
        )
        )
        self.run_function(
            'mysql.db_remove',
            name=self.testdb2,
            connection_user=self.user,
            connection_pass=self.password,
        )
        )
        self.run_function(
            'mysql.db_remove',
            name=self.test_file_query_db,
            connection_user=self.user,
            connection_pass=self.password,
        )

    def _userCreation(self,
                      uname,

@ -1627,3 +1634,123 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin):
            "GRANT USAGE ON *.* TO ''@'localhost'",
            "GRANT DELETE ON `test ``(:=salteeb)`.* TO ''@'localhost'"
        ])


@skipIf(
    NO_MYSQL,
    'Please install MySQL bindings and a MySQL Server before running '
    'MySQL integration tests.'
)
class MysqlModuleFileQueryTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Test file query module
    '''

    user = 'root'
    password = 'poney'
    testdb = 'test_file_query'

    @destructiveTest
    def setUp(self):
        '''
        Test presence of MySQL server, enforce a root password, create users
        '''
        super(MysqlModuleFileQueryTest, self).setUp()
        NO_MYSQL_SERVER = True
        # now ensure we know the mysql root password
        # one of these two at least should work
        ret1 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
                 + self.user
                 + ' flush-privileges password "'
                 + self.password
                 + '"'
        )
        ret2 = self.run_state(
            'cmd.run',
            name='mysqladmin --host="localhost" -u '
                 + self.user
                 + ' --password="'
                 + self.password
                 + '" flush-privileges password "'
                 + self.password
                 + '"'
        )
        key, value = ret2.popitem()
        if value['result']:
            NO_MYSQL_SERVER = False
        else:
            self.skipTest('No MySQL Server running, or no root access on it.')
        # Create some users and a test db
        self.run_function(
            'mysql.db_create',
            name=self.testdb,
            connection_user=self.user,
            connection_pass=self.password,
            connection_db='mysql',
        )

    @destructiveTest
    def tearDown(self):
        '''
        Removes created users and db
        '''
        self.run_function(
            'mysql.db_remove',
            name=self.testdb,
            connection_user=self.user,
            connection_pass=self.password,
            connection_db='mysql',
        )

    @destructiveTest
    def test_update_file_query(self):
        '''
        Test query without any output
        '''
        ret = self.run_function(
            'mysql.file_query',
            database=self.testdb,
            file_name='salt://mysql/update_query.sql',
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password
        )
        self.assertTrue('query time' in ret)
        ret.pop('query time')
        self.assertEqual(ret, {'rows affected': 2})

    @destructiveTest
    def test_select_file_query(self):
        '''
        Test query with table output
        '''
        ret = self.run_function(
            'mysql.file_query',
            database=self.testdb,
            file_name='salt://mysql/select_query.sql',
            character_set='utf8',
            collate='utf8_general_ci',
            connection_user=self.user,
            connection_pass=self.password
        )
        expected = {
            'rows affected': 5,
            'rows returned': 4,
            'results': [
                [
                    ['2'],
                    ['3'],
                    ['4'],
                    ['5']
                ]
            ],
            'columns': [
                ['a']
            ],
        }
        self.assertTrue('query time' in ret)
        ret.pop('query time')
        self.assertEqual(ret, expected)

@ -12,6 +12,7 @@

# Python libs
from __future__ import absolute_import
import sys

# Salt libs
import salt.config

@ -45,14 +46,32 @@ class StatusBeaconTestCase(TestCase, LoaderModuleMockMixin):
    def test_empty_config(self, *args, **kwargs):
        config = {}
        ret = status.beacon(config)
        self.assertEqual(sorted(list(ret[0]['data'])), sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time']))

        if sys.platform.startswith('win'):
            expected = []
        else:
            expected = sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time'])

        self.assertEqual(sorted(list(ret[0]['data'])), expected)

    def test_deprecated_dict_config(self):
        config = {'time': ['all']}
        ret = status.beacon(config)
        self.assertEqual(list(ret[0]['data']), ['time'])

        if sys.platform.startswith('win'):
            expected = []
        else:
            expected = ['time']

        self.assertEqual(list(ret[0]['data']), expected)

    def test_list_config(self):
        config = [{'time': ['all']}]
        ret = status.beacon(config)
        self.assertEqual(list(ret[0]['data']), ['time'])

        if sys.platform.startswith('win'):
            expected = []
        else:
            expected = ['time']

        self.assertEqual(list(ret[0]['data']), expected)

@ -70,7 +70,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin):
        Set multiple salt process environment variables from a dict.
        Returns a dict.
        '''
        mock_environ = {'key': 'value'}
        mock_environ = {'KEY': 'value'}
        with patch.dict(os.environ, mock_environ):
            self.assertFalse(environ.setenv('environ'))

@ -83,7 +83,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin):
        with patch.dict(os.environ, mock_environ):
            mock_setval = MagicMock(return_value=None)
            with patch.object(environ, 'setval', mock_setval):
                self.assertEqual(environ.setenv({}, False, True, False)['key'],
                self.assertEqual(environ.setenv({}, False, True, False)['KEY'],
                                 None)

    def test_get(self):

@ -10,7 +10,7 @@ import textwrap

# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import TMP
from tests.support.unit import TestCase
from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, patch

# Import Salt libs

@ -89,45 +89,56 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin):
            'repl': 'baz=\\g<value>',
            'append_if_not_found': True,
        }
        base = 'foo=1\nbar=2'
        expected = '{base}\n{repl}\n'.format(base=base, **args)
        base = os.linesep.join(['foo=1', 'bar=2'])

        # File ending with a newline, no match
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write(base + '\n')
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(base + os.linesep))
            tfile.flush()
            filemod.replace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), expected)
        filemod.replace(tfile.name, **args)
        expected = os.linesep.join([base, 'baz=\\g<value>']) + os.linesep
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # File not ending with a newline, no match
        with tempfile.NamedTemporaryFile('w+') as tfile:
            tfile.write(base)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(base))
            tfile.flush()
            filemod.replace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), expected)
        filemod.replace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # A newline should not be added in empty files
        with tempfile.NamedTemporaryFile('w+') as tfile:
            filemod.replace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), args['repl'] + '\n')
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile:
            pass
        filemod.replace(tfile.name, **args)
        expected = args['repl'] + os.linesep
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # Using not_found_content, rather than repl
        with tempfile.NamedTemporaryFile('w+') as tfile:
            args['not_found_content'] = 'baz=3'
            expected = '{base}\n{not_found_content}\n'.format(base=base, **args)
            tfile.write(base)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(base))
            tfile.flush()
            filemod.replace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), expected)
        args['not_found_content'] = 'baz=3'
        expected = os.linesep.join([base, 'baz=3']) + os.linesep
        filemod.replace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # not appending if matches
        with tempfile.NamedTemporaryFile('w+') as tfile:
            base = 'foo=1\n#baz=42\nbar=2\n'
            expected = 'foo=1\nbaz=42\nbar=2\n'
            tfile.write(base)
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile:
            base = os.linesep.join(['foo=1', 'baz=42', 'bar=2'])
            tfile.write(salt.utils.to_bytes(base))
            tfile.flush()
        filemod.replace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        expected = base
        filemod.replace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)

    def test_backup(self):
        fext = '.bak'

@ -246,25 +257,26 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
        del self.tfile

    def test_replace_multiline(self):
        new_multiline_content = (
            "Who's that then?\nWell, how'd you become king,"
            "then?\nWe found them. I'm not a witch.\nWe shall"
            "say 'Ni' again to you, if you do not appease us."
        )
        new_multiline_content = os.linesep.join([
            "Who's that then?",
            "Well, how'd you become king, then?",
            "We found them. I'm not a witch.",
            "We shall say 'Ni' again to you, if you do not appease us."
        ])
        filemod.blockreplace(self.tfile.name,
                             '#-- START BLOCK 1',
                             '#-- END BLOCK 1',
                             new_multiline_content,
                             backup=False)

        with salt.utils.fopen(self.tfile.name, 'r') as fp:
        with salt.utils.fopen(self.tfile.name, 'rb') as fp:
            filecontent = fp.read()
        self.assertIn('#-- START BLOCK 1'
                      + "\n" + new_multiline_content
                      + "\n"
                      + '#-- END BLOCK 1', filecontent)
        self.assertNotIn('old content part 1', filecontent)
        self.assertNotIn('old content part 2', filecontent)
        self.assertIn(salt.utils.to_bytes(
            os.linesep.join([
                '#-- START BLOCK 1', new_multiline_content, '#-- END BLOCK 1'])),
            filecontent)
        self.assertNotIn(b'old content part 1', filecontent)
        self.assertNotIn(b'old content part 2', filecontent)

    def test_replace_append(self):
        new_content = "Well, I didn't vote for you."

@ -291,10 +303,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
                             backup=False,
                             append_if_not_found=True)

        with salt.utils.fopen(self.tfile.name, 'r') as fp:
            self.assertIn('#-- START BLOCK 2'
                          + "\n" + new_content
                          + '#-- END BLOCK 2', fp.read())
        with salt.utils.fopen(self.tfile.name, 'rb') as fp:
            self.assertIn(salt.utils.to_bytes(
                os.linesep.join([
                    '#-- START BLOCK 2',
                    '{0}#-- END BLOCK 2'.format(new_content)])),
                fp.read())

    def test_replace_append_newline_at_eof(self):
        '''

@ -308,27 +322,33 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
            'content': 'baz',
            'append_if_not_found': True,
        }
        block = '{marker_start}\n{content}{marker_end}\n'.format(**args)
        expected = base + '\n' + block
        block = os.linesep.join(['#start', 'baz#stop']) + os.linesep
        # File ending with a newline
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write(base + '\n')
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(base + os.linesep))
            tfile.flush()
            filemod.blockreplace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), expected)
        filemod.blockreplace(tfile.name, **args)
        expected = os.linesep.join([base, block])
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # File not ending with a newline
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write(base)
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(base))
            tfile.flush()
            filemod.blockreplace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), expected)
        filemod.blockreplace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)
        os.remove(tfile.name)

        # A newline should not be added in empty files
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            filemod.blockreplace(tfile.name, **args)
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), block)
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            pass
        filemod.blockreplace(tfile.name, **args)
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), block)
        os.remove(tfile.name)

    def test_replace_prepend(self):
        new_content = "Well, I didn't vote for you."

@ -343,10 +363,11 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
                             prepend_if_not_found=False,
                             backup=False
                             )
        with salt.utils.fopen(self.tfile.name, 'r') as fp:
            self.assertNotIn(
                '#-- START BLOCK 2' + "\n"
                + new_content + '#-- END BLOCK 2',
        with salt.utils.fopen(self.tfile.name, 'rb') as fp:
            self.assertNotIn(salt.utils.to_bytes(
                os.linesep.join([
                    '#-- START BLOCK 2',
                    '{0}#-- END BLOCK 2'.format(new_content)])),
                fp.read())

        filemod.blockreplace(self.tfile.name,

@ -355,12 +376,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin):
                             backup=False,
                             prepend_if_not_found=True)

        with salt.utils.fopen(self.tfile.name, 'r') as fp:
        with salt.utils.fopen(self.tfile.name, 'rb') as fp:
            self.assertTrue(
                fp.read().startswith(
                    '#-- START BLOCK 2'
                    + "\n" + new_content
                    + '#-- END BLOCK 2'))
                fp.read().startswith(salt.utils.to_bytes(
                    os.linesep.join([
                        '#-- START BLOCK 2',
                        '{0}#-- END BLOCK 2'.format(new_content)]))))

    def test_replace_partial_marked_lines(self):
        filemod.blockreplace(self.tfile.name,

@ -477,6 +498,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin):
            }
        }

    @skipIf(salt.utils.is_windows(), 'SED is not available on Windows')
    def test_sed_limit_escaped(self):
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write(SED_CONTENT)

@ -501,127 +523,131 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin):
        newlines at end of file.
        '''
        # File ending with a newline
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write('foo\n')
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes('foo' + os.linesep))
            tfile.flush()
            filemod.append(tfile.name, 'bar')
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), 'foo\nbar\n')
        filemod.append(tfile.name, 'bar')
        expected = os.linesep.join(['foo', 'bar']) + os.linesep
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)

        # File not ending with a newline
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write('foo')
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes('foo'))
            tfile.flush()
        filemod.append(tfile.name, 'bar')
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), expected)

        # A newline should be added in empty files
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            filemod.append(tfile.name, 'bar')
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), 'foo\nbar\n')
        # A newline should not be added in empty files
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            filemod.append(tfile.name, 'bar')
            with salt.utils.fopen(tfile.name) as tfile2:
                self.assertEqual(tfile2.read(), 'bar\n')
        with salt.utils.fopen(tfile.name) as tfile2:
            self.assertEqual(tfile2.read(), 'bar' + os.linesep)

    def test_extract_hash(self):
        '''
        Check various hash file formats.
        '''
        # With file name
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write(
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(
                'rc.conf ef6e82e4006dee563d98ada2a2a80a27\n'
                'ead48423703509d37c4a90e6a0d53e143b6fc268 example.tar.gz\n'
                'fe05bcdcdc4928012781a5f1a2a77cbb5398e106 ./subdir/example.tar.gz\n'
                'ad782ecdac770fc6eb9a62e44f90873fb97fb26b foo.tar.bz2\n'
            )
            ))
            tfile.flush()

            result = filemod.extract_hash(tfile.name, '', '/rc.conf')
            self.assertEqual(result, {
                'hsum': 'ef6e82e4006dee563d98ada2a2a80a27',
                'hash_type': 'md5'
            })
        result = filemod.extract_hash(tfile.name, '', '/rc.conf')
        self.assertEqual(result, {
            'hsum': 'ef6e82e4006dee563d98ada2a2a80a27',
            'hash_type': 'md5'
        })

            result = filemod.extract_hash(tfile.name, '', '/example.tar.gz')
            self.assertEqual(result, {
        result = filemod.extract_hash(tfile.name, '', '/example.tar.gz')
        self.assertEqual(result, {
            'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268',
            'hash_type': 'sha1'
        })

            # All the checksums in this test file are sha1 sums. We run this
            # loop three times. The first pass tests auto-detection of hash
            # type by length of the hash. The second tests matching a specific
            # type. The third tests a failed attempt to match a specific type,
            # since sha256 was requested but sha1 is what is in the file.
            for hash_type in ('', 'sha1', 'sha256'):
                # Test the source_hash_name argument. Even though there are
                # matches in the source_hash file for both the file_name and
                # source params, they should be ignored in favor of the
                # source_hash_name.
                file_name = '/example.tar.gz'
                source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
                source_hash_name = './subdir/example.tar.gz'
                result = filemod.extract_hash(
                    tfile.name,
                    hash_type,
                    file_name,
                    source,
                    source_hash_name)
                expected = {
                    'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106',
                    'hash_type': 'sha1'
                } if hash_type != 'sha256' else None
                self.assertEqual(result, expected)

                # Test both a file_name and source but no source_hash_name.
                # Even though there are matches for both file_name and
                # source_hash_name, file_name should be preferred.
                file_name = '/example.tar.gz'
                source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
                source_hash_name = None
                result = filemod.extract_hash(
                    tfile.name,
                    hash_type,
                    file_name,
                    source,
                    source_hash_name)
                expected = {
                    'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268',
                    'hash_type': 'sha1'
                })
                } if hash_type != 'sha256' else None
                self.assertEqual(result, expected)

        # All the checksums in this test file are sha1 sums. We run this
        # loop three times. The first pass tests auto-detection of hash
        # type by length of the hash. The second tests matching a specific
        # type. The third tests a failed attempt to match a specific type,
        # since sha256 was requested but sha1 is what is in the file.
        for hash_type in ('', 'sha1', 'sha256'):
            # Test the source_hash_name argument. Even though there are
            # matches in the source_hash file for both the file_name and
            # source params, they should be ignored in favor of the
            # source_hash_name.
            file_name = '/example.tar.gz'
            source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
            source_hash_name = './subdir/example.tar.gz'
            result = filemod.extract_hash(
                tfile.name,
                hash_type,
                file_name,
                source,
                source_hash_name)
            expected = {
                'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106',
                'hash_type': 'sha1'
            } if hash_type != 'sha256' else None
            self.assertEqual(result, expected)

            # Test both a file_name and source but no source_hash_name.
            # Even though there are matches for both file_name and
            # source_hash_name, file_name should be preferred.
            file_name = '/example.tar.gz'
            source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
            source_hash_name = None
            result = filemod.extract_hash(
                tfile.name,
                hash_type,
                file_name,
                source,
                source_hash_name)
            expected = {
                'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268',
                'hash_type': 'sha1'
            } if hash_type != 'sha256' else None
            self.assertEqual(result, expected)

                # Test both a file_name and source but no source_hash_name.
                # Since there is no match for the file_name, the source is
                # matched.
                file_name = '/somefile.tar.gz'
                source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
                source_hash_name = None
                result = filemod.extract_hash(
                    tfile.name,
                    hash_type,
                    file_name,
                    source,
                    source_hash_name)
                expected = {
                    'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b',
                    'hash_type': 'sha1'
                } if hash_type != 'sha256' else None
                self.assertEqual(result, expected)
            # Test both a file_name and source but no source_hash_name.
            # Since there is no match for the file_name, the source is
            # matched.
            file_name = '/somefile.tar.gz'
            source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2'
            source_hash_name = None
            result = filemod.extract_hash(
                tfile.name,
                hash_type,
                file_name,
                source,
                source_hash_name)
            expected = {
                'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b',
                'hash_type': 'sha1'
            } if hash_type != 'sha256' else None
            self.assertEqual(result, expected)

        # Hash only, no file name (Maven repo checksum format)
        # Since there is no name match, the first checksum in the file will
        # always be returned, never the second.
        with tempfile.NamedTemporaryFile(mode='w+') as tfile:
            tfile.write('ead48423703509d37c4a90e6a0d53e143b6fc268\n'
                        'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n')
        with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
            tfile.write(salt.utils.to_bytes(
                'ead48423703509d37c4a90e6a0d53e143b6fc268\n'
                'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n'))
            tfile.flush()

            for hash_type in ('', 'sha1', 'sha256'):
                result = filemod.extract_hash(tfile.name, hash_type, '/testfile')
                expected = {
                    'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268',
                    'hash_type': 'sha1'
                } if hash_type != 'sha256' else None
                self.assertEqual(result, expected)
        for hash_type in ('', 'sha1', 'sha256'):
            result = filemod.extract_hash(tfile.name, hash_type, '/testfile')
            expected = {
                'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268',
                'hash_type': 'sha1'
            } if hash_type != 'sha256' else None
            self.assertEqual(result, expected)

    def test_user_to_uid_int(self):
        '''

@ -774,6 +800,7 @@ class FileBasicsTestCase(TestCase, LoaderModuleMockMixin):
        self.addCleanup(os.remove, self.myfile)
        self.addCleanup(delattr, self, 'myfile')

    @skipIf(salt.utils.is_windows(), 'os.symlink is not available on Windows')
    def test_symlink_already_in_desired_state(self):
        os.symlink(self.tfile.name, self.directory + '/a_link')
        self.addCleanup(os.remove, self.directory + '/a_link')
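As a rough illustration of the checksum-file format these ``extract_hash``
tests exercise, the hash type can be inferred from digest length once a line
is split into digest and filename. This is a sketch of the idea, not Salt's
actual parser:

.. code-block:: python

    # Hex-digest length -> hash type (subset of the HASHES table above).
    HASH_LENS = {32: 'md5', 40: 'sha1', 64: 'sha256'}

    line = 'ead48423703509d37c4a90e6a0d53e143b6fc268 example.tar.gz'
    hsum, _, filename = line.partition(' ')
    result = {'hsum': hsum, 'hash_type': HASH_LENS[len(hsum)], 'file': filename}
    # hash_type comes out as 'sha1' for the 40-character digest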
@ -97,19 +97,20 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin):

    def test_delete_deployments(self):
        '''
        Tests deployment creation.
        Tests deployment deletion
        :return:
        '''
        with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib:
            with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
                mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="")
                mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
                    **{"delete_namespaced_deployment.return_value.to_dict.return_value": {}}
                )
                self.assertEqual(kubernetes.delete_deployment("test"), {})
                self.assertTrue(
                    kubernetes.kubernetes.client.ExtensionsV1beta1Api().
                    delete_namespaced_deployment().to_dict.called)
            with patch('salt.modules.kubernetes.show_deployment', Mock(return_value=None)):
                with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}):
                    mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="")
                    mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock(
                        **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': ''}}
                    )
                    self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200})
                    self.assertTrue(
                        kubernetes.kubernetes.client.ExtensionsV1beta1Api().
                        delete_namespaced_deployment().to_dict.called)

    def test_create_deployments(self):
        '''

@ -50,10 +50,12 @@ class PoudriereTestCase(TestCase, LoaderModuleMockMixin):
        '''
        Test if it makes jail ``jname`` pkgng aware.
        '''
        ret1 = 'Could not create or find required directory /tmp/salt'
        ret2 = 'Looks like file /tmp/salt/salt-make.conf could not be created'
        ret3 = {'changes': 'Created /tmp/salt/salt-make.conf'}
        mock = MagicMock(return_value='/tmp/salt')
        temp_dir = os.path.join('tmp', 'salt')
        conf_file = os.path.join('tmp', 'salt', 'salt-make.conf')
        ret1 = 'Could not create or find required directory {0}'.format(temp_dir)
        ret2 = 'Looks like file {0} could not be created'.format(conf_file)
        ret3 = {'changes': 'Created {0}'.format(conf_file)}
        mock = MagicMock(return_value=temp_dir)
        mock_true = MagicMock(return_value=True)
        with patch.dict(poudriere.__salt__, {'config.option': mock,
                                             'file.write': mock_true}):
@ -141,6 +141,7 @@ MODULE_RET = {
    }
}

@skipIf(sys.platform.startswith('win'), 'Snapper not available on Windows')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SnapperTestCase(TestCase, LoaderModuleMockMixin):
@ -279,7 +279,7 @@ class MockTarFile(object):
    '''
    Mock tarfile class
    '''
    path = "/tmp"
    path = os.sep + "tmp"

    def __init__(self):
        pass

@ -952,30 +952,27 @@ class StateTestCase(TestCase, LoaderModuleMockMixin):
        '''
        Test to execute a packaged state run
        '''
        tar_file = os.sep + os.path.join('tmp', 'state_pkg.tgz')
        mock = MagicMock(side_effect=[False, True, True, True, True, True])
        with patch.object(os.path, 'isfile', mock), \
                patch('salt.modules.state.tarfile', MockTarFile), \
                patch('salt.modules.state.json', MockJson()):
            self.assertEqual(state.pkg("/tmp/state_pkg.tgz", "", "md5"), {})
            self.assertEqual(state.pkg(tar_file, "", "md5"), {})

            mock = MagicMock(side_effect=[False, 0, 0, 0, 0])
            with patch.object(salt.utils, 'get_hash', mock):
                self.assertDictEqual(state.pkg("/tmp/state_pkg.tgz", "", "md5"),
                                     {})
                # Verify hash
                self.assertDictEqual(state.pkg(tar_file, "", "md5"), {})

                self.assertDictEqual(state.pkg("/tmp/state_pkg.tgz", 0, "md5"),
                                     {})
                # Verify file outside intended root
                self.assertDictEqual(state.pkg(tar_file, 0, "md5"), {})

                MockTarFile.path = ""
                MockJson.flag = True
                with patch('salt.utils.fopen', mock_open()):
                    self.assertListEqual(state.pkg("/tmp/state_pkg.tgz",
                                                   0,
                                                   "md5"),
                                         [True])
                    self.assertListEqual(state.pkg(tar_file, 0, "md5"), [True])

                MockTarFile.path = ""
                MockJson.flag = False
                with patch('salt.utils.fopen', mock_open()):
                    self.assertTrue(state.pkg("/tmp/state_pkg.tgz",
                                              0, "md5"))
                    self.assertTrue(state.pkg(tar_file, 0, "md5"))
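The recurring change in these test files, switching from hard-coded
``"/tmp/..."`` strings to ``os.sep``/``os.path.join``, keeps the expected
paths platform-correct. A quick sketch of the difference:

.. code-block:: python

    import os

    tar_file = os.sep + os.path.join('tmp', 'state_pkg.tgz')
    # POSIX:   /tmp/state_pkg.tgz
    # Windows: \tmp\state_pkg.tgz
    print(tar_file)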
@ -2,6 +2,7 @@

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Libs
import salt.utils

@ -78,8 +79,9 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin):
                        is_darwin=MagicMock(return_value=False),
                        is_freebsd=MagicMock(return_value=False),
                        is_openbsd=MagicMock(return_value=False),
                        is_netbsd=MagicMock(return_value=False)):
            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value="1\n2\n3")}):
                        is_netbsd=MagicMock(return_value=False),
                        which=MagicMock(return_value=True)):
            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(['1', '2', '3']))}):
                with patch('time.time', MagicMock(return_value=m.now)):
                    with patch('os.path.exists', MagicMock(return_value=True)):
                        proc_uptime = '{0} {1}'.format(m.ut, m.idle)

@ -103,9 +105,10 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin):
                        is_darwin=MagicMock(return_value=False),
                        is_freebsd=MagicMock(return_value=False),
                        is_openbsd=MagicMock(return_value=False),
                        is_netbsd=MagicMock(return_value=False)):
                        is_netbsd=MagicMock(return_value=False),
                        which=MagicMock(return_value=True)):

            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value="1\n2\n3"),
            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(['1', '2', '3'])),
                                              'cmd.run_all': MagicMock(return_value=m2.ret)}):
                with patch('time.time', MagicMock(return_value=m.now)):
                    ret = status.uptime()

@ -125,8 +128,9 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin):
                        is_darwin=MagicMock(return_value=True),
                        is_freebsd=MagicMock(return_value=False),
                        is_openbsd=MagicMock(return_value=False),
                        is_netbsd=MagicMock(return_value=False)):
            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value="1\n2\n3"),
                        is_netbsd=MagicMock(return_value=False),
                        which=MagicMock(return_value=True)):
            with patch.dict(status.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(['1', '2', '3'])),
                                              'sysctl.get': MagicMock(return_value=kern_boottime)}):
                with patch('time.time', MagicMock(return_value=m.now)):
                    ret = status.uptime()

@ -3,6 +3,7 @@

# Import python libs
from __future__ import absolute_import
import re
import os

# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin

@ -13,6 +14,7 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
import salt.modules.virt as virt
import salt.modules.config as config
from salt._compat import ElementTree as ET
import salt.config
import salt.utils

# Import third party libs

@ -245,8 +247,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
        disks = root.findall('.//disk')
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue(disk.find('source').attrib['file'].startswith('/'))
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        root_dir = salt.config.DEFAULT_MINION_OPTS.get('root_dir')
        self.assertTrue(disk.find('source').attrib['file'].startswith(root_dir))
        self.assertTrue(os.path.join('hello', 'system') in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'vda')
        self.assertEqual(disk.find('target').attrib['bus'], 'virtio')
        self.assertEqual(disk.find('driver').attrib['name'], 'qemu')

@ -284,7 +287,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue('[0]' in disk.find('source').attrib['file'])
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        self.assertTrue(os.path.join('hello', 'system') in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'sda')
        self.assertEqual(disk.find('target').attrib['bus'], 'scsi')
        self.assertEqual(disk.find('address').attrib['unit'], '0')

@ -143,14 +143,14 @@ class WinServiceTestCase(TestCase, LoaderModuleMockMixin):
                                            {'Status': 'Stop Pending'},
                                            {'Status': 'Stopped'}])

        with patch.object(win_service, 'status', mock_false):
            with patch.dict(win_service.__salt__, {'cmd.run': MagicMock(return_value="service was stopped")}):
                self.assertTrue(win_service.stop('spongebob'))

        with patch.object(win_service, 'status', mock_true):
            with patch.object(win32serviceutil, 'StopService', mock_true):
                with patch.object(win_service, 'info', mock_info):
                    with patch.object(win_service, 'status', mock_false):
                        self.assertTrue(win_service.stop('spongebob'))
        with patch.dict(win_service.__salt__, {'cmd.run': MagicMock(return_value="service was stopped")}), \
                patch.object(win32serviceutil, 'StopService', mock_true), \
                patch.object(win_service, 'info', mock_info), \
                patch.object(win_service, 'status', mock_false):
            self.assertTrue(win_service.stop('spongebob'))

    def test_restart(self):
        '''

@ -54,7 +54,8 @@ class ZncTestCase(TestCase, LoaderModuleMockMixin):
        Tests write the active configuration state to config file
        '''
        mock = MagicMock(return_value='SALT')
        with patch.dict(znc.__salt__, {'ps.pkill': mock}):
        with patch.dict(znc.__salt__, {'ps.pkill': mock}), \
                patch.object(znc, 'signal', MagicMock()):
            self.assertEqual(znc.dumpconf(), 'SALT')

    # 'rehashconf' function tests: 1

@ -64,7 +65,8 @@ class ZncTestCase(TestCase, LoaderModuleMockMixin):
        Tests rehash the active configuration state from config file
        '''
        mock = MagicMock(return_value='SALT')
        with patch.dict(znc.__salt__, {'ps.pkill': mock}):
        with patch.dict(znc.__salt__, {'ps.pkill': mock}), \
                patch.object(znc, 'signal', MagicMock()):
            self.assertEqual(znc.rehashconf(), 'SALT')

    # 'version' function tests: 1

@ -189,7 +189,7 @@ class ZypperTestCase(TestCase, LoaderModuleMockMixin):
        }
        with patch.dict('salt.modules.zypper.__salt__', {'cmd.run_all': MagicMock(return_value=ref_out)}):
            with self.assertRaisesRegex(CommandExecutionError,
                    "^Zypper command failure: Some handled zypper internal error\nAnother zypper internal error$"):
                    "^Zypper command failure: Some handled zypper internal error{0}Another zypper internal error$".format(os.linesep)):
                zypper.list_upgrades(refresh=False)

        # Test unhandled error
@ -140,7 +140,8 @@ class AugeasTestCase(TestCase, LoaderModuleMockMixin):
                      'augeas.method_map': self.mock_method_map}
        with patch.dict(augeas.__salt__, mock_dict_):
            mock_filename = MagicMock(return_value='/etc/services')
            with patch.object(augeas, '_workout_filename', mock_filename):
            with patch.object(augeas, '_workout_filename', mock_filename), \
                    patch('os.path.isfile', MagicMock(return_value=True)):
                with patch('salt.utils.fopen', MagicMock(mock_open)):
                    mock_diff = MagicMock(return_value=['+ zabbix-agent'])
                    with patch('difflib.unified_diff', mock_diff):
@ -2,19 +2,21 @@

# Import Python libs
from __future__ import absolute_import
import sys

# Import Salt Libs
import salt.states.mac_package as macpackage

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
    MagicMock,
    patch
)


@skipIf(sys.platform.startswith('win'), "Not a Windows test")
class MacPackageTestCase(TestCase, LoaderModuleMockMixin):
    def setup_loader_modules(self):
        return {macpackage: {}}

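The new ``@skipIf(sys.platform.startswith('win'), ...)`` decorator is stock ``unittest`` platform gating: the whole TestCase is reported as skipped instead of failing on Windows. A standalone sketch of the mechanism (hypothetical test, plain ``unittest`` rather than Salt's testing wrappers):

.. code-block:: python

    import os
    import sys
    import unittest


    @unittest.skipIf(sys.platform.startswith('win'), 'Not a Windows test')
    class PosixOnlyTestCase(unittest.TestCase):
        def test_path_separator(self):
            # Only meaningful where the separator is '/'.
            self.assertEqual(os.sep, '/')


    if __name__ == '__main__':
        unittest.main()
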
@ -958,5 +958,47 @@ class SaltAPIParserTestCase(LogSettingsParserTests):
        self.addCleanup(delattr, self, 'parser')


@skipIf(NO_MOCK, NO_MOCK_REASON)
class DaemonMixInTestCase(TestCase):
    '''
    Tests the PIDfile deletion in the DaemonMixIn.
    '''

    def setUp(self):
        '''
        Setting up
        '''
        # Set PID
        self.pid = '/some/fake.pid'

        # Setup mixin
        self.mixin = salt.utils.parsers.DaemonMixIn()
        self.mixin.info = None
        self.mixin.config = {}
        self.mixin.config['pidfile'] = self.pid

    def test_pid_file_deletion(self):
        '''
        PIDfile deletion without exception.
        '''
        with patch('os.unlink', MagicMock()) as os_unlink:
            with patch('os.path.isfile', MagicMock(return_value=True)):
                with patch.object(self.mixin, 'info', MagicMock()):
                    self.mixin._mixin_before_exit()
                    assert self.mixin.info.call_count == 0
                    assert os_unlink.call_count == 1

    def test_pid_file_deletion_with_oserror(self):
        '''
        PIDfile deletion with exception
        '''
        with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink:
            with patch('os.path.isfile', MagicMock(return_value=True)):
                with patch.object(self.mixin, 'info', MagicMock()):
                    self.mixin._mixin_before_exit()
                    assert os_unlink.call_count == 1
                    self.mixin.info.assert_called_with(
                        'PIDfile could not be deleted: {0}'.format(self.pid))


# Hide the class from unittest framework when it searches for TestCase classes in the module
del LogSettingsParserTests

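The second test drives the error path by giving the patched ``os.unlink`` a ``side_effect``, which makes the mock raise instead of returning. A minimal stdlib illustration of that mock feature (``unittest.mock`` here; the Salt tests import the same names from ``tests.support.mock``):

.. code-block:: python

    from unittest.mock import MagicMock

    # A mock configured with side_effect raises when it is called.
    unlink = MagicMock(side_effect=OSError('simulated failure'))

    try:
        unlink('/some/fake.pid')
    except OSError as exc:
        print('raised as configured: {0}'.format(exc))

    # The call is still recorded even though it raised.
    assert unlink.call_count == 1
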
@ -10,10 +10,15 @@ import os
import sys
import stat
import shutil
import resource
import tempfile
import socket

# Import third party libs
if sys.platform.startswith('win'):
    import win32file
else:
    import resource

# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.paths import TMP

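``resource`` exists only on POSIX, so importing it unconditionally would break this test module on Windows before a single test runs; the guard defers the choice to import time. The same pattern in isolation (a sketch, with ``win32file`` coming from pywin32 and assumed installed on Windows hosts):

.. code-block:: python

    import sys

    if sys.platform.startswith('win'):
        import win32file   # pywin32; there is no ``resource`` module on Windows
    else:
        import resource    # POSIX-only standard library module
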
@ -82,7 +87,10 @@ class TestVerify(TestCase):
        writer = FakeWriter()
        sys.stderr = writer
        # Now run the test
        self.assertFalse(check_user('nouser'))
        if sys.platform.startswith('win'):
            self.assertTrue(check_user('nouser'))
        else:
            self.assertFalse(check_user('nouser'))
        # Restore sys.stderr
        sys.stderr = stderr
        if writer.output != 'CRITICAL: User not found: "nouser"\n':

@ -118,7 +126,6 @@ class TestVerify(TestCase):
            # not support IPv6.
            pass

    @skipIf(True, 'Skipping until we can find why Jenkins is bailing out')
    def test_max_open_files(self):
        with TestsLoggingHandler() as handler:
            logmsg_dbg = (

@ -139,15 +146,31 @@ class TestVerify(TestCase):
                'raise the salt\'s max_open_files setting. Please consider '
                'raising this value.'
            )
            if sys.platform.startswith('win'):
                logmsg_crash = (
                    '{0}:The number of accepted minion keys({1}) should be lower '
                    'than 1/4 of the max open files soft setting({2}). '
                    'salt-master will crash pretty soon! Please consider '
                    'raising this value.'
                )

            mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
            if sys.platform.startswith('win'):
                # Check the Windows API for more detail on this
                # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx
                # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html
                mof_s = mof_h = win32file._getmaxstdio()
            else:
                mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
            tempdir = tempfile.mkdtemp(prefix='fake-keys')
            keys_dir = os.path.join(tempdir, 'minions')
            os.makedirs(keys_dir)

            mof_test = 256

            resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h))
            if sys.platform.startswith('win'):
                win32file._setmaxstdio(mof_test)
            else:
                resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h))

            try:
                prev = 0

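Querying and setting the open-files ceiling goes through two different APIs depending on platform: POSIX ``resource.getrlimit``/``setrlimit`` versus the C runtime's stdio ceiling exposed by pywin32. A hedged wrapper capturing that symmetry (function names are mine, not Salt's; on Windows the single stdio ceiling stands in for both limits, mirroring the test code above):

.. code-block:: python

    import sys

    if sys.platform.startswith('win'):
        import win32file
    else:
        import resource


    def get_max_open_files():
        '''Return (soft, hard) limits on open files for this process.'''
        if sys.platform.startswith('win'):
            limit = win32file._getmaxstdio()
            return limit, limit
        return resource.getrlimit(resource.RLIMIT_NOFILE)


    def set_max_open_files(soft, hard):
        '''Raise or lower the open-files limit.'''
        if sys.platform.startswith('win'):
            win32file._setmaxstdio(soft)
        else:
            resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
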
@ -181,7 +204,7 @@ class TestVerify(TestCase):
                        level,
                        newmax,
                        mof_test,
                        mof_h - newmax,
                        mof_test - newmax if sys.platform.startswith('win') else mof_h - newmax,
                    ),
                    handler.messages
                )

@ -206,7 +229,7 @@ class TestVerify(TestCase):
                        'CRITICAL',
                        newmax,
                        mof_test,
                        mof_h - newmax,
                        mof_test - newmax if sys.platform.startswith('win') else mof_h - newmax,
                    ),
                    handler.messages
                )

@ -218,7 +241,10 @@ class TestVerify(TestCase):
                raise
            finally:
                shutil.rmtree(tempdir)
                resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h))
                if sys.platform.startswith('win'):
                    win32file._setmaxstdio(mof_h)
                else:
                    resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h))

    @skipIf(NO_MOCK, NO_MOCK_REASON)
    def test_verify_log(self):