Merge branch '2016.11' into 'nitrogen'

Conflicts:
  - pkg/salt-minion.service
  - salt/modules/junos.py
  - salt/modules/localemod.py
  - salt/modules/win_system.py
  - salt/states/junos.py
  - tests/unit/modules/test_localemod.py
  - tests/unit/modules/test_win_powercfg.py
  - tests/unit/states/dockerng_test.py
This commit is contained in:
rallytime 2017-05-24 16:32:59 -06:00
commit 9ff2694155
86 changed files with 842 additions and 414 deletions

View file

@ -74,7 +74,7 @@ confidence=
# can either give multiple identifiers separated by comma (,) or put this # can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration # option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to # file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if # disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all # you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have # --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes # no Warning level messages displayed, use"--disable=all --enable=classes

View file

@ -71,7 +71,7 @@ confidence=
# can either give multiple identifiers separated by comma (,) or put this # can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration # option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to # file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if # disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all # you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have # --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes # no Warning level messages displayed, use"--disable=all --enable=classes

View file

@ -9,11 +9,6 @@ BUILDDIR = _build
SPHINXLANG = SPHINXLANG =
XELATEX = xelatex XELATEX = xelatex
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# ----- Translations Support ------------------------------------------------> # ----- Translations Support ------------------------------------------------>
# If language is set, also set translation options # If language is set, also set translation options
ifeq ($(shell [ "x$(SPHINXLANG)" != "x" ] && echo 0 || echo 1), 0) ifeq ($(shell [ "x$(SPHINXLANG)" != "x" ] && echo 0 || echo 1), 0)
@ -36,7 +31,7 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(TRANSLATIONOPTS
# the i18n builder cannot share the environment and doctrees with the others # the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext translations download-translations .PHONY: help clean check_sphinx-build html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext translations download-translations
help: help:
@echo "Please use \`make <target>' where <target> is one of" @echo "Please use \`make <target>' where <target> is one of"
@ -69,38 +64,42 @@ clean:
rm -rf $(BUILDDIR)/* rm -rf $(BUILDDIR)/*
test -d 'locale' && find locale/ -name *.mo -exec rm {} \; || true test -d 'locale' && find locale/ -name *.mo -exec rm {} \; || true
html: translations # User-friendly check for sphinx-build
check_sphinx-build:
@which $(SPHINXBUILD) >/dev/null 2>&1 || (echo "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)" >&2; false)
html: check_sphinx-build translations
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo @echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html." @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml: translations dirhtml: check_sphinx-build translations
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo @echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml: translations singlehtml: check_sphinx-build translations
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo @echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle: translations pickle: check_sphinx-build translations
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo @echo
@echo "Build finished; now you can process the pickle files." @echo "Build finished; now you can process the pickle files."
json: translations json: check_sphinx-build translations
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo @echo
@echo "Build finished; now you can process the JSON files." @echo "Build finished; now you can process the JSON files."
htmlhelp: translations htmlhelp: check_sphinx-build translations
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo @echo
@echo "Build finished; now you can run HTML Help Workshop with the" \ @echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp." ".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp: translations qthelp: check_sphinx-build translations
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo @echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
@ -109,7 +108,7 @@ qthelp: translations
@echo "To view the help file:" @echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Salt.qhc" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Salt.qhc"
devhelp: translations devhelp: check_sphinx-build translations
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo @echo
@echo "Build finished." @echo "Build finished."
@ -118,31 +117,31 @@ devhelp: translations
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Salt" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Salt"
@echo "# devhelp" @echo "# devhelp"
epub: translations epub: check_sphinx-build translations
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo @echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub." @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex: translations latex: check_sphinx-build translations
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo @echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \ @echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)." "(use \`make latexpdf' here to do that automatically)."
latexpdf: translations latexpdf: check_sphinx-build translations
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..." @echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf $(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja: translations latexpdfja: check_sphinx-build translations
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..." @echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
pdf: translations pdf: check_sphinx-build translations
@if [ "$(XELATEX)" = "xelatex" ] || [ "x$(XELATEX)" = "x" ]; then \ @if [ "$(XELATEX)" = "xelatex" ] || [ "x$(XELATEX)" = "x" ]; then \
echo "The '$(XELATEX)' command was not found."; \ echo "The '$(XELATEX)' command was not found."; \
fi fi
@ -157,62 +156,62 @@ cheatsheet: translations
cd cheatsheet && xelatex salt.tex && cp salt.pdf ../salt-cheatsheet.pdf cd cheatsheet && xelatex salt.tex && cp salt.pdf ../salt-cheatsheet.pdf
@echo "./salt-cheatsheet.pdf created." @echo "./salt-cheatsheet.pdf created."
text: translations text: check_sphinx-build translations
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo @echo
@echo "Build finished. The text files are in $(BUILDDIR)/text." @echo "Build finished. The text files are in $(BUILDDIR)/text."
man: translations man: check_sphinx-build translations
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo @echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man." @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo: translations texinfo: check_sphinx-build translations
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo @echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \ @echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)." "(use \`make info' here to do that automatically)."
info: translations info: check_sphinx-build translations
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..." @echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext: gettext: check_sphinx-build
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo @echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale" @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale"
changes: translations changes: check_sphinx-build translations
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo @echo
@echo "The overview file is in $(BUILDDIR)/changes." @echo "The overview file is in $(BUILDDIR)/changes."
spelling: spelling: check_sphinx-build
$(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling
@echo @echo
@echo "Spell check complete; look for any errors in the above output " \ @echo "Spell check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/spelling/output.txt." "or in $(BUILDDIR)/spelling/output.txt."
linkcheck: linkcheck: check_sphinx-build
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo @echo
@echo "Link check complete; look for any errors in the above output " \ @echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt." "or in $(BUILDDIR)/linkcheck/output.txt."
doctest: doctest: check_sphinx-build
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \ @echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt." "results in $(BUILDDIR)/doctest/output.txt."
xml: translations xml: check_sphinx-build translations
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo @echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml." @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml: translations pseudoxml: check_sphinx-build translations
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo @echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

View file

@ -26,7 +26,7 @@ class Mock(object):
''' '''
def __init__(self, mapping=None, *args, **kwargs): def __init__(self, mapping=None, *args, **kwargs):
""" """
Mapping allows to bypass the Mock object, but actually assign Mapping allows autodoc to bypass the Mock object, but actually assign
a specific value, expected by a specific attribute returned. a specific value, expected by a specific attribute returned.
""" """
self.__mapping = mapping or {} self.__mapping = mapping or {}

View file

@ -260,9 +260,9 @@ service. But restarting the service while in the middle of a state run
interrupts the process of the Minion running states and sending results back to interrupts the process of the Minion running states and sending results back to
the Master. A common way to workaround that is to schedule restarting of the the Master. A common way to workaround that is to schedule restarting of the
Minion service using :ref:`masterless mode <masterless-quickstart>` after all Minion service using :ref:`masterless mode <masterless-quickstart>` after all
other states have been applied. This allows to keep Minion to Master connection other states have been applied. This allows the minion to keep Minion to Master
alive for the Minion to report the final results to the Master, while the connection alive for the Minion to report the final results to the Master, while
service is restarting in the background. the service is restarting in the background.
Upgrade without automatic restart Upgrade without automatic restart
********************************* *********************************

View file

@ -760,7 +760,7 @@ Extended Changelog Courtesy of Todd Stansell (https://github.com/tjstansell/salt
- **PR** `#22925`_: (*rallytime*) Backport `#22895`_ to 2014.7 - **PR** `#22925`_: (*rallytime*) Backport `#22895`_ to 2014.7
| refs: `#23113`_ | refs: `#23113`_
- **PR** `#22895`_: (*aletourneau*) pam_tally counter was not reset to 0 after a succesfull login - **PR** `#22895`_: (*aletourneau*) pam_tally counter was not reset to 0 after a successful login
| refs: `#22925`_ | refs: `#22925`_
* dfe2066 Merge pull request `#23113`_ from saltstack/revert-22925-`bp-22895`_ * dfe2066 Merge pull request `#23113`_ from saltstack/revert-22925-`bp-22895`_
* b957ea8 Revert "Backport `#22895`_ to 2014.7" * b957ea8 Revert "Backport `#22895`_ to 2014.7"
@ -921,7 +921,7 @@ Extended Changelog Courtesy of Todd Stansell (https://github.com/tjstansell/salt
| refs: `#23113`_ | refs: `#23113`_
@ *2015-04-22T02:30:26Z* @ *2015-04-22T02:30:26Z*
- **PR** `#22895`_: (*aletourneau*) pam_tally counter was not reset to 0 after a succesfull login - **PR** `#22895`_: (*aletourneau*) pam_tally counter was not reset to 0 after a successful login
| refs: `#22925`_ | refs: `#22925`_
* 6890752 Merge pull request `#22925`_ from rallytime/`bp-22895`_ * 6890752 Merge pull request `#22925`_ from rallytime/`bp-22895`_
* 3852d96 Pylint fix * 3852d96 Pylint fix
@ -930,7 +930,7 @@ Extended Changelog Courtesy of Todd Stansell (https://github.com/tjstansell/salt
* 5ebf159 Cleaned up pull request * 5ebf159 Cleaned up pull request
* a08ac47 pam_tally counter was not reset to 0 after a succesfull login * a08ac47 pam_tally counter was not reset to 0 after a successful login
- **PR** `#22914`_: (*cachedout*) Call proper returner function in jobs.list_jobs - **PR** `#22914`_: (*cachedout*) Call proper returner function in jobs.list_jobs
@ *2015-04-22T00:49:01Z* @ *2015-04-22T00:49:01Z*

View file

@ -403,7 +403,7 @@ Changes:
- **PR** `#29708`_: (*lagesag*) Fix test=True for file.directory with recurse ignore_files/ignore_dirs. - **PR** `#29708`_: (*lagesag*) Fix test=True for file.directory with recurse ignore_files/ignore_dirs.
- **PR** `#29642`_: (*cachedout*) Correctly restart deamonized minions on failure - **PR** `#29642`_: (*cachedout*) Correctly restart daemonized minions on failure
- **PR** `#29599`_: (*cachedout*) Clean up minion shutdown - **PR** `#29599`_: (*cachedout*) Clean up minion shutdown

View file

@ -264,7 +264,7 @@ Runner Changes
``salt://_utils/``) are now able to be synced to the master, making it easier ``salt://_utils/``) are now able to be synced to the master, making it easier
to use them in custom runners. A :py:mod:`saltutil.sync_utils to use them in custom runners. A :py:mod:`saltutil.sync_utils
<salt.runners.saltutil.sync_utils>` function has been added to the <salt.runners.saltutil.sync_utils>` function has been added to the
:py:mod:`saltutil runner <salt.runners.saltutil>` to faciliate the syncing of :py:mod:`saltutil runner <salt.runners.saltutil>` to facilitate the syncing of
utility modules to the master. utility modules to the master.
Pillar Changes Pillar Changes
@ -291,7 +291,7 @@ Junos Module Changes
Network Automation: NAPALM Network Automation: NAPALM
========================== ==========================
Beginning with 2016.11.0, network automation is inclued by default in the core Beginning with 2016.11.0, network automation is included by default in the core
of Salt. It is based on a the `NAPALM <https://github.com/napalm-automation/napalm>`_ of Salt. It is based on a the `NAPALM <https://github.com/napalm-automation/napalm>`_
library and provides facilities to manage the configuration and retrieve data library and provides facilities to manage the configuration and retrieve data
from network devices running widely used operating systems such: JunOS, IOS-XR, from network devices running widely used operating systems such: JunOS, IOS-XR,
@ -720,7 +720,7 @@ Runner Module Deprecations
- The ``fileserver`` runner no longer accepts the ``outputter`` keyword argument. Users will - The ``fileserver`` runner no longer accepts the ``outputter`` keyword argument. Users will
need to specify an outputter using the ``--out`` option. need to specify an outputter using the ``--out`` option.
- The ``jobs`` runner no longer accepts the ``ouputter`` keyword argument. Users will need to - The ``jobs`` runner no longer accepts the ``outputter`` keyword argument. Users will need to
specify an outputter using the ``--out`` option. specify an outputter using the ``--out`` option.
- ``virt`` runner module: - ``virt`` runner module:

View file

@ -873,7 +873,7 @@ Changes:
- **PR** `#37827`_: (*silenius*) add missing chloginclass - **PR** `#37827`_: (*silenius*) add missing chloginclass
- **PR** `#37826`_: (*rallytime*) Update branch refs to more relevant branch - **PR** `#37826`_: (*rallytime*) Update branch refs to more relevant branch
- **PR** `#37823`_: (*rallytime*) Add "names" option to file state docs: point users to highstate doc examples - **PR** `#37823`_: (*rallytime*) Add "names" option to file state docs: point users to highstate doc examples
- **PR** `#37822`_: (*laleocen*) add documenation for multiline encryption using nacl - **PR** `#37822`_: (*laleocen*) add documentation for multiline encryption using nacl
| refs: `#37826`_ | refs: `#37826`_
- **PR** `#37821`_: (*rallytime*) Clarify keystone.user_present password state docs with default behavior - **PR** `#37821`_: (*rallytime*) Clarify keystone.user_present password state docs with default behavior
- **PR** `#37820`_: (*rallytime*) Add some dependency documentation to libvirt docs - **PR** `#37820`_: (*rallytime*) Add some dependency documentation to libvirt docs

View file

@ -189,9 +189,9 @@ Changes:
* fd2ee7d Add some simple unit tests for salt.config.api_config function * fd2ee7d Add some simple unit tests for salt.config.api_config function
* 3d2fefc Make sure the pidfile and log_file values are overriden by api opts * 3d2fefc Make sure the pidfile and log_file values are overridden by api opts
* 1f6b540 Make sure the pidfile and log_file values are overriden by api opts * 1f6b540 Make sure the pidfile and log_file values are overridden by api opts
* 04d307f salt-api no longer forces the default timeout * 04d307f salt-api no longer forces the default timeout
@ -844,7 +844,7 @@ Changes:
* 68d5475 Fixing Snapper unit tests for SUBVOLUME support * 68d5475 Fixing Snapper unit tests for SUBVOLUME support
* e9919a9 Removing posible double '/' from the file paths * e9919a9 Removing possible double '/' from the file paths
* 8b4f87f Updating and fixing the documentation * 8b4f87f Updating and fixing the documentation

View file

@ -367,7 +367,7 @@ Changes:
* 5244041 Merge pull request `#39221`_ from lvg01/fix-bug-39220 * 5244041 Merge pull request `#39221`_ from lvg01/fix-bug-39220
* e8a41d6 Removes to early content stripping (stripping is allready done when needed with ident:true), fixes `#39220`_ * e8a41d6 Removes to early content stripping (stripping is already done when needed with ident:true), fixes `#39220`_
* a4b169e Fixed wrong logic, fixes `#39220`_ * a4b169e Fixed wrong logic, fixes `#39220`_
@ -482,7 +482,7 @@ Changes:
- **PR** `#39276`_: (*gtmanfred*) _device_mismatch_ignored will never be True - **PR** `#39276`_: (*gtmanfred*) _device_mismatch_ignored will never be True
@ *2017-02-09T17:05:28Z* @ *2017-02-09T17:05:28Z*
- **ISSUE** `#39269`_: (*alexharrington*) Remount forced with lizardfs fuse filesystem due to device missmatch - **ISSUE** `#39269`_: (*alexharrington*) Remount forced with lizardfs fuse filesystem due to device mismatch
| refs: `#39276`_ | refs: `#39276`_
- **ISSUE** `#39106`_: (*carsten-AEI*) CVMFS fuse mount gets remounted every time - **ISSUE** `#39106`_: (*carsten-AEI*) CVMFS fuse mount gets remounted every time
| refs: `#39276`_ | refs: `#39276`_
@ -688,7 +688,7 @@ Changes:
- **ISSUE** `#1`_: (*thatch45*) Enable regex on the salt cli - **ISSUE** `#1`_: (*thatch45*) Enable regex on the salt cli
- **PR** `#39146`_: (*gtmanfred*) update vmware getting started doc - **PR** `#39146`_: (*gtmanfred*) update vmware getting started doc
- **PR** `#39145`_: (*garethgreenaway*) [2016.3] Fix when targeting via pillar with Salt syndic - **PR** `#39145`_: (*garethgreenaway*) [2016.3] Fix when targeting via pillar with Salt syndic
- **PR** `#39131`_: (*bobrik*) Clarify ipv6 option for minion and inteface for master, closes `#39118`_ - **PR** `#39131`_: (*bobrik*) Clarify ipv6 option for minion and interface for master, closes `#39118`_
- **PR** `#39116`_: (*terminalmage*) Don't abort pillar.get with merge=True if default is None - **PR** `#39116`_: (*terminalmage*) Don't abort pillar.get with merge=True if default is None
- **PR** `#39077`_: (*terminalmage*) Apply fix from `#38705`_ to 2016.3 branch - **PR** `#39077`_: (*terminalmage*) Apply fix from `#38705`_ to 2016.3 branch
- **PR** `#38804`_: (*alexbleotu*) Second attempt to fix prepending of root_dir to paths - **PR** `#38804`_: (*alexbleotu*) Second attempt to fix prepending of root_dir to paths
@ -717,7 +717,7 @@ Changes:
* 97521b3 Second attempt to fix prepending of root_dir to paths * 97521b3 Second attempt to fix prepending of root_dir to paths
* 6ffeda3 Clarify ipv6 option for minion and inteface for master, closes `#39118`_ (`#39131`_) * 6ffeda3 Clarify ipv6 option for minion and interface for master, closes `#39118`_ (`#39131`_)
* 646b9ea Don't abort pillar.get with merge=True if default is None (`#39116`_) * 646b9ea Don't abort pillar.get with merge=True if default is None (`#39116`_)
@ -978,7 +978,7 @@ Changes:
- **PR** `#39039`_: (*rallytime*) Update 2016.11.2 release notes - **PR** `#39039`_: (*rallytime*) Update 2016.11.2 release notes
* a7fc02e Ungate the status.py module and raise unsupported errors in functions not executeable on Windows. (`#39005`_) * a7fc02e Ungate the status.py module and raise unsupported errors in functions not executable on Windows. (`#39005`_)
- **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows. - **PR** `#39005`_: (*cro*) Ungate the status.py module and raise unsupported errors in functions not executable on Windows.
| refs: `#39536`_ | refs: `#39536`_
@ -1214,7 +1214,7 @@ Changes:
| refs: `#38875`_ | refs: `#38875`_
- **PR** `#38890`_: (*cro*) Backport `#38887`_ to 2016.3: Enable resetting a VM via salt-cloud & VMware driver - **PR** `#38890`_: (*cro*) Backport `#38887`_ to 2016.3: Enable resetting a VM via salt-cloud & VMware driver
- **PR** `#38883`_: (*techhat*) Don't require text_out path to exist - **PR** `#38883`_: (*techhat*) Don't require text_out path to exist
- **PR** `#38875`_: (*terminalmage*) Reactor: fix traceback when salt:// path is nonexistant - **PR** `#38875`_: (*terminalmage*) Reactor: fix traceback when salt:// path is nonexistent
- **PR** `#38867`_: (*mchugh19*) Touch deploy.sh before use - **PR** `#38867`_: (*mchugh19*) Touch deploy.sh before use
| refs: `#38883`_ | refs: `#38883`_
- **PR** `#38851`_: (*terminalmage*) Support docker-py 2.0 in dockerng - **PR** `#38851`_: (*terminalmage*) Support docker-py 2.0 in dockerng
@ -1237,7 +1237,7 @@ Changes:
* fbc4d2a reactor: ensure glob_ref is a string * fbc4d2a reactor: ensure glob_ref is a string
* 2e443d7 cp.cache_file: add note re: return for nonexistant salt:// path * 2e443d7 cp.cache_file: add note re: return for nonexistent salt:// path
* e9ebec4 Merge pull request `#38890`_ from cro/vmware_reset_vm_20163 * e9ebec4 Merge pull request `#38890`_ from cro/vmware_reset_vm_20163

View file

@ -832,7 +832,7 @@ Changes:
* 2febd05 Merge pull request `#40372`_ from zer0def/pip-cache-fixes * 2febd05 Merge pull request `#40372`_ from zer0def/pip-cache-fixes
* d68067f Merge remote-tracking branch 'main/2016.11' into pip-cache-fixes * d68067f Merge remote-tracking branch 'main/2016.11' into pip-cache-fixes
* 4f23a23 Fixed the `test_install_download_cache_argument_in_resulting_command` to accomodate introduced cache directory argument fixes and renamed it to `test_install_download_cache_dir_arguments_in_resulting_command`. * 4f23a23 Fixed the `test_install_download_cache_argument_in_resulting_command` to accommodate introduced cache directory argument fixes and renamed it to `test_install_download_cache_dir_arguments_in_resulting_command`.
* 9d0f94e Fixed unnecessary API changes introduced with suggested changes. * 9d0f94e Fixed unnecessary API changes introduced with suggested changes.
@ -1345,7 +1345,7 @@ Changes:
@ *2017-03-17T15:17:08Z* @ *2017-03-17T15:17:08Z*
- **PR** `#40090`_: (*rallytime*) Back-port `#40056`_ to 2016.3 - **PR** `#40090`_: (*rallytime*) Back-port `#40056`_ to 2016.3
- **PR** `#40059`_: (*terminalmage*) Fix traceback when virtualenv.managed is invoked with nonexistant user - **PR** `#40059`_: (*terminalmage*) Fix traceback when virtualenv.managed is invoked with nonexistent user
- **PR** `#40057`_: (*cachedout*) More mentionbot blacklists - **PR** `#40057`_: (*cachedout*) More mentionbot blacklists
- **PR** `#40056`_: (*thatch45*) update mention bot blacklist - **PR** `#40056`_: (*thatch45*) update mention bot blacklist
| refs: `#40090`_ | refs: `#40090`_
@ -1354,7 +1354,7 @@ Changes:
* 116201f Merge pull request `#40059`_ from terminalmage/fix-virtualenv-traceback * 116201f Merge pull request `#40059`_ from terminalmage/fix-virtualenv-traceback
* e3cfd29 Fix traceback when virtualenv.managed is invoked with nonexistant user * e3cfd29 Fix traceback when virtualenv.managed is invoked with nonexistent user
* a01b52b Merge pull request `#40090`_ from rallytime/`bp-40056`_ * a01b52b Merge pull request `#40090`_ from rallytime/`bp-40056`_
@ -1386,7 +1386,7 @@ Changes:
- **PR** `#40053`_: (*gtmanfred*) Update rh_ip.py - **PR** `#40053`_: (*gtmanfred*) Update rh_ip.py
- **PR** `#40041`_: (*terminalmage*) Fix transposed lines in salt.utils.process - **PR** `#40041`_: (*terminalmage*) Fix transposed lines in salt.utils.process
- **PR** `#40038`_: (*velom*) correctly parse "pkg_name===version" from pip freeze - **PR** `#40038`_: (*velom*) correctly parse "pkg_name===version" from pip freeze
- **PR** `#40018`_: (*meaksh*) Allows overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call - **PR** `#40018`_: (*meaksh*) Allow overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call
| refs: `#40072`_ | refs: `#40072`_
* b12720a Merge pull request `#40088`_ from rallytime/merge-2016.11 * b12720a Merge pull request `#40088`_ from rallytime/merge-2016.11
* 626bd03 Merge branch '2016.3' into '2016.11' * 626bd03 Merge branch '2016.3' into '2016.11'
@ -1397,9 +1397,9 @@ Changes:
* 8dcffc7 Merge pull request `#40018`_ from meaksh/2016.3-handling-timeouts-for-manage.up-runner * 8dcffc7 Merge pull request `#40018`_ from meaksh/2016.3-handling-timeouts-for-manage.up-runner
* 9f5c3b7 Allows to set custom timeouts for 'manage.up' and 'manage.status' * 9f5c3b7 Allow setting custom timeouts for 'manage.up' and 'manage.status'
* 2102d9c Allows to set 'timeout' and 'gather_job_timeout' via kwargs * 2102d9c Allow setting 'timeout' and 'gather_job_timeout' via kwargs
* 22fc529 Merge pull request `#40038`_ from velom/fix-pip-freeze-parsing * 22fc529 Merge pull request `#40038`_ from velom/fix-pip-freeze-parsing
@ -1419,15 +1419,15 @@ Changes:
* 703ab23 Merge pull request `#40055`_ from rallytime/doc-build-warnings * 703ab23 Merge pull request `#40055`_ from rallytime/doc-build-warnings
* 72d16c9 Update "yaml" code-block references with "jinja" where needed * 72d16c9 Update "yaml" code-block references with "jinja" where needed
- **PR** `#40072`_: (*meaksh*) [2016.11] Allows overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call - **PR** `#40072`_: (*meaksh*) [2016.11] Allow overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call
@ *2017-03-16T15:31:46Z* @ *2017-03-16T15:31:46Z*
- **PR** `#40018`_: (*meaksh*) Allows overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call - **PR** `#40018`_: (*meaksh*) Allow overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call
| refs: `#40072`_ | refs: `#40072`_
* e73a1d0 Merge pull request `#40072`_ from meaksh/2016.11-handling-timeouts-for-manage.up-runner * e73a1d0 Merge pull request `#40072`_ from meaksh/2016.11-handling-timeouts-for-manage.up-runner
* 40246d3 Allows to set custom timeouts for 'manage.up' and 'manage.status' * 40246d3 Allow setting custom timeouts for 'manage.up' and 'manage.status'
* ad232fd Allows to set 'timeout' and 'gather_job_timeout' via kwargs * ad232fd Allow setting 'timeout' and 'gather_job_timeout' via kwargs
- **PR** `#40045`_: (*terminalmage*) Fix error when chhome is invoked by user.present state in Windows - **PR** `#40045`_: (*terminalmage*) Fix error when chhome is invoked by user.present state in Windows
@ *2017-03-15T19:00:41Z* @ *2017-03-15T19:00:41Z*
@ -1458,7 +1458,7 @@ Changes:
- **PR** `#40016`_: (*terminalmage*) Attempt to fix failing grains tests in 2016.3 - **PR** `#40016`_: (*terminalmage*) Attempt to fix failing grains tests in 2016.3
- **PR** `#39994`_: (*rallytime*) Add a versionadded tag for dockerng ulimits addition - **PR** `#39994`_: (*rallytime*) Add a versionadded tag for dockerng ulimits addition
- **PR** `#39988`_: (*terminalmage*) Add comment explaining change from `#39973`_ - **PR** `#39988`_: (*terminalmage*) Add comment explaining change from `#39973`_
- **PR** `#39980`_: (*vutny*) [2016.3] Allow to use `bg` kwarg for `cmd.run` state function - **PR** `#39980`_: (*vutny*) [2016.3] Allow using `bg` kwarg for `cmd.run` state function
- **PR** `#39973`_: (*terminalmage*) Don't use docker.Client instance from context if missing attributes - **PR** `#39973`_: (*terminalmage*) Don't use docker.Client instance from context if missing attributes
* 277bd17 Merge pull request `#40025`_ from rallytime/merge-2016.11 * 277bd17 Merge pull request `#40025`_ from rallytime/merge-2016.11
* 029f28b Merge branch '2016.3' into '2016.11' * 029f28b Merge branch '2016.3' into '2016.11'
@ -1475,7 +1475,7 @@ Changes:
* 0c61d06 Merge pull request `#39980`_ from vutny/cmd-run-state-bg * 0c61d06 Merge pull request `#39980`_ from vutny/cmd-run-state-bg
* a81dc9d [2016.3] Allow to use `bg` kwarg for `cmd.run` state function * a81dc9d [2016.3] Allow using `bg` kwarg for `cmd.run` state function
* b042484 Merge pull request `#39994`_ from rallytime/ulimits-dockerng-version * b042484 Merge pull request `#39994`_ from rallytime/ulimits-dockerng-version
@ -1834,7 +1834,7 @@ Changes:
* 9f70ad7 Merge pull request `#39472`_ from whiteinge/_reformat_low-update * 9f70ad7 Merge pull request `#39472`_ from whiteinge/_reformat_low-update
* d11f538 Add RunnerClient test for old/new-style arg/kwarg parsing * d11f538 Add RunnerClient test for old/new-style arg/kwarg parsing
* ec377ab Reenable skipped RunnerClient tests * ec377ab Re-enable skipped RunnerClient tests
* 27f7fd9 Update _reformat_low to run arg through parse_input * 27f7fd9 Update _reformat_low to run arg through parse_input
@ -2022,13 +2022,13 @@ Changes:
* e63cbba Merge pull request `#39653`_ from cachedout/26_odict * e63cbba Merge pull request `#39653`_ from cachedout/26_odict
* 91eb721 Use salt's ordereddict for comparison * 91eb721 Use salt's ordereddict for comparison
- **PR** `#39609`_: (*gtmanfred*) intialize the Client stuff in FSClient - **PR** `#39609`_: (*gtmanfred*) initialize the Client stuff in FSClient
@ *2017-02-24T18:50:55Z* @ *2017-02-24T18:50:55Z*
- **ISSUE** `#38836`_: (*toanctruong*) file.managed with S3 Source errors out with obscure message - **ISSUE** `#38836`_: (*toanctruong*) file.managed with S3 Source errors out with obscure message
| refs: `#39589`_ `#39609`_ | refs: `#39589`_ `#39609`_
* 0bc6027 Merge pull request `#39609`_ from gtmanfred/2016.11 * 0bc6027 Merge pull request `#39609`_ from gtmanfred/2016.11
* 0820620 intialize the Client stuff in FSClient * 0820620 initialize the Client stuff in FSClient
- **PR** `#39615`_: (*skizunov*) Bonjour/Avahi beacons: Make sure TXT record length is valid - **PR** `#39615`_: (*skizunov*) Bonjour/Avahi beacons: Make sure TXT record length is valid
@ *2017-02-24T18:47:05Z* @ *2017-02-24T18:47:05Z*

View file

@ -30,7 +30,7 @@ Backwards-incompatible Changes
It has been moved one directory down, into the master cachedir. On most It has been moved one directory down, into the master cachedir. On most
platforms, this is ``/var/cache/salt/master/extmods``. Most users won't have platforms, this is ``/var/cache/salt/master/extmods``. Most users won't have
to worry about this, but those who have been manually placing custom runners to worry about this, but those who have been manually placing custom runners
into ``/var/cache/salt/extmods/runners``, or ouputters into into ``/var/cache/salt/extmods/runners``, or outputters into
``/var/cache/salt/extmods/output``, etc. will be affected by this. To ``/var/cache/salt/extmods/output``, etc. will be affected by this. To
transition, it is recommended not to simply move the extmods directory into transition, it is recommended not to simply move the extmods directory into
``/var/cache/salt/master``, but to copy the custom modules into the salt ``/var/cache/salt/master``, but to copy the custom modules into the salt

View file

@ -149,9 +149,9 @@ Changes:
* fd2ee7d Add some simple unit tests for salt.config.api_config function * fd2ee7d Add some simple unit tests for salt.config.api_config function
* 3d2fefc Make sure the pidfile and log_file values are overriden by api opts * 3d2fefc Make sure the pidfile and log_file values are overridden by api opts
* 1f6b540 Make sure the pidfile and log_file values are overriden by api opts * 1f6b540 Make sure the pidfile and log_file values are overridden by api opts
* 04d307f salt-api no longer forces the default timeout * 04d307f salt-api no longer forces the default timeout
@ -1046,7 +1046,7 @@ Changes:
* 0e74bad Update branch refs to more relevant branch (`#37826`_) * 0e74bad Update branch refs to more relevant branch (`#37826`_)
- **PR** `#37826`_: (*rallytime*) Update branch refs to more relevant branch - **PR** `#37826`_: (*rallytime*) Update branch refs to more relevant branch
- **PR** `#37822`_: (*laleocen*) add documenation for multiline encryption using nacl - **PR** `#37822`_: (*laleocen*) add documentation for multiline encryption using nacl
| refs: `#37826`_ | refs: `#37826`_
* 6a9b49c Add "names" option to file state docs: point users to highstate doc examples (`#37823`_) * 6a9b49c Add "names" option to file state docs: point users to highstate doc examples (`#37823`_)

View file

@ -127,11 +127,11 @@ Changes:
* 35ddb79 Merge pull request `#40141`_ from bobrik/fallback-resolve * 35ddb79 Merge pull request `#40141`_ from bobrik/fallback-resolve
* af1545d Use the first address if cannot connect to any * af1545d Use the first address if cannot connect to any
- **PR** `#40059`_: (*terminalmage*) Fix traceback when virtualenv.managed is invoked with nonexistant user - **PR** `#40059`_: (*terminalmage*) Fix traceback when virtualenv.managed is invoked with nonexistent user
@ *2017-03-16T20:46:43Z* @ *2017-03-16T20:46:43Z*
* 116201f Merge pull request `#40059`_ from terminalmage/fix-virtualenv-traceback * 116201f Merge pull request `#40059`_ from terminalmage/fix-virtualenv-traceback
* e3cfd29 Fix traceback when virtualenv.managed is invoked with nonexistant user * e3cfd29 Fix traceback when virtualenv.managed is invoked with nonexistent user
- **PR** `#40090`_: (*rallytime*) Back-port `#40056`_ to 2016.3 - **PR** `#40090`_: (*rallytime*) Back-port `#40056`_ to 2016.3
@ *2017-03-16T19:42:58Z* @ *2017-03-16T19:42:58Z*
@ -153,13 +153,13 @@ Changes:
* d36bdb1 Merge pull request `#40070`_ from Ch3LL/2016.3.6_release * d36bdb1 Merge pull request `#40070`_ from Ch3LL/2016.3.6_release
* a1f8b49 update 2016.3.6 release notes with additional PR's * a1f8b49 update 2016.3.6 release notes with additional PR's
- **PR** `#40018`_: (*meaksh*) Allows overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call - **PR** `#40018`_: (*meaksh*) Allow overriding 'timeout' and 'gather_job_timeout' to 'manage.up' runner call
@ *2017-03-15T19:43:01Z* @ *2017-03-15T19:43:01Z*
* 8dcffc7 Merge pull request `#40018`_ from meaksh/2016.3-handling-timeouts-for-manage.up-runner * 8dcffc7 Merge pull request `#40018`_ from meaksh/2016.3-handling-timeouts-for-manage.up-runner
* 9f5c3b7 Allows to set custom timeouts for 'manage.up' and 'manage.status' * 9f5c3b7 Allow setting custom timeouts for 'manage.up' and 'manage.status'
* 2102d9c Allows to set 'timeout' and 'gather_job_timeout' via kwargs * 2102d9c Allow setting 'timeout' and 'gather_job_timeout' via kwargs
- **PR** `#40038`_: (*velom*) correctly parse "pkg_name===version" from pip freeze - **PR** `#40038`_: (*velom*) correctly parse "pkg_name===version" from pip freeze
@ *2017-03-15T19:30:03Z* @ *2017-03-15T19:30:03Z*
@ -197,11 +197,11 @@ Changes:
* 5d84b40 Attempt to fix failing grains tests in 2016.3 * 5d84b40 Attempt to fix failing grains tests in 2016.3
- **PR** `#39980`_: (*vutny*) [2016.3] Allow to use `bg` kwarg for `cmd.run` state function - **PR** `#39980`_: (*vutny*) [2016.3] Allow using `bg` kwarg for `cmd.run` state function
@ *2017-03-14T17:16:14Z* @ *2017-03-14T17:16:14Z*
* 0c61d06 Merge pull request `#39980`_ from vutny/cmd-run-state-bg * 0c61d06 Merge pull request `#39980`_ from vutny/cmd-run-state-bg
* a81dc9d [2016.3] Allow to use `bg` kwarg for `cmd.run` state function * a81dc9d [2016.3] Allow using `bg` kwarg for `cmd.run` state function
- **PR** `#39994`_: (*rallytime*) Add a versionadded tag for dockerng ulimits addition - **PR** `#39994`_: (*rallytime*) Add a versionadded tag for dockerng ulimits addition
@ *2017-03-13T20:58:02Z* @ *2017-03-13T20:58:02Z*
@ -658,7 +658,7 @@ Changes:
- **ISSUE** `#39220`_: (*lvg01*) state file.line skips leading spaces in content with mode:ensure and indent:False - **ISSUE** `#39220`_: (*lvg01*) state file.line skips leading spaces in content with mode:ensure and indent:False
| refs: `#39221`_ `#39221`_ `#39221`_ `#39221`_ | refs: `#39221`_ `#39221`_ `#39221`_ `#39221`_
* 5244041 Merge pull request `#39221`_ from lvg01/fix-bug-39220 * 5244041 Merge pull request `#39221`_ from lvg01/fix-bug-39220
* e8a41d6 Removes too early content stripping (stripping is allready done when needed with ident:true), fixes `#39220`_ * e8a41d6 Removes too early content stripping (stripping is already done when needed with ident:true), fixes `#39220`_
* a4b169e Fixed wrong logic, fixes `#39220`_ * a4b169e Fixed wrong logic, fixes `#39220`_
@ -807,11 +807,11 @@ Changes:
* 97521b3 Second attempt to fix prepending of root_dir to paths * 97521b3 Second attempt to fix prepending of root_dir to paths
* 6ffeda3 Clarify ipv6 option for minion and inteface for master, closes `#39118`_ (`#39131`_) * 6ffeda3 Clarify ipv6 option for minion and interface for master, closes `#39118`_ (`#39131`_)
- **ISSUE** `#39118`_: (*bobrik*) Minion ipv6 option is not documented - **ISSUE** `#39118`_: (*bobrik*) Minion ipv6 option is not documented
| refs: `#39289`_ | refs: `#39289`_
- **PR** `#39131`_: (*bobrik*) Clarify ipv6 option for minion and inteface for master, closes `#39118`_ - **PR** `#39131`_: (*bobrik*) Clarify ipv6 option for minion and interface for master, closes `#39118`_
* 646b9ea Don't abort pillar.get with merge=True if default is None (`#39116`_) * 646b9ea Don't abort pillar.get with merge=True if default is None (`#39116`_)
@ -1013,7 +1013,7 @@ Changes:
* e40fac5 Catch MinionError in file.source_list * e40fac5 Catch MinionError in file.source_list
- **PR** `#38875`_: (*terminalmage*) Reactor: fix traceback when salt:// path is nonexistant - **PR** `#38875`_: (*terminalmage*) Reactor: fix traceback when salt:// path is nonexistent
@ *2017-01-24T15:23:39Z* @ *2017-01-24T15:23:39Z*
- **ISSUE** `#36121`_: (*Ashald*) TemplateNotFound/Unable to cache file - **ISSUE** `#36121`_: (*Ashald*) TemplateNotFound/Unable to cache file
@ -1021,7 +1021,7 @@ Changes:
* b5df104 Merge pull request `#38875`_ from terminalmage/issue36121 * b5df104 Merge pull request `#38875`_ from terminalmage/issue36121
* fbc4d2a reactor: ensure glob_ref is a string * fbc4d2a reactor: ensure glob_ref is a string
* 2e443d7 cp.cache_file: add note re: return for nonexistant salt:// path * 2e443d7 cp.cache_file: add note re: return for nonexistent salt:// path
- **PR** `#38890`_: (*cro*) Backport `#38887`_ to 2016.3: Enable resetting a VM via salt-cloud & VMware driver - **PR** `#38890`_: (*cro*) Backport `#38887`_ to 2016.3: Enable resetting a VM via salt-cloud & VMware driver
@ *2017-01-24T15:15:35Z* @ *2017-01-24T15:15:35Z*

View file

@ -98,7 +98,7 @@ a new value using a command like:
Deleting values (if supported by the driver) is done pretty much the same way as Deleting values (if supported by the driver) is done pretty much the same way as
getting them. Provided that you have a profile called ``mykvstore`` that uses getting them. Provided that you have a profile called ``mykvstore`` that uses
a driver allowing to delete values you would delete a value as shown bellow: a driver allowing to delete values you would delete a value as shown below:
.. code-block:: bash .. code-block:: bash

View file

@ -66,7 +66,7 @@ Specific options can be sent to the minion also, as defined in the Python
.. note:: .. note::
While setting the ssl_version is not required, we recomend it. Some older While setting the ssl_version is not required, we recommend it. Some older
versions of python do not support the latest TLS protocol and if this is versions of python do not support the latest TLS protocol and if this is
the case for your version of python we strongly recommend upgrading your the case for your version of python we strongly recommend upgrading your
version of Python. version of Python.

View file

@ -1,5 +1,6 @@
[Unit] [Unit]
Description=The Salt API Description=The Salt API
Documentation=man:salt-api(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target After=network.target
[Service] [Service]

View file

@ -1,5 +1,6 @@
[Unit] [Unit]
Description=The Salt Master Server Description=The Salt Master Server
Documentation=man:salt-master(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target After=network.target
[Service] [Service]

View file

@ -1,5 +1,6 @@
[Unit] [Unit]
Description=The Salt Minion Description=The Salt Minion
Documentation=man:salt-minion(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target salt-master.service After=network.target salt-master.service
[Service] [Service]

View file

@ -1,5 +1,6 @@
[Unit] [Unit]
Description=salt-proxy service Description=salt-proxy service
Documentation=man:salt-proxy(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target After=network.target
[Service] [Service]

View file

@ -1,5 +1,6 @@
[Unit] [Unit]
Description=The Salt Master Server Description=The Salt Master Server
Documentation=man:salt-syndic(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
After=network.target After=network.target
[Service] [Service]

View file

@ -130,7 +130,7 @@ Wed Oct 7 19:15:54 UTC 2015 - mrueckert@suse.de
2. the only part of the package which would really benefit from 2. the only part of the package which would really benefit from
it would be the doc package. but given we only install the it would be the doc package. but given we only install the
files via %doc, we can not use it for that either. files via %doc, we can not use it for that either.
- reenable completions on distros newer than sle11 - re-enable completions on distros newer than sle11
- do not use _datarootdir, use _datadir instead. - do not use _datarootdir, use _datadir instead.
------------------------------------------------------------------- -------------------------------------------------------------------

View file

@ -91,7 +91,7 @@ def beacon(config):
''' '''
Broadcast values via zeroconf Broadcast values via zeroconf
If the announced values are static, it is adviced to set run_once: True If the announced values are static, it is advised to set run_once: True
(do not poll) on the beacon configuration. (do not poll) on the beacon configuration.
The following are required configuration settings: The following are required configuration settings:

View file

@ -84,7 +84,7 @@ def beacon(config):
''' '''
Broadcast values via zeroconf Broadcast values via zeroconf
If the announced values are static, it is adviced to set run_once: True If the announced values are static, it is advised to set run_once: True
(do not poll) on the beacon configuration. (do not poll) on the beacon configuration.
The following are required configuration settings: The following are required configuration settings:

View file

@ -538,7 +538,7 @@ class AsyncClientMixin(object):
# if this is a ret, we have our own set of rules # if this is a ret, we have our own set of rules
if suffix == 'ret': if suffix == 'ret':
# Check if ouputter was passed in the return data. If this is the case, # Check if outputter was passed in the return data. If this is the case,
# then the return data will be a dict with two keys: 'data' and 'outputter' # then the return data will be a dict with two keys: 'data' and 'outputter'
if isinstance(event.get('return'), dict) \ if isinstance(event.get('return'), dict) \
and set(event['return']) == set(('data', 'outputter')): and set(event['return']) == set(('data', 'outputter')):

View file

@ -4342,7 +4342,7 @@ def delete_keypair(kwargs=None, call=None):
return False return False
params = {'Action': 'DeleteKeyPair', params = {'Action': 'DeleteKeyPair',
'KeyName.1': kwargs['keyname']} 'KeyName': kwargs['keyname']}
data = aws.query(params, data = aws.query(params,
return_url=True, return_url=True,

View file

@ -2657,7 +2657,7 @@ def create(vm_):
log.info("Creating {0} from {1}({2})".format(vm_['name'], clone_type, vm_['clonefrom'])) log.info("Creating {0} from {1}({2})".format(vm_['name'], clone_type, vm_['clonefrom']))
if datastore and not datastore_ref and datastore_cluster_ref: if datastore and not datastore_ref and datastore_cluster_ref:
# datastore cluster has been specified so apply Storage DRS recomendations # datastore cluster has been specified so apply Storage DRS recommendations
pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref) pod_spec = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_ref)
storage_spec = vim.storageDrs.StoragePlacementSpec( storage_spec = vim.storageDrs.StoragePlacementSpec(

File diff suppressed because it is too large Load diff

View file

@ -2070,7 +2070,7 @@ def prepend_root_dir(opts, path_options):
# Remove relative root dir so we can add the absolute root dir # Remove relative root dir so we can add the absolute root dir
path = path[len(root_opt):] path = path[len(root_opt):]
elif os.path.isabs(path_option): elif os.path.isabs(path_option):
# Absolute path (not default or overriden root_dir) # Absolute path (not default or overridden root_dir)
# No prepending required # No prepending required
continue continue
# Prepending the root dir # Prepending the root dir
@ -2422,7 +2422,7 @@ def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
elif master_config_path is not None and master_config is None: elif master_config_path is not None and master_config is None:
master_config = salt.config.master_config(master_config_path) master_config = salt.config.master_config(master_config_path)
# cloud config has a seperate cachedir # cloud config has a separate cachedir
del master_config['cachedir'] del master_config['cachedir']
# 2nd - salt-cloud configuration which was loaded before so we could # 2nd - salt-cloud configuration which was loaded before so we could

View file

@ -65,7 +65,7 @@ def worker_fork(self):
class Worker(multiprocessing.Process): class Worker(multiprocessing.Process):
''' '''
Create an ioflo worker in a seperate process Create an ioflo worker in a separate process
''' '''
def __init__(self, opts, windex, worker_verify, access_keys, mkey, aes): def __init__(self, opts, windex, worker_verify, access_keys, mkey, aes):
super(Worker, self).__init__() super(Worker, self).__init__()

View file

@ -281,7 +281,7 @@ def renew_by(name, window=None):
def needs_renewal(name, window=None): def needs_renewal(name, window=None):
''' '''
Check if a certicate needs renewal Check if a certificate needs renewal
:param name: CommonName of cert :param name: CommonName of cert
:param window: Window in days to renew earlier or True/force to just return True :param window: Window in days to renew earlier or True/force to just return True

View file

@ -994,7 +994,7 @@ def unzip(zip_file,
extract_perms : True extract_perms : True
The Python zipfile_ module does not extract file/directory attributes The Python zipfile_ module does not extract file/directory attributes
by default. When this argument is set to ``True``, Salt will attempt to by default. When this argument is set to ``True``, Salt will attempt to
apply the file permision attributes to the extracted files/folders. apply the file permission attributes to the extracted files/folders.
On Windows, only the read-only flag will be extracted as set within the On Windows, only the read-only flag will be extracted as set within the
zip file, other attributes (i.e. user/group permissions) are ignored. zip file, other attributes (i.e. user/group permissions) are ignored.

View file

@ -597,7 +597,7 @@ def set_tags(tags,
a dict of key:value pair of tags to set on the security group a dict of key:value pair of tags to set on the security group
name name
the name of the security gruop the name of the security group
group_id group_id
the group id of the security group (in lieu of a name/vpc combo) the group id of the security group (in lieu of a name/vpc combo)

View file

@ -143,7 +143,7 @@ def osd_prepare(**kwargs):
Notes: Notes:
cluster_uuid cluster_uuid
Set the deivce to store the osd data on. Set the device to store the osd data on.
journal_dev journal_dev
Set the journal device. Defaults to osd_dev. Set the journal device. Defaults to osd_dev.
@ -194,7 +194,7 @@ def keyring_create(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -223,7 +223,7 @@ def keyring_save(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -251,7 +251,7 @@ def keyring_purge(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -281,7 +281,7 @@ def keyring_present(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -309,7 +309,7 @@ def keyring_auth_add(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -337,7 +337,7 @@ def keyring_auth_del(**kwargs):
Notes: Notes:
keyring_type keyring_type
Required paramter Required parameter
Can be set to: Can be set to:
admin, mon, osd, rgw, mds admin, mon, osd, rgw, mds
@ -374,7 +374,7 @@ def mon_is(**kwargs):
def mon_status(**kwargs): def mon_status(**kwargs):
''' '''
Get status from mon deamon Get status from mon daemon
CLI Example: CLI Example:
@ -396,7 +396,7 @@ def mon_status(**kwargs):
def mon_quorum(**kwargs): def mon_quorum(**kwargs):
''' '''
Is mon deamon in quorum Is mon daemon in quorum
CLI Example: CLI Example:
@ -418,7 +418,7 @@ def mon_quorum(**kwargs):
def mon_active(**kwargs): def mon_active(**kwargs):
''' '''
Is mon deamon running Is mon daemon running
CLI Example: CLI Example:
@ -518,7 +518,7 @@ def rgw_create(**kwargs):
Notes: Notes:
name: name:
Required paramter Required parameter
Set the rgw client name. Must start with 'rgw.' Set the rgw client name. Must start with 'rgw.'
cluster_uuid cluster_uuid
@ -546,7 +546,7 @@ def rgw_destroy(**kwargs):
Notes: Notes:
name: name:
Required paramter Required parameter
Set the rgw client name. Must start with 'rgw.' Set the rgw client name. Must start with 'rgw.'
cluster_uuid cluster_uuid
@ -576,15 +576,15 @@ def mds_create(**kwargs):
Notes: Notes:
name: name:
Required paramter Required parameter
Set the rgw client name. Must start with 'mds.' Set the rgw client name. Must start with 'mds.'
port: port:
Required paramter Required parameter
Port for the mds to listen to. Port for the mds to listen to.
addr: addr:
Required paramter Required parameter
Address or IP address for the mds to listen to. Address or IP address for the mds to listen to.
cluster_uuid cluster_uuid
@ -612,7 +612,7 @@ def mds_destroy(**kwargs):
Notes: Notes:
name: name:
Required paramter Required parameter
Set the rgw client name. Must start with 'mds.' Set the rgw client name. Must start with 'mds.'
cluster_uuid cluster_uuid

View file

@ -43,7 +43,7 @@ def get_data(datastore, path):
:type datastore: :class:`DatastoreType` (``str`` enum). :type datastore: :class:`DatastoreType` (``str`` enum).
:param path: The device path to set the value at, :param path: The device path to set the value at,
a list of element names in order, / seperated a list of element names in order, / separated
:type path: ``list``, ``str`` OR ``tuple`` :type path: ``list``, ``str`` OR ``tuple``
:return: The network configuration at that tree :return: The network configuration at that tree
@ -67,7 +67,7 @@ def set_data_value(datastore, path, data):
:type datastore: :class:`DatastoreType` (``str`` enum). :type datastore: :class:`DatastoreType` (``str`` enum).
:param path: The device path to set the value at, :param path: The device path to set the value at,
a list of element names in order, / seperated a list of element names in order, / separated
:type path: ``list``, ``str`` OR ``tuple`` :type path: ``list``, ``str`` OR ``tuple``
:param data: The new value at the given path :param data: The new value at the given path

View file

@ -1581,13 +1581,13 @@ def _write_file_ifaces(iface, data, **settings):
if adapter == iface: if adapter == iface:
saved_ifcfg = tmp saved_ifcfg = tmp
_SEPERATE_FILE = False _SEPARATE_FILE = False
if 'filename' in settings: if 'filename' in settings:
if not settings['filename'].startswith('/'): if not settings['filename'].startswith('/'):
filename = '{0}/{1}'.format(_DEB_NETWORK_DIR, settings['filename']) filename = '{0}/{1}'.format(_DEB_NETWORK_DIR, settings['filename'])
else: else:
filename = settings['filename'] filename = settings['filename']
_SEPERATE_FILE = True _SEPARATE_FILE = True
else: else:
if 'filename' in adapters[adapter]['data']: if 'filename' in adapters[adapter]['data']:
filename = adapters[adapter]['data'] filename = adapters[adapter]['data']
@ -1600,7 +1600,7 @@ def _write_file_ifaces(iface, data, **settings):
log.error(msg) log.error(msg)
raise AttributeError(msg) raise AttributeError(msg)
with salt.utils.flopen(filename, 'w') as fout: with salt.utils.flopen(filename, 'w') as fout:
if _SEPERATE_FILE: if _SEPARATE_FILE:
fout.write(saved_ifcfg) fout.write(saved_ifcfg)
else: else:
fout.write(ifcfg) fout.write(ifcfg)

View file

@ -594,7 +594,7 @@ def get_source_sum(file_name='',
file, used to disambiguate ambiguous matches. file, used to disambiguate ambiguous matches.
saltenv : base saltenv : base
Salt fileserver environment from which to retrive the source_hash. This Salt fileserver environment from which to retrieve the source_hash. This
value will only be used when ``source_hash`` refers to a file on the value will only be used when ``source_hash`` refers to a file on the
Salt fileserver (i.e. one beginning with ``salt://``). Salt fileserver (i.e. one beginning with ``salt://``).
@ -4663,7 +4663,7 @@ def manage_file(name,
.. note:: keep_mode does not work with salt-ssh. .. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transfered to the minion, and As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion cannot mirror the mode on the salt-ssh minion

View file

@ -127,13 +127,14 @@ def get_locale():
salt '*' locale.get_locale salt '*' locale.get_locale
''' '''
cmd = '' cmd = ''
if salt.utils.systemd.booted(__context__): if 'Suse' in __grains__['os_family']:
# this block applies to all SUSE systems - also with systemd
cmd = 'grep "^RC_LANG" /etc/sysconfig/language'
elif salt.utils.systemd.booted(__context__):
params = _parse_dbus_locale() if HAS_DBUS else _parse_localectl() params = _parse_dbus_locale() if HAS_DBUS else _parse_localectl()
return params.get('LANG', '') return params.get('LANG', '')
elif 'RedHat' in __grains__['os_family']: elif 'RedHat' in __grains__['os_family']:
cmd = 'grep "^LANG=" /etc/sysconfig/i18n' cmd = 'grep "^LANG=" /etc/sysconfig/i18n'
elif 'Suse' in __grains__['os_family']:
cmd = 'grep "^RC_LANG" /etc/sysconfig/language'
elif 'Debian' in __grains__['os_family']: elif 'Debian' in __grains__['os_family']:
# this block only applies to Debian without systemd # this block only applies to Debian without systemd
cmd = 'grep "^LANG=" /etc/default/locale' cmd = 'grep "^LANG=" /etc/default/locale'
@ -161,7 +162,17 @@ def set_locale(locale):
salt '*' locale.set_locale 'en_US.UTF-8' salt '*' locale.set_locale 'en_US.UTF-8'
''' '''
if salt.utils.systemd.booted(__context__): if 'Suse' in __grains__['os_family']:
# this block applies to all SUSE systems - also with systemd
if not __salt__['file.file_exists']('/etc/sysconfig/language'):
__salt__['file.touch']('/etc/sysconfig/language')
__salt__['file.replace'](
'/etc/sysconfig/language',
'^RC_LANG=.*',
'RC_LANG="{0}"'.format(locale),
append_if_not_found=True
)
elif salt.utils.systemd.booted(__context__):
return _localectl_set(locale) return _localectl_set(locale)
elif 'RedHat' in __grains__['os_family']: elif 'RedHat' in __grains__['os_family']:
if not __salt__['file.file_exists']('/etc/sysconfig/i18n'): if not __salt__['file.file_exists']('/etc/sysconfig/i18n'):
@ -172,15 +183,6 @@ def set_locale(locale):
'LANG="{0}"'.format(locale), 'LANG="{0}"'.format(locale),
append_if_not_found=True append_if_not_found=True
) )
elif 'Suse' in __grains__['os_family']:
if not __salt__['file.file_exists']('/etc/sysconfig/language'):
__salt__['file.touch']('/etc/sysconfig/language')
__salt__['file.replace'](
'/etc/sysconfig/language',
'^RC_LANG=.*',
'RC_LANG="{0}"'.format(locale),
append_if_not_found=True
)
elif 'Debian' in __grains__['os_family']: elif 'Debian' in __grains__['os_family']:
# this block only applies to Debian without systemd # this block only applies to Debian without systemd
update_locale = salt.utils.which('update-locale') update_locale = salt.utils.which('update-locale')

View file

@ -503,7 +503,7 @@ def cloud_init_interface(name, vm_=None, **kwargs):
# via the legacy salt cloud configuration style. # via the legacy salt cloud configuration style.
# On other cases, we should rely on settings provided by the new # On other cases, we should rely on settings provided by the new
# salt lxc network profile style configuration which can # salt lxc network profile style configuration which can
# be also be overriden or a per interface basis via the nic_opts dict. # be also be overridden or a per interface basis via the nic_opts dict.
if bridge: if bridge:
eth0['link'] = bridge eth0['link'] = bridge
if gateway: if gateway:

View file

@ -577,7 +577,7 @@ def ipaddrs(**kwargs): # pylint: disable=unused-argument
Returns all configured IP addresses on all interfaces as a dictionary of dictionaries.\ Returns all configured IP addresses on all interfaces as a dictionary of dictionaries.\
Keys of the main dictionary represent the name of the interface.\ Keys of the main dictionary represent the name of the interface.\
Values of the main dictionary represent are dictionaries that may consist of two keys\ Values of the main dictionary represent are dictionaries that may consist of two keys\
'ipv4' and 'ipv6' (one, both or none) which are themselvs dictionaries witht the IP addresses as keys.\ 'ipv4' and 'ipv6' (one, both or none) which are themselvs dictionaries with the IP addresses as keys.\
CLI Example: CLI Example:
@ -929,7 +929,7 @@ def load_config(filename=None,
To avoid committing the configuration, set the argument ``test`` to ``True`` and will discard (dry run). To avoid committing the configuration, set the argument ``test`` to ``True`` and will discard (dry run).
To keep the chnages but not commit, set ``commit`` to ``False``. To keep the changes but not commit, set ``commit`` to ``False``.
To replace the config, set ``replace`` to ``True``. To replace the config, set ``replace`` to ``True``.
@ -947,7 +947,7 @@ def load_config(filename=None,
Commit? Default: ``True``. Commit? Default: ``True``.
debug: False debug: False
Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` contaning the raw Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
configuration loaded on the device. configuration loaded on the device.
.. versionadded:: 2016.11.2 .. versionadded:: 2016.11.2
@ -1050,7 +1050,7 @@ def load_template(template_name,
To avoid committing the configuration, set the argument ``test`` to ``True`` To avoid committing the configuration, set the argument ``test`` to ``True``
and will discard (dry run). and will discard (dry run).
To preserve the chnages, set ``commit`` to ``False``. To preserve the changes, set ``commit`` to ``False``.
However, this is recommended to be used only in exceptional cases However, this is recommended to be used only in exceptional cases
when there are applied few consecutive states when there are applied few consecutive states
and/or configuration changes. and/or configuration changes.
@ -1074,7 +1074,7 @@ def load_template(template_name,
Placing the template under ``/etc/salt/states/templates/example.jinja``, Placing the template under ``/etc/salt/states/templates/example.jinja``,
it can be used as ``salt://templates/example.jinja``. it can be used as ``salt://templates/example.jinja``.
Alternatively, for local files, the user can specify the abolute path. Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``. If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples: Examples:
@ -1156,7 +1156,7 @@ def load_template(template_name,
debug: False debug: False
Debug mode. Will insert a new key under the output dictionary, Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` contaning the raw result after the template was rendered. as ``loaded_config`` containing the raw result after the template was rendered.
.. versionadded:: 2016.11.2 .. versionadded:: 2016.11.2
@ -1175,7 +1175,7 @@ def load_template(template_name,
.. note:: .. note::
Do not explicitely specify this argument. Do not explicitly specify this argument.
This represents any other variable that will be sent This represents any other variable that will be sent
to the template rendering system. to the template rendering system.
Please see the examples below! Please see the examples below!
@ -1314,7 +1314,7 @@ def load_template(template_name,
if template_path and not file_exists: if template_path and not file_exists:
template_name = __salt__['file.join'](template_path, template_name) template_name = __salt__['file.join'](template_path, template_name)
if not saltenv: if not saltenv:
# no saltenv overriden # no saltenv overridden
# use the custom template path # use the custom template path
saltenv = template_path if not salt_render else 'base' saltenv = template_path if not salt_render else 'base'
elif salt_render and not saltenv: elif salt_render and not saltenv:
@ -1539,8 +1539,8 @@ def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unu
If differences found, will try to commit. If differences found, will try to commit.
In case commit unsuccessful, will try to rollback. In case commit unsuccessful, will try to rollback.
:return: A tuple with a boolean that specifies if the config was changed/commited/rollbacked on the device.\ :return: A tuple with a boolean that specifies if the config was changed/committed/rollbacked on the device.\
And a string that provides more details of the reason why the configuration was not commited properly. And a string that provides more details of the reason why the configuration was not committed properly.
CLI Example: CLI Example:

View file

@ -213,7 +213,7 @@ def set_peers(*peers, **options):
:commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when :commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when
the user does not want to commit after each change, but after a couple. the user does not want to commit after each change, but after a couple.
By default this function will commit the config changes (if any). To load without commiting, use the `commit` By default this function will commit the config changes (if any). To load without committing, use the `commit`
option. For dry run use the `test` argument. option. For dry run use the `test` argument.
CLI Example: CLI Example:
@ -246,7 +246,7 @@ def set_servers(*servers, **options):
:commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when :commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when
the user does not want to commit after each change, but after a couple. the user does not want to commit after each change, but after a couple.
By default this function will commit the config changes (if any). To load without commiting, use the `commit` By default this function will commit the config changes (if any). To load without committing, use the `commit`
option. For dry run use the `test` argument. option. For dry run use the `test` argument.
CLI Example: CLI Example:
@ -279,7 +279,7 @@ def delete_peers(*peers, **options):
:commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when :commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when
the user does not want to commit after each change, but after a couple. the user does not want to commit after each change, but after a couple.
By default this function will commit the config changes (if any). To load without commiting, use the `commit` By default this function will commit the config changes (if any). To load without committing, use the `commit`
option. For dry run use the `test` argument. option. For dry run use the `test` argument.
CLI Example: CLI Example:
@ -312,7 +312,7 @@ def delete_servers(*servers, **options):
:commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when :commit commit (bool): commit loaded config. By default `commit` is True (will commit the changes). Useful when
the user does not want to commit after each change, but after a couple. the user does not want to commit after each change, but after a couple.
By default this function will commit the config changes (if any). To load without commiting, use the `commit` By default this function will commit the config changes (if any). To load without committing, use the `commit`
option. For dry run use the `test` argument. option. For dry run use the `test` argument.
CLI Example: CLI Example:

View file

@ -7,7 +7,7 @@ see the `Parallels Desktop Reference Guide
<http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf>`_. <http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf>`_.
What has not been implemented yet can be accessed through ``parallels.prlctl`` What has not been implemented yet can be accessed through ``parallels.prlctl``
and ``parallels.prlsrvctl`` (note the preceeding double dash ``--`` as and ``parallels.prlsrvctl`` (note the preceding double dash ``--`` as
necessary): necessary):
.. code-block:: .. code-block::

View file

@ -640,7 +640,7 @@ def get_users():
def lsof(name): def lsof(name):
''' '''
Retrieve the lsof informations of the given process name. Retrieve the lsof information of the given process name.
CLI Example: CLI Example:
@ -657,7 +657,7 @@ def lsof(name):
def netstat(name): def netstat(name):
''' '''
Retrieve the netstat informations of the given process name. Retrieve the netstat information of the given process name.
CLI Example: CLI Example:

View file

@ -476,9 +476,9 @@ def set_value(hive,
under the key. If not passed, the key (Default) value will be set. under the key. If not passed, the key (Default) value will be set.
:param object vdata: The value data to be set. :param object vdata: The value data to be set.
What the type of this paramater What the type of this parameter
should be is determined by the value of the vtype should be is determined by the value of the vtype
paramater. The correspondence parameter. The correspondence
is as follows: is as follows:
.. glossary:: .. glossary::
@ -495,15 +495,15 @@ def set_value(hive,
str str
:param str vtype: The value type. :param str vtype: The value type.
The possible values of the vtype paramater are indicated The possible values of the vtype parameter are indicated
above in the description of the vdata paramater. above in the description of the vdata parameter.
:param bool use_32bit_registry: Sets the 32bit portion of the registry on :param bool use_32bit_registry: Sets the 32bit portion of the registry on
64bit installations. On 32bit machines this is ignored. 64bit installations. On 32bit machines this is ignored.
:param bool volatile: When this paramater has a value of True, the registry key will be :param bool volatile: When this parameter has a value of True, the registry key will be
made volatile (i.e. it will not persist beyond a system reset or shutdown). made volatile (i.e. it will not persist beyond a system reset or shutdown).
This paramater only has an effect when a key is being created and at no This parameter only has an effect when a key is being created and at no
other time. other time.
:return: Returns True if successful, False if not :return: Returns True if successful, False if not

View file

@ -647,7 +647,7 @@ def enable(name, start=False, **kwargs):
def disable(name, stop=False, **kwargs): def disable(name, stop=False, **kwargs):
''' '''
Don't start service ``name`` at boot Don't start service ``name`` at boot
Returns ``True`` if operation is successfull Returns ``True`` if operation is successful
name name
the service's name the service's name
@ -686,7 +686,7 @@ def disable(name, stop=False, **kwargs):
def remove(name): def remove(name):
''' '''
Remove the service <name> from system. Remove the service <name> from system.
Returns ``True`` if operation is successfull. Returns ``True`` if operation is successful.
The service will be also stopped. The service will be also stopped.
name name

View file

@ -247,7 +247,7 @@ def call_hook(message,
username=None, username=None,
icon_emoji=None): icon_emoji=None):
''' '''
Send message to Slack incomming webhook. Send message to Slack incoming webhook.
:param message: The topic of message. :param message: The topic of message.
:param attachment: The message to send to the Slacke WebHook. :param attachment: The message to send to the Slacke WebHook.
@ -258,7 +258,7 @@ def call_hook(message,
:param channel: The channel to use instead of the WebHook default. :param channel: The channel to use instead of the WebHook default.
:param username: Username to use instead of WebHook default. :param username: Username to use instead of WebHook default.
:param icon_emoji: Icon to use instead of WebHook default. :param icon_emoji: Icon to use instead of WebHook default.
:return: Boolean if message was sent successfuly. :return: Boolean if message was sent successfully.
CLI Example: CLI Example:

View file

@ -491,7 +491,7 @@ def modify_snapshot(snapshot_id=None,
snapshot = get_snapshot(config=config, number=snapshot_id) snapshot = get_snapshot(config=config, number=snapshot_id)
try: try:
# Updating only the explicitely provided attributes by the user # Updating only the explicitly provided attributes by the user
updated_opts = { updated_opts = {
'description': description if description is not None else snapshot['description'], 'description': description if description is not None else snapshot['description'],
'cleanup': cleanup if cleanup is not None else snapshot['cleanup'], 'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
@ -669,7 +669,7 @@ def undo(config='root', files=None, num_pre=None, num_post=None):
the files into the state of num_pre. the files into the state of num_pre.
.. warning:: .. warning::
If one of the files has changes after num_post, they will be overwriten If one of the files has changes after num_post, they will be overwritten
The snapshots are used to determine the file list, but the current The snapshots are used to determine the file list, but the current
version of the files will be overwritten by the versions in num_pre. version of the files will be overwritten by the versions in num_pre.
@ -790,7 +790,7 @@ def diff(config='root', filename=None, num_pre=None, num_post=None):
if filepath.startswith(SUBVOLUME): if filepath.startswith(SUBVOLUME):
_filepath = filepath[len(SUBVOLUME):] _filepath = filepath[len(SUBVOLUME):]
# Just in case, removing posible double '/' from the final file paths # Just in case, removing possible double '/' from the final file paths
pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/") pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/") post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")

View file

@ -56,7 +56,7 @@ def create(name, **params):
raise CommandExecutionError( raise CommandExecutionError(
'request to uptime failed : {0}'.format(req.reason) 'request to uptime failed : {0}'.format(req.reason)
) )
log.debug('[uptime] PUT request successfull') log.debug('[uptime] PUT request successful')
return req.json()['_id'] return req.json()['_id']
@ -83,7 +83,7 @@ def delete(name):
raise CommandExecutionError( raise CommandExecutionError(
'request to uptime failed : {0}'.format(req.reason) 'request to uptime failed : {0}'.format(req.reason)
) )
log.debug('[uptime] DELETE request successfull') log.debug('[uptime] DELETE request successful')
return True return True

View file

@ -272,9 +272,9 @@ def import_cert(name,
return False return False
if password: if password:
cert_props = get_cert_file(name=cached_source_path, password=password) cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format, password=password)
else: else:
cert_props = get_cert_file(name=cached_source_path) cert_props = get_cert_file(name=cached_source_path, cert_format=cert_format)
current_certs = get_certs(context=context, store=store) current_certs = get_certs(context=context, store=store)

View file

@ -881,7 +881,7 @@ def create_crl( # pylint: disable=too-many-arguments,too-many-locals
represents one certificate. A dict must contain either the key represents one certificate. A dict must contain either the key
``serial_number`` with the value of the serial number to revoke, or ``serial_number`` with the value of the serial number to revoke, or
``certificate`` with either the PEM encoded text of the certificate, ``certificate`` with either the PEM encoded text of the certificate,
or a path ot the certificate to revoke. or a path to the certificate to revoke.
The dict can optionally contain the ``revocation_date`` key. If this The dict can optionally contain the ``revocation_date`` key. If this
key is omitted the revocation date will be set to now. If should be a key is omitted the revocation date will be set to now. If should be a

View file

@ -431,7 +431,7 @@ def remove(name=None, pkgs=None, recursive=True, **kwargs):
The name of the package to be deleted. The name of the package to be deleted.
recursive recursive
Also remove dependant packages (not required elsewhere). Also remove dependent packages (not required elsewhere).
Default mode: enabled. Default mode: enabled.
Multiple Package Options: Multiple Package Options:

View file

@ -441,7 +441,7 @@ def user_getmedia(userids=None, **connection_args):
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: List of retreived media, False on failure. :return: List of retrieved media, False on failure.
CLI Example: CLI Example:
.. code-block:: bash .. code-block:: bash

View file

@ -481,7 +481,6 @@ import json
import os import os
import signal import signal
import tarfile import tarfile
import time
from multiprocessing import Process, Pipe from multiprocessing import Process, Pipe
# Import third-party libs # Import third-party libs
@ -2411,7 +2410,6 @@ class WebsocketEndpoint(object):
logger.error( logger.error(
"Error: Salt event has non UTF-8 data:\n{0}" "Error: Salt event has non UTF-8 data:\n{0}"
.format(data)) .format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe() parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe handler.pipe = parent_pipe

View file

@ -58,7 +58,7 @@ You can also provide a list of config files:
Select config files through grains|pillar|opts matching Select config files through grains|pillar|opts matching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can also opt for a much more flexible configuration: MakoStack allows to You can also opt for a much more flexible configuration: MakoStack allows one to
select the config files for the current minion based on matching values from select the config files for the current minion based on matching values from
either grains, or pillar, or opts objects. either grains, or pillar, or opts objects.

View file

@ -62,7 +62,7 @@ def ping():
decode=True, decode=True,
) )
log.debug( log.debug(
'chronos.info returned succesfully: %s', 'chronos.info returned successfully: %s',
response, response,
) )
if 'dict' in response: if 'dict' in response:

View file

@ -270,7 +270,7 @@ def get_data(datastore, path):
:type datastore: :class:`DatastoreType` (``str`` enum). :type datastore: :class:`DatastoreType` (``str`` enum).
:param path: The device path, a list of element names in order, :param path: The device path, a list of element names in order,
comma seperated comma separated
:type path: ``list`` of ``str`` OR ``tuple`` :type path: ``list`` of ``str`` OR ``tuple``
:return: The network configuration at that tree :return: The network configuration at that tree
@ -293,7 +293,7 @@ def set_data_value(datastore, path, data):
:type datastore: :class:`DatastoreType` (``str`` enum). :type datastore: :class:`DatastoreType` (``str`` enum).
:param path: The device path to set the value at, :param path: The device path to set the value at,
a list of element names in order, comma seperated a list of element names in order, comma separated
:type path: ``list`` of ``str`` OR ``tuple`` :type path: ``list`` of ``str`` OR ``tuple``
:param data: The new value at the given path :param data: The new value at the given path

View file

@ -62,7 +62,7 @@ def ping():
decode=True, decode=True,
) )
log.debug( log.debug(
'marathon.info returned succesfully: %s', 'marathon.info returned successfully: %s',
response, response,
) )
if 'text' in response and response['text'].strip() == 'pong': if 'text' in response and response['text'].strip() == 'pong':

View file

@ -245,7 +245,7 @@ def call(method, *args, **kwargs):
* result (True/False): if the operation succeeded * result (True/False): if the operation succeeded
* out (object): returns the object as-is from the call * out (object): returns the object as-is from the call
* comment (string): provides more details in case the call failed * comment (string): provides more details in case the call failed
* traceback (string): complete traceback in case of exeception. Please submit an issue including this traceback * traceback (string): complete traceback in case of exception. Please submit an issue including this traceback
on the `correct driver repo`_ and make sure to read the FAQ_ on the `correct driver repo`_ and make sure to read the FAQ_
.. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new

View file

@ -77,7 +77,7 @@ def get(key, profile=None):
returns a dict of joined credential_pairs, credentials_metadata returns a returns a dict of joined credential_pairs, credentials_metadata returns a
dict of metadata relevant to the credentials mapped to the confidant dict of metadata relevant to the credentials mapped to the confidant
service, and result returns a bool that can be used to determine if the sdb service, and result returns a bool that can be used to determine if the sdb
call succeded or failed to fetch credentials from confidant (or from local call succeeded or failed to fetch credentials from confidant (or from local
cache). If result is false, the data in credentials or credentials_metadata cache). If result is false, the data in credentials or credentials_metadata
can't be trusted. can't be trusted.
''' '''

View file

@ -116,7 +116,7 @@ def present(name, api_name, swagger_file, stage_name, api_key_required,
The canconicalization of these input parameters is done in the following order: The canconicalization of these input parameters is done in the following order:
1) lambda_funcname_format is formatted with the input parameters as passed, 1) lambda_funcname_format is formatted with the input parameters as passed,
2) resulting string is stripped for leading/trailing spaces, 2) resulting string is stripped for leading/trailing spaces,
3) path paramter's curly braces are removed from the resource path, 3) path parameter's curly braces are removed from the resource path,
4) consecutive spaces and forward slashes in the paths are replaced with '_' 4) consecutive spaces and forward slashes in the paths are replaced with '_'
5) consecutive '_' are replaced with '_' 5) consecutive '_' are replaced with '_'

View file

@ -322,11 +322,11 @@ def _pipeline_present_with_definition(name, expected_pipeline_objects,
pipeline_objects = pipeline_definition.get('pipelineObjects') pipeline_objects = pipeline_definition.get('pipelineObjects')
parameter_objects = pipeline_definition.get('parameterObjects') parameter_objects = pipeline_definition.get('parameterObjects')
paramater_values = pipeline_definition.get('parameterValues') parameter_values = pipeline_definition.get('parameterValues')
present = (_recursive_compare(_cleaned(pipeline_objects), _cleaned(expected_pipeline_objects)) and present = (_recursive_compare(_cleaned(pipeline_objects), _cleaned(expected_pipeline_objects)) and
_recursive_compare(parameter_objects, expected_parameter_objects) and _recursive_compare(parameter_objects, expected_parameter_objects) and
_recursive_compare(paramater_values, expected_parameter_values)) _recursive_compare(parameter_values, expected_parameter_values))
return present, pipeline_definition return present, pipeline_definition

View file

@ -1735,10 +1735,10 @@ def delete_vpc_peering_connection(name, conn_id=None, conn_name=None,
Name of the state Name of the state
conn_id conn_id
ID of the peering connection to delete. Exlusive with conn_name. ID of the peering connection to delete. Exclusive with conn_name.
conn_name conn_name
The name of the peering connection to delete. Exlusive with conn_id. The name of the peering connection to delete. Exclusive with conn_id.
region region
Region to connect to. Region to connect to.

View file

@ -68,14 +68,14 @@ def quorum(name, **kwargs):
- require: - require:
- sesceph: mon_running - sesceph: mon_running
''' '''
paramters = _ordereddict2dict(kwargs) parameters = _ordereddict2dict(kwargs)
if paramters is None: if parameters is None:
return _error(name, "Invalid paramters:%s") return _error(name, "Invalid parameters:%s")
if __opts__['test']: if __opts__['test']:
return _test(name, "cluster quorum") return _test(name, "cluster quorum")
try: try:
cluster_quorum = __salt__['ceph.cluster_quorum'](**paramters) cluster_quorum = __salt__['ceph.cluster_quorum'](**parameters)
except (CommandExecutionError, CommandNotFoundError) as err: except (CommandExecutionError, CommandNotFoundError) as err:
return _error(name, err.strerror) return _error(name, err.strerror)
if cluster_quorum: if cluster_quorum:

View file

@ -25,7 +25,7 @@ def value_present(name, datastore, path, config):
:type datastore: :class:`DatastoreType` (``str`` enum). :type datastore: :class:`DatastoreType` (``str`` enum).
:param path: The device path to set the value at, :param path: The device path to set the value at,
a list of element names in order, / seperated a list of element names in order, / separated
:type path: ``list``, ``str`` OR ``tuple`` :type path: ``list``, ``str`` OR ``tuple``
:param config: The new value at the given path :param config: The new value at the given path

View file

@ -1737,7 +1737,7 @@ def managed(name,
.. note:: keep does not work with salt-ssh. .. note:: keep does not work with salt-ssh.
As a consequence of how the files are transfered to the minion, and As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion cannot mirror the mode on the salt-ssh minion
@ -1968,7 +1968,7 @@ def managed(name,
tmp_ext tmp_ext
Suffix for temp file created by ``check_cmd``. Useful for checkers Suffix for temp file created by ``check_cmd``. Useful for checkers
dependant on config file extension (e.g. the init-checkconf upstart dependent on config file extension (e.g. the init-checkconf upstart
config checker). config checker).
.. code-block:: yaml .. code-block:: yaml

View file

@ -208,6 +208,8 @@ def _diff(old, new):
for key in old_keys: for key in old_keys:
if key == 'id' or key == 'orgId': if key == 'id' or key == 'orgId':
del old[key] del old[key]
elif key not in new.keys():
del old[key]
elif old[key] == new[key]: elif old[key] == new[key]:
del old[key] del old[key]
del new[key] del new[key]

View file

@ -279,7 +279,7 @@ def config_absent(name):
.. note:: .. note::
For certain cases extra lines could be removed based on dependencies. For certain cases extra lines could be removed based on dependencies.
In this example, included after the example for config_present, the In this example, included after the example for config_present, the
ACLs would be removed because they depend on the existance of the ACLs would be removed because they depend on the existence of the
group. group.
''' '''

View file

@ -1233,7 +1233,7 @@ def installed(
``3010`` is the only recognized exit code, ``3010`` is the only recognized exit code,
but this is subject to future refinement. but this is subject to future refinement.
The value of this param The value of this param
defaults to ``True``. This paramater has no effect defaults to ``True``. This parameter has no effect
on non-Windows systems. on non-Windows systems.
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0
@ -1828,7 +1828,7 @@ def latest(
for the remainder of the current boot session. For the time being, for the remainder of the current boot session. For the time being,
``3010`` is the only recognized exit code, but this ``3010`` is the only recognized exit code, but this
is subject to future refinement. The value of this param is subject to future refinement. The value of this param
defaults to ``True``. This paramater has no effect on defaults to ``True``. This parameter has no effect on
non-Windows systems. non-Windows systems.
.. versionadded:: 2016.11.0 .. versionadded:: 2016.11.0

View file

@ -75,7 +75,7 @@ def _default_ret(name):
def _retrieve_rpm_probes(): def _retrieve_rpm_probes():
''' '''
Will retrive the probes from the network device using salt module "probes" throught NAPALM proxy. Will retrieve the probes from the network device using salt module "probes" throught NAPALM proxy.
''' '''
return __salt__['probes.config']() return __salt__['probes.config']()

View file

@ -43,7 +43,7 @@ def __virtual__():
def _get_summary(rsync_out): def _get_summary(rsync_out):
''' '''
Get summary from the rsync successfull output. Get summary from the rsync successful output.
''' '''
return "- " + "\n- ".join([elm for elm in rsync_out.split("\n\n")[-1].replace(" ", "\n").split("\n") if elm]) return "- " + "\n- ".join([elm for elm in rsync_out.split("\n\n")[-1].replace(" ", "\n").split("\n") if elm])
@ -51,7 +51,7 @@ def _get_summary(rsync_out):
def _get_changes(rsync_out): def _get_changes(rsync_out):
''' '''
Get changes from the rsync successfull output. Get changes from the rsync successful output.
''' '''
copied = list() copied = list()
deleted = list() deleted = list()

View file

@ -44,7 +44,7 @@ Multiple policy configuration
Minimum password age: 1 Minimum password age: 1
Minimum password length: 14 Minimum password length: 14
Password must meet complexity requirements: Enabled Password must meet complexity requirements: Enabled
Store passwords using reversible encrytion: Disabled Store passwords using reversible encryption: Disabled
Configure Automatic Updates: Configure Automatic Updates:
Configure automatic updating: 4 - Auto download and schedule the intsall Configure automatic updating: 4 - Auto download and schedule the intsall
Scheduled install day: 7 - Every Saturday Scheduled install day: 7 - Every Saturday

View file

@ -2271,7 +2271,7 @@ def namespaced_function(function, global_dict, defaults=None, preserve_context=F
Redefine (clone) a function under a different globals() namespace scope Redefine (clone) a function under a different globals() namespace scope
preserve_context: preserve_context:
Allow to keep the context taken from orignal namespace, Allow keeping the context taken from orignal namespace,
and extend it with globals() taken from and extend it with globals() taken from
new targetted namespace. new targetted namespace.
''' '''

View file

@ -227,7 +227,7 @@ def run(extension=None, name=None, description=None, salt_dir=None, merge=False,
:param salt_dir: The targeted Salt source directory :param salt_dir: The targeted Salt source directory
:type salt_dir: ``str`` :type salt_dir: ``str``
:param merge: Merge with salt directory, `False` to keep seperate, `True` to merge trees. :param merge: Merge with salt directory, `False` to keep separate, `True` to merge trees.
:type merge: ``bool`` :type merge: ``bool``
:param temp_dir: The directory for generated code, if omitted, system temp will be used :param temp_dir: The directory for generated code, if omitted, system temp will be used

View file

@ -94,7 +94,7 @@ def _generate_minion_id():
class DistinctList(list): class DistinctList(list):
''' '''
List, which allows to append only distinct objects. List, which allows one to append only distinct objects.
Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version.
Override 'filter()' for custom filtering. Override 'filter()' for custom filtering.
''' '''

View file

@ -278,7 +278,7 @@ class ProcessManager(object):
kwargs = {} kwargs = {}
if salt.utils.is_windows(): if salt.utils.is_windows():
# Need to ensure that 'log_queue' is correctly transfered to # Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'. # processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and ( if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)): issubclass(tgt, MultiprocessingProcess)):

View file

@ -255,7 +255,7 @@ def _get_jinja_error(trace, context=None):
): ):
add_log = True add_log = True
template_path = error[0] template_path = error[0]
# if we add a log, format explicitly the exeception here # if we add a log, format explicitly the exception here
# by telling to output the macro context after the macro # by telling to output the macro context after the macro
# error log place at the beginning # error log place at the beginning
if add_log: if add_log:

View file

@ -800,8 +800,8 @@ class InstallLib(install_lib):
chmod = [] chmod = []
for idx, inputfile in enumerate(inp): for idx, inputfile in enumerate(inp):
for executeable in executables: for executable in executables:
if inputfile.endswith(executeable): if inputfile.endswith(executable):
chmod.append(idx) chmod.append(idx)
for idx in chmod: for idx in chmod:
filename = out[idx] filename = out[idx]

View file

@ -468,19 +468,19 @@ class XpcomConversionTests(TestCase):
for key in expected_extras: for key in expected_extras:
self.assertIn(key, ret_keys) self.assertIn(key, ret_keys)
def test_extra_nonexistant_attributes(self): def test_extra_nonexistent_attributes(self):
expected_extra_dict = { expected_extra_dict = {
"nonexistant": "" "nonexistent": ""
} }
xpcom = XpcomConversionTests._mock_xpcom_object() xpcom = XpcomConversionTests._mock_xpcom_object()
ret = vb_xpcom_to_attribute_dict(xpcom, extra_attributes=expected_extra_dict.keys()) ret = vb_xpcom_to_attribute_dict(xpcom, extra_attributes=expected_extra_dict.keys())
self.assertDictEqual(ret, expected_extra_dict) self.assertDictEqual(ret, expected_extra_dict)
def test_extra_nonexistant_attribute_with_default(self): def test_extra_nonexistent_attribute_with_default(self):
expected_extras = [("nonexistant", list)] expected_extras = [("nonexistent", list)]
expected_extra_dict = { expected_extra_dict = {
"nonexistant": [] "nonexistent": []
} }
xpcom = XpcomConversionTests._mock_xpcom_object() xpcom = XpcomConversionTests._mock_xpcom_object()

View file

@ -90,7 +90,7 @@ class TestSaltAPIHandler(_SaltnadoIntegrationTestCase):
self.assertEqual(response.headers['Location'], '/login') self.assertEqual(response.headers['Location'], '/login')
# Local client tests # Local client tests
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_simple_local_post(self): def test_simple_local_post(self):
''' '''
Test a basic API of / Test a basic API of /
@ -326,7 +326,7 @@ class TestMinionSaltAPIHandler(_SaltnadoIntegrationTestCase):
for minion_id, grains in six.iteritems(response_obj['return'][0]): for minion_id, grains in six.iteritems(response_obj['return'][0]):
self.assertEqual(minion_id, grains['id']) self.assertEqual(minion_id, grains['id'])
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_get(self): def test_get(self):
response = self.fetch('/minions/minion', response = self.fetch('/minions/minion',
method='GET', method='GET',
@ -410,7 +410,7 @@ class TestJobsSaltAPIHandler(_SaltnadoIntegrationTestCase):
application.event_listener = saltnado.EventListener({}, self.opts) application.event_listener = saltnado.EventListener({}, self.opts)
return application return application
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_get(self): def test_get(self):
# test with no JID # test with no JID
self.http_client.fetch(self.get_url('/jobs'), self.http_client.fetch(self.get_url('/jobs'),
@ -463,7 +463,7 @@ class TestRunSaltAPIHandler(_SaltnadoIntegrationTestCase):
application.event_listener = saltnado.EventListener({}, self.opts) application.event_listener = saltnado.EventListener({}, self.opts)
return application return application
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_get(self): def test_get(self):
low = [{'client': 'local', low = [{'client': 'local',
'tgt': '*', 'tgt': '*',

View file

@ -30,7 +30,7 @@ class ManageTest(ShellCase):
self.assertEqual(ret['return'], {}) self.assertEqual(ret['return'], {})
self.assertEqual(ret['out'], []) self.assertEqual(ret['out'], [])
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_list_jobs(self): def test_list_jobs(self):
''' '''
jobs.list_jobs jobs.list_jobs

View file

@ -140,7 +140,7 @@ class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin
self.assertNotEqual(0, retcode) self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win') @skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@skipIf(True, 'to be reenabled when #23623 is merged') @skipIf(True, 'to be re-enabled when #23623 is merged')
def test_return(self): def test_return(self):
self.run_call('cmd.run "echo returnTOmaster"') self.run_call('cmd.run "echo returnTOmaster"')
jobs = [a for a in self.run_run('jobs.list_jobs')] jobs = [a for a in self.run_run('jobs.list_jobs')]

View file

@ -492,7 +492,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin):
Need to check to ensure the package has been Need to check to ensure the package has been
installed after the pkg_latest_epoch sls installed after the pkg_latest_epoch sls
file has been run. This needs to be broken up into file has been run. This needs to be broken up into
a seperate method so I can add the requires_salt_modules a separate method so I can add the requires_salt_modules
decorator to only the pkg.info_installed command. decorator to only the pkg.info_installed command.
''' '''
# Skip test if package manager not available # Skip test if package manager not available

View file

@ -183,7 +183,7 @@ class TestProgram(six.with_metaclass(TestProgramMeta, object)):
@property @property
def start_pid(self): def start_pid(self):
'''PID of the called script prior to deamonizing.''' '''PID of the called script prior to daemonizing.'''
return self.process.pid if self.process else None return self.process.pid if self.process else None
@property @property

View file

@ -43,17 +43,18 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin):
Test for Get the current system locale Test for Get the current system locale
''' '''
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}): with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
with patch.multiple(localemod, with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
_parse_dbus_locale=MagicMock(return_value={'LANG': 'A'}), with patch.multiple(localemod,
HAS_DBUS=True): _parse_dbus_locale=MagicMock(return_value={'LANG': 'A'}),
self.assertEqual('A', localemod.get_locale()) HAS_DBUS=True):
localemod._parse_dbus_locale.assert_called_once_with() self.assertEqual('A', localemod.get_locale())
localemod._parse_dbus_locale.assert_called_once_with()
with patch.multiple(localemod,
_parse_localectl=MagicMock(return_value={'LANG': 'A'}), with patch.multiple(localemod,
HAS_DBUS=False): _parse_localectl=MagicMock(return_value={'LANG': 'A'}),
self.assertEqual('A', localemod.get_locale()) HAS_DBUS=False):
localemod._parse_localectl.assert_called_once_with() self.assertEqual('A', localemod.get_locale())
localemod._parse_localectl.assert_called_once_with()
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}): with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}):
with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}): with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}):
@ -79,8 +80,9 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin):
Test for Sets the current system locale Test for Sets the current system locale
''' '''
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}): with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': True}):
with patch.object(localemod, '_localectl_set', return_value=True): with patch.dict(localemod.__grains__, {'os_family': ['Unknown']}):
self.assertTrue(localemod.set_locale('l')) with patch.object(localemod, '_localectl_set', return_value=True):
self.assertTrue(localemod.set_locale('l'))
with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}): with patch.dict(localemod.__context__, {'salt.utils.systemd.booted': False}):
with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}): with patch.dict(localemod.__grains__, {'os_family': ['Gentoo']}):

View file

@ -27,7 +27,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self): def setup_loader_modules(self):
return {powercfg: {'__grains__': {'osrelease': 8}}} return {powercfg: {'__grains__': {'osrelease': 8}}}
query_ouput = '''Subgroup GUID: 238c9fa8-0aad-41ed-83f4-97be242c8f20 (Hibernate) query_output = '''Subgroup GUID: 238c9fa8-0aad-41ed-83f4-97be242c8f20 (Hibernate)
GUID Alias: SUB_SLEEP GUID Alias: SUB_SLEEP
Power Setting GUID: 29f6c1db-86da-48c5-9fdb-f2b67b1f44da (Hibernate after) Power Setting GUID: 29f6c1db-86da-48c5-9fdb-f2b67b1f44da (Hibernate after)
GUID Alias: HIBERNATEIDLE GUID Alias: HIBERNATEIDLE
@ -43,7 +43,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can set the monitor timeout value Test to make sure we can set the monitor timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
powercfg.set_monitor_timeout(0, "dc") powercfg.set_monitor_timeout(0, "dc")
@ -58,7 +58,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can set the disk timeout value Test to make sure we can set the disk timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
powercfg.set_disk_timeout(0, "dc") powercfg.set_disk_timeout(0, "dc")
@ -73,7 +73,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can set the standby timeout value Test to make sure we can set the standby timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
powercfg.set_standby_timeout(0, "dc") powercfg.set_standby_timeout(0, "dc")
@ -88,7 +88,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can set the hibernate timeout value Test to make sure we can set the hibernate timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
powercfg.set_hibernate_timeout(0, "dc") powercfg.set_hibernate_timeout(0, "dc")
@ -103,7 +103,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the monitor timeout value Test to make sure we can get the monitor timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
ret = powercfg.get_monitor_timeout() ret = powercfg.get_monitor_timeout()
@ -120,7 +120,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the disk timeout value Test to make sure we can get the disk timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
ret = powercfg.get_disk_timeout() ret = powercfg.get_disk_timeout()
@ -137,7 +137,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the standby timeout value Test to make sure we can get the standby timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
ret = powercfg.get_standby_timeout() ret = powercfg.get_standby_timeout()
@ -154,7 +154,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the hibernate timeout value Test to make sure we can get the hibernate timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
ret = powercfg.get_hibernate_timeout() ret = powercfg.get_hibernate_timeout()
@ -171,7 +171,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the hibernate timeout value on windows 7 Test to make sure we can get the hibernate timeout value on windows 7
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_ouput] mock.side_effect = ["Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)", self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
with patch.dict(powercfg.__grains__, {'osrelease': '7'}): with patch.dict(powercfg.__grains__, {'osrelease': '7'}):
@ -189,7 +189,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can set the hibernate timeout value Test to make sure we can set the hibernate timeout value
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = [self.query_ouput] mock.side_effect = [self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
powercfg.set_hibernate_timeout(0, "dc", scheme="SCHEME_MIN") powercfg.set_hibernate_timeout(0, "dc", scheme="SCHEME_MIN")
@ -203,7 +203,7 @@ class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
Test to make sure we can get the hibernate timeout value with a specified scheme Test to make sure we can get the hibernate timeout value with a specified scheme
''' '''
mock = MagicMock() mock = MagicMock()
mock.side_effect = [self.query_ouput] mock.side_effect = [self.query_output]
with patch.dict(powercfg.__salt__, {'cmd.run': mock}): with patch.dict(powercfg.__salt__, {'cmd.run': mock}):
ret = powercfg.get_hibernate_timeout(scheme="SCHEME_MIN") ret = powercfg.get_hibernate_timeout(scheme="SCHEME_MIN")