Merge branch '2016.3' into 'develop'

Conflicts:
    - salt/fileclient.py
    - salt/modules/glusterfs.py

commit 2ee175c6d3: 105 changed files with 1166 additions and 546 deletions
.github/CONTRIBUTING.md (vendored): 25 lines changed

@@ -22,9 +22,10 @@ Please read the following guidelines before you `report an issue`_
 1. **Use the GitHub issue search** -- check if the issue has
    already been reported. If it has been, please comment on the existing issue.

-2. **Check if the issue has been fixed** -- the latest `develop`
-   branch may already contain a fix. Please try to reproduce the bug against
-   the latest git head or the latest release.
+2. **Check if the issue has been fixed** — Various point-release branches, such
+   as ``2015.5``, ``2015.8``, ``2016.3``, or even ``develop``, may already contain
+   a fix. Please try to reproduce the bug against the latest git HEAD or the latest
+   release.

 3. **Isolate the demonstrable problem** -- make sure that the
    code in the project's repository is *definitely* responsible for the issue.

@@ -32,7 +33,7 @@ Please read the following guidelines before you `report an issue`_
 4. **Include a reproducible example** -- Provide the steps which
    led you to the problem.

-Please try to be as detailed as possible in your report too. What is your
+Please try to be as detailed as possible in your report, too. What is your
 environment? What steps will reproduce the issue? What Operating System? What
 would you expect to be the outcome? All these details will help people to
 assess and fix any potential bugs.

@@ -47,16 +48,16 @@ Features

 Salt is always working to be more powerful. Feature additions and requests are
 welcomed. When requesting a feature it will be categorized for a release or
-placed under "Approved for Future Release".
+placed under the "Feature" label.

 If a new feature is desired, the fastest way to get it into Salt is to
-contribute the code. Before starting on a new feature an issue should be filed
+contribute the code. Before starting on a new feature, an issue should be filed
 for it. The one requesting the feature will be able to then discuss the feature
 with the Salt team and discover the best way to get the feature into Salt and
 if the feature makes sense.

-It is extremely common that the desired feature has already been completed,
-look for it in the docs, ask about it first in IRC, and on the mailing list
+It is extremely common that the desired feature has already been completed.
+Look for it in the docs, ask about it first in IRC, and on the mailing list
 before filing the request. It is also common that the problem which would be
 solved by the new feature can be easily solved another way, which is a great
 reason to ask first.

@@ -64,9 +65,13 @@ reason to ask first.
 Fixing issues
 =============

-If you wish to help us fixing the issue you're reporting, `Salt's documentation`_ already includes
+If you wish to help us fix the issue you're reporting, `Salt's documentation`_ already includes
 information to help you setup a development environment, under `Developing Salt`_.

+`SaltStack's Contributing documentation`_ is also helpful, as it explains sending in pull requests,
+keeping your salt branches in sync, and knowing `which branch`_ new features or bug fixes should be
+submitted against.
+
 Fix the issue you have in hands, if possible also add a test case to Salt's testing suite, create a
 `pull request`_, and **that's it**!

@@ -78,5 +83,7 @@ salt's code.
 .. _`Salt's documentation`: http://docs.saltstack.com/en/latest/index.html
 .. _`Developing Salt`: http://docs.saltstack.com/en/latest/topics/development/hacking.html
 .. _`pull request`: http://docs.saltstack.com/en/latest/topics/development/contributing.html#sending-a-github-pull-request
+.. _`SaltStack's Contributing documentation`: https://docs.saltstack.com/en/latest/topics/development/contributing.html
+.. _`which branch`: https://docs.saltstack.com/en/latest/topics/development/contributing.html#which-salt-branch

 .. vim: fenc=utf-8 spell spl=en
(cloud configuration file installed as /etc/salt/cloud): whitespace-only change (old and new lines differ only in trailing whitespace)

@@ -1,4 +1,4 @@
 # This file should normally be installed at: /etc/salt/cloud

 ##########################################

(cloud example configuration file): whitespace-only change on the header comment

@@ -1,4 +1,4 @@
 # Official distro images for Arch, Centos, Debian, Fedora, FreeBSD, Ubuntu

 # Arch Linux
 # https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services

(another cloud example configuration file): whitespace-only changes

@@ -1,4 +1,4 @@
 # Official distro images for Arch, Centos, Debian, Fedora, FreeBSD, Ubuntu

 # Arch Linux
 # https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services

@@ -96,7 +96,7 @@ ubuntu_lucid_ec2:  (whitespace-only change on the "image:" line)

 ubuntu_precise_ec2:
   provider: my-ec2-config
   image: ami-3ed8fb7b
   size: t1.micro
   ssh_username: ubuntu
   location: us-west-1

(another cloud example configuration file): whitespace-only changes

@@ -1,4 +1,4 @@
 # Official distro images for Arch, Centos, Debian, Fedora, FreeBSD, Ubuntu

 # Arch Linux
 # https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services

@@ -50,7 +50,7 @@ debian_squeeze_ec2:  (whitespace-only change on the "image:" line)

 # https://fedoraproject.org/wiki/Cloud_images
 fedora_17_ec2:
   provider: my-ec2-config
   image: ami-8e69e5be
   size: t1.micro
   ssh_username: ec2-user
   location: us-west-2
conf/master: 12 lines changed (all whitespace-only; trailing whitespace removed)

@@ -44,7 +44,7 @@
 # Directory to store job and cache data:
 # This directory may contain sensitive data and should be protected accordingly.
 #
 #cachedir: /var/cache/salt/master

 # Directory for custom modules. This directory can contain subdirectories for

@@ -106,7 +106,7 @@
 #minion_data_cache: True

 # Store all returns in the given returner.
 # Setting this option requires that any returner-specific configuration also
 # be set. See various returners in salt/returners for details on required
 # configuration values. (See also, event_return_queue below.)
 #

@@ -145,12 +145,12 @@
 # the key rotation event as minions reconnect. Consider this carefully if this
 # salt master is managing a large number of minions.
 #
 # If disabled, it is recommended to handle this event by listening for the
 # 'aes_key_rotate' event with the 'key' tag and acting appropriately.
 # ping_on_rotate: False

 # By default, the master deletes its cache of minion data when the key for that
 # minion is removed. To preserve the cache after key deletion, set
 # 'preserve_minion_cache' to True.
 #
 # WARNING: This may have security implications if compromised minions auth with

@@ -455,7 +455,7 @@
 # When using multiple environments, each with their own top file, the
 # default behaviour is an unordered merge. To prevent top files from
 # being merged together and instead to only use the top file from the
 # requested environment, set this value to 'same'.
 #top_file_merging_strategy: merge

 # To specify the order in which environments are merged, set the ordering

@@ -472,7 +472,7 @@
 # the master server. The default is md5, but sha1, sha224, sha256, sha384
 # and sha512 are also supported.
 #
 # Prior to changing this value, the master should be stopped and all Salt
 # caches should be cleared.
 #hash_type: md5
(documentation Makefile): whitespace-only change

@@ -154,7 +154,7 @@ pdf: translations

 cheatsheet: translations
 	@echo "Running cheatsheet/salt.tex file through xelatex..."
 	cd cheatsheet && xelatex salt.tex && cp salt.pdf ../salt-cheatsheet.pdf
 	@echo "./salt-cheatsheet.pdf created."

 text: translations

(documentation theme CSS): whitespace-only change on the "width" line

@@ -6069,7 +6069,7 @@ a.badge:focus {
   top: 20%;
   left: 50%;
   padding: 15px;
   width: 35%;
 }

 .carousel-caption h4,

(bundled Modernizr JavaScript in the documentation theme): whitespace-only changes

@@ -45,7 +45,7 @@ window.Modernizr = (function( window, document, undefined ) {
     slice = classes.slice,

     featureName,

     injectElementWithStyles = function( rule, callback, nodes, testnames ) {

@@ -105,7 +105,7 @@ window.Modernizr = (function( window, document, undefined ) {
         return bool;
     },

     isEventSupported = (function() {

@@ -152,7 +152,7 @@ window.Modernizr = (function( window, document, undefined ) {
         };
     }
     else {
         hasOwnProp = function (object, property) {
             return ((property in object) && is(object.constructor.prototype[property], 'undefined'));
         };
     }

@@ -620,7 +620,7 @@ window.Modernizr = (function( window, document, undefined ) {
     }

     return Modernizr;
 };

(bundled glyphicons SVG font): whitespace-only change; file size unchanged at 62 KiB

@@ -226,4 +226,4 @@
 <glyph unicode="" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
 <glyph unicode="" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -11.5t1 -11.5q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
 </font>
 </defs></svg>

(gource video generation script): whitespace-only change

@@ -1,5 +1,5 @@
 #!/bin/bash
 #
 # This script is used to generate the gource video released with each release of Salt
 #
doc/man/salt.7: 188 lines changed

Regenerated man page. Every hunk shown for this file is a whitespace-only
change: the old and new lines are identical apart from trailing whitespace
stripped when the page was rebuilt from the documentation sources. The
affected hunks repeat content that already appears elsewhere in this commit
or in the documentation, including:

* parameter entries in the client and module documentation (\fBfun\fP,
  \fBexpr_form\fP, \fBkwargs\fP, \fBpackages\fP, \fBattr\fP, \fBdelimiter\fP,
  \fBdefault\fP, \fBbase\fP, \fBforce\fP, \fBaccess_mode\fP, \fBin_seconds\fP,
  \fBtimeout\fP, \fBsam\fP, \fBcategories\fP, \fBseverities\fP, \fBversion\fP,
  \fBfromrepo\fP, \fBskip_suggestions\fP, \fBpkgs\fP, \fBsources\fP,
  \fBallow_updates\fP, \fBpkg_verify\fP, \fBnormalize\fP)
* the embedded conf/master example, with the same six whitespace-only hunks
  listed under conf/master above (the cachedir, returner, key-rotation,
  preserve_minion_cache, top_file_merging_strategy, and hash_type comments)
* lines in the x509, git, grains, file.serialize, and Git Pillar sections
* 2015.5 and 2015.8 release-notes changelog entries referencing pull requests
  such as \fI\%#26654\fP, \fI\%#26518\fP, \fI\%#28465\fP, \fI\%#27991\fP,
  \fI\%#27691\fP, and \fI\%#28756\fP; several changelog blocks are included
  twice in the man page, so those hunks appear in duplicate
(returner development documentation): whitespace-only changes

@@ -46,13 +46,13 @@ Writing a Returner
 ==================

 A returner is a Python module containing at minimum a ``returner`` function.
 Other optional functions can be included to add support for
 :ref:`master_job_cache`, :ref:`external_job_cache`, and `Event Returners`_.

 ``returner``
     The ``returner`` function must accept a single argument. The argument
     contains return data from the called minion function. If the minion
     function ``test.ping`` is called, the value of the argument will be a
     dictionary. Run the following command from a Salt master to get a sample
     of the dictionary:

@@ -90,8 +90,8 @@ Salt's :ref:`master_job_cache` allows returners to be used as a pluggable
 replacement for the :ref:`default_job_cache`. In order to do so, a returner
 must implement the following functions:

 .. note::

     The code samples contained in this section were taken from the cassandra_cql
     returner.

@@ -128,7 +128,7 @@ must implement the following functions:
                 ) VALUES (
                   '{0}', '{1}'
                 );'''.format(jid, json.dumps(load))

         # cassandra_cql.cql_query may raise a CommandExecutionError
         try:
             __salt__['cassandra_cql.cql_query'](query)

@@ -141,7 +141,7 @@ must implement the following functions:
             raise

 ``get_load``
     must accept a job id (jid) and return the job load stored by ``save_load``,
     or an empty dictionary when not found.

@@ -152,9 +152,9 @@ must implement the following functions:
         Return the load data that marks a specified jid
         '''
         query = '''SELECT load FROM salt.jids WHERE jid = '{0}';'''.format(jid)

         ret = {}

         # cassandra_cql.cql_query may raise a CommandExecutionError
         try:
             data = __salt__['cassandra_cql.cql_query'](query)

@@ -169,9 +169,9 @@ must implement the following functions:
             log.critical('''Unexpected error while getting load from
                          jids: {0}'''.format(str(e)))
             raise

         return ret


 External Job Cache Support
 --------------------------

@@ -270,7 +270,7 @@ contains the jid and therefore is guaranteed to be unique.
     def event_return(events):
         '''
         Return event to mysql server

         Requires that configuration be enabled via 'event_return'
         option in master config.
         '''

@@ -281,7 +281,7 @@ contains the jid and therefore is guaranteed to be unique.
             sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id` )
                      VALUES (%s, %s, %s)'''
             cur.execute(sql, (tag, json.dumps(data), __opts__['id']))

 Custom Returners
 ----------------
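As a quick illustration of the interface described in this file (not part of the commit itself): a minimal sketch of a custom returner module might look like the following. The module name and the in-memory stores are purely illustrative placeholders; a real returner would connect to an external system such as a database or message queue.

.. code-block:: python

    # illustrative sketch only -- implements the required ``returner`` function
    # plus ``save_load``/``get_load`` so the module could also back the master
    # job cache
    import json
    import logging

    log = logging.getLogger(__name__)

    __virtualname__ = 'minimal'

    # in-memory stores standing in for a real backend
    _RETURNS = {}
    _LOADS = {}


    def __virtual__():
        # a real returner would verify its driver/configuration here
        return __virtualname__


    def returner(ret):
        '''
        Required entry point: ``ret`` is the return-data dictionary from one
        minion function call (keys such as 'jid', 'id', 'fun', and 'return').
        '''
        _RETURNS[(ret['jid'], ret['id'])] = json.dumps(ret)
        log.debug('Stored return for jid %s from minion %s', ret['jid'], ret['id'])


    def save_load(jid, load, minions=None):
        '''
        Master job cache support: store the job load for a jid.
        '''
        _LOADS[jid] = json.dumps(load)


    def get_load(jid):
        '''
        Master job cache support: return the load stored by ``save_load``,
        or an empty dictionary when not found.
        '''
        data = _LOADS.get(jid)
        return json.loads(data) if data else {}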
(states ``include`` documentation): whitespace-only change

@@ -31,7 +31,7 @@ the following syntax is used:
     - dev: http

 **NOTE**: ``include`` does not simply inject the states where you place it
 in the SLS file. If you need to guarantee order of execution, consider using
 requisites.

 .. include:: ../../_incl/_incl/sls_filename_cant_contain_period.rst
(requisites documentation): whitespace-only changes

@@ -44,9 +44,9 @@ Requisites
 The Salt requisite system is used to create relationships between states. The
 core idea being that, when one state is dependent somehow on another, that
 inter-dependency can be easily defined. These dependencies are expressed by
 declaring the relationships using state names and ID's or names. The
 generalized form of a requisite target is ``<state name> : <ID or name>``.
 The specific form is defined as a :ref:`Requisite Reference
 <requisite-reference>`

 Requisites come in two types: Direct requisites (such as ``require``),
(EC2 cloud provider documentation): whitespace-only changes

@@ -51,7 +51,7 @@ parameters are discussed in more detail below.
     # with a one second delay betwee retries
     win_deploy_auth_retries: 10
     win_deploy_auth_retry_delay: 1

     # Set the EC2 access credentials (see below)
     #
     id: 'use-instance-role-credentials'

@@ -106,7 +106,7 @@ parameters are discussed in more detail below.
     # with a one second delay betwee retries
     win_deploy_auth_retries: 10
     win_deploy_auth_retry_delay: 1

     # Set the EC2 access credentials (see below)
     #
     id: 'use-instance-role-credentials'

@@ -982,7 +982,7 @@ the network interfaces of your virtual machines, for example:-
           SubnetId: subnet-XXXXXXXX
           SecurityGroupId:
             - sg-XXXXXXXX

           # Uncomment this line if you would like to set an explicit private
           # IP address for the ec2 instance
           #
(cloud module development documentation)

@@ -112,7 +112,7 @@ then the necessary port to log in, then a successful login that can be used to
 install Salt. Minion configuration and keys will then be uploaded to a temporary
 directory on the target by the appropriate function. On a Windows target, the
 Windows Minion Installer will be run in silent mode. On a Linux/Unix target, a
-deploy script (bootstrap-salt.sh, by default) will be run, which will
+deploy script (``bootstrap-salt.sh``, by default) will be run, which will
 auto-detect the operating system, and install Salt using its native package
 manager. These do not need to be handled by the developer in the cloud module.
(Salt Bootstrap documentation)

@@ -135,7 +135,7 @@ executes it.

 Updating Salt Bootstrap
 =======================
-Salt Bootstrap can be updated automatically with salt-cloud:
+Salt Bootstrap can be updated automatically with ``salt-cloud``:

 .. code-block:: bash

@@ -143,8 +143,22 @@ Salt Bootstrap can be updated automatically with salt-cloud:
     salt-cloud --update-bootstrap

-Bear in mind that this updates to the latest (unstable) version, so use with
-caution.
+Bear in mind that this updates to the latest **stable** version from:
+
+https://bootstrap.saltstack.com/stable/bootstrap-salt.sh
+
+To update Salt Bootstrap script to the **develop** version, run the following
+command on the Salt minion host with ``salt-cloud`` installed:
+
+.. code-block:: bash
+
+    salt-call config.gather_bootstrap_script 'https://bootstrap.saltstack.com/develop/bootstrap-salt.sh'
+
+Or just download the file manually:
+
+.. code-block:: bash
+
+    curl -L 'https://bootstrap.saltstack.com/develop' > /etc/salt/cloud.deploy.d/bootstrap-salt.sh

 Keeping /tmp/ Files
(Google Compute Engine provider documentation): whitespace-only changes

@@ -52,7 +52,7 @@ Google Compute Engine Setup
    *Service Account* and click the *Create Client ID* button. This will
    automatically download a ``.json`` file, which may or may not be used
    in later steps, depending on your version of ``libcloud``.

    Look for a new *Service Account* section in the page and record the generated
    email address for the matching key/fingerprint. The email address will be used
    in the ``service_account_email_address`` of the ``/etc/salt/cloud.providers``

@@ -80,7 +80,7 @@ Google Compute Engine Setup
       openssl pkcs12 -in ORIG.p12 -passin pass:notasecret \
           -nodes -nocerts | openssl rsa -out NEW.pem


 Provider Configuration
 ======================
(Rackspace provider documentation): whitespace-only changes

@@ -182,7 +182,7 @@ configuration please add:
 Private Subnets
 ---------------
 By default salt-cloud will not add Rackspace private networks to new servers. To enable
 a private network to a server instantiated by salt cloud, add the following section
 to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)

 .. code-block:: yaml

@@ -198,13 +198,13 @@ to the provider file (typically ``/etc/salt/cloud.providers.d/rackspace.conf``)

 To get the Rackspace private network ID, go to Networking, Networks and hover over the private network name.

 The order of the networks in the above code block does not map to the order of the
 ethernet devices on newly created servers. Public IP will always be first ( eth0 )
 followed by servicenet ( eth1 ) and then private networks.

 Enabling the private network per above gives the option of using the private subnet for
 all master-minion communication, including the bootstrap install of salt-minion. To
 enable the minion to use the private subnet, update the master: line in the minion:
 section of the providers file. To configure the master to only listen on the private
 subnet IP, update the interface: line in the /etc/salt/master file to be the private
 subnet IP of the salt master.
(Salt Formulas / renderers documentation): whitespace-only changes

@@ -415,9 +415,9 @@ from the Salt Master. For example:
     {% set some_data = salt.pillar.get('some_data', {'sane default': True}) %}

     {# or #}

     {% import_yaml 'path/to/file.yaml' as some_data %}

     {# or #}

     {% import_json 'path/to/file.json' as some_data %}

@@ -581,7 +581,7 @@ read it will be hard to maintain -- switch to a format that is easier to read.
 Using alternate renderers is very simple to do using Salt's "she-bang" syntax
 at the top of the file. The Python renderer must simply return the correct
 :ref:`highstate data structure <states-highstate-example>`. The following
 example is a state tree of two sls files, one simple and one complicated.

 ``/srv/salt/top.sls``:

@@ -966,7 +966,7 @@ XML.)

     {% import_yaml 'tomcat/defaults.yaml' as server_xml_defaults %}
     {% set server_xml_final_values = salt.pillar.get(
         'appX:server_xml_overrides',
         default=server_xml_defaults,
         merge=True)
     %}
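To illustrate the sentence above about the Python renderer (this example is not part of the commit; the SLS path, state ID, and package name are arbitrary placeholders): an SLS file rendered with the ``#!py`` she-bang simply defines ``run()`` and returns the same highstate data structure that the YAML renderer would have produced.

.. code-block:: python

    #!py
    # hypothetical /srv/salt/vim.sls using the Python renderer

    def run():
        '''
        Return a highstate data structure: {state ID: {state function: [args]}}.
        '''
        return {
            'vim': {
                'pkg.installed': [
                    {'refresh': True},   # keyword arguments go in single-key dicts
                ],
            },
        }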
(troubleshooting / issue reporting documentation): whitespace-only change

@@ -31,7 +31,7 @@ In an issue report, please include the following information:
   with the ``-ldebug`` flag set.

 .. note::

     Please be certain to scrub any logs or SLS files for sensitive data!
(multimaster failover documentation): whitespace-only change

@@ -41,7 +41,7 @@ will not get scheduled.
 Failover can be combined with PKI-style encrypted keys, but PKI is NOT
 REQUIRED to use failover.

 Multimaster with PKI and Failover is discussed in
 :doc:`this tutorial </topics/tutorials/multimaster_pki>`

 ``master_type: failover`` can be combined with ``master_shuffle: True``
(Debian installation documentation)

@@ -1,8 +1,17 @@
 .. _installation-debian:

-================
-Debian GNU/Linux
-================
+===========================
+Debian GNU/Linux / Raspbian
+===========================
+
+Debian GNU/Linux distribution and some devariatives such as Raspbian already
+have included Salt packages to their repositories. However, current stable
+release codenamed "Jessie" contains old outdated Salt release. It is
+recommended to use SaltStack repository for Debian as described
+:ref:`below <installation-debian-repo>`.
+
+Installation from official Debian and Raspbian repositories is described
+:ref:`here <installation-debian-raspbian>`.

 .. _installation-debian-repo:

@@ -131,13 +140,63 @@ Update the package database

     apt-get update

-Installation from the Debian Main Repository
-============================================
+.. _installation-debian-raspbian:
+
+Installation from the Debian / Raspbian Official Repository
+===========================================================
+
+Stretch (Testing) and Sid (Unstable) distributions are already contain mostly
+up-to-date Salt packages built by Debian Salt Team. You can install Salt
+components directly from Debian.
+
+On Jessie (Stable) there is an option to install Salt minion from Stretch with
+`python-tornado` dependency from `jessie-backports` repositories.
+
+To install fresh release of Salt minion on Jessie:
+
+#. Add `jessie-backports` and `stretch` repositories:
+
+   **Debian**:
+
+   .. code-block:: bash
+
+       echo 'deb http://httpredir.debian.org/debian jessie-backports main' >> /etc/apt/sources.list
+       echo 'deb http://httpredir.debian.org/debian stretch main' >> /etc/apt/sources.list
+
+   **Raspbian**:
+
+   .. code-block:: bash
+
+       echo 'deb http://archive.raspbian.org/raspbian/ stretch main' >> /etc/apt/sources.list
+
+#. Make Jessie a default release:
+
+   .. code-block:: bash
+
+       echo 'APT::Default-Release "jessie";' > /etc/apt/apt.conf.d/10apt
+
+#. Install Salt dependencies:
+
+   **Debian**:
+
+   .. code-block:: bash
+
+       apt-get update
+       apt-get install python-zmq python-tornado/jessie-backports salt-common/stretch
+
+   **Raspbian**:
+
+   .. code-block:: bash
+
+       apt-get update
+       apt-get install python-zmq python-tornado/stretch salt-common/stretch
+
+#. Install Salt minion package from Stretch:
+
+   .. code-block:: bash
+
+       apt-get install salt-minion/stretch
+
+.. _debian-install-pkgs:

 Install Packages
|
@ -17,7 +17,7 @@ FreeBSD repo
============

The FreeBSD pkgng repository is preconfigured on systems 10.x and above. No
configuration is needed to pull from these repositories.

.. code-block:: shell

@ -36,7 +36,7 @@ following file to your system:

**/usr/local/etc/pkg/repos/saltstack.conf:**

.. code-block:: json

    saltstack: {
        url: "https://repo.saltstack.com/freebsd/${ABI}/",
@ -13,7 +13,7 @@ mechanisms to send job results to other systems (databases, local syslog,
and others):

* External Job Cache
* Master Job Cache

The major difference between these two
mechanisms is from where results are returned (from the Salt Master or Salt

@ -62,7 +62,7 @@ Before you configure a job cache, it is essential to understand Salt returner
modules ("returners"). Returners are pluggable Salt Modules that take the data
returned by jobs, and then perform any necessary steps to send the data to an
external system. For example, a returner might establish a connection,
authenticate, and then format and transfer data.

The Salt Returner system provides the core functionality used by the External
and Master Job Cache systems, and the same returners are used by both systems.
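As a rough illustration of that contract, a custom returner is just a module exposing a ``returner(ret)`` function that receives the job return dictionary. The module name, path, and output target below are hypothetical; real returners normally also declare a virtual name and read their settings from the minion or master configuration.

.. code-block:: python

    # Hypothetical custom returner, e.g. /srv/salt/_returners/local_json.py
    import json

    def returner(ret):
        '''
        Receive the return data for one job and hand it to an external
        system; here it is simply appended to a local JSON-lines file.
        '''
        with open('/var/log/salt/returns.jsonl', 'a') as fh:
            fh.write(json.dumps(ret) + '\n')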
@ -92,7 +92,7 @@ Step 2: Configure the Returner
After you understand the configuration and have the external system ready, add
the returner configuration settings to the Salt Minion configuration file for
the External Job Cache, or to the Salt Master configuration file for the Master
Job Cache.

For example, MySQL requires:

@ -108,7 +108,7 @@ Slack requires:

.. code-block:: yaml

    slack.channel: 'channel'
    slack.api_key: 'key'
    slack.from_name: 'name'
@ -45,7 +45,7 @@ passed, an empty list must be added:
Mine Functions Aliases
----------------------

Function aliases can be used to provide friendly names, usage intentions or to allow
multiple calls of the same function with different arguments. There is a different
syntax for passing positional and key-value arguments. Mixing positional and
key-value arguments is not supported.

@ -63,7 +63,7 @@ key-value arguments is not supported.
      ip_list:
        - mine_function: grains.get
        - ip_interfaces


.. _mine_interval:
@ -82,9 +82,9 @@ the 'url' key above should say ``url: http://127.0.0.1:8000``
   salt-proxy --proxyid=p8000 -l debug

6. Accept your proxy's key on your salt-master

   .. code-block:: bash

       salt-key -y -a p8000
       The following keys are going to be accepted:
       Unaccepted Keys:

@ -108,8 +108,8 @@ the 'url' key above should say ``url: http://127.0.0.1:8000``
    can target proxies via grains if you like.

10. You can also start and stop the available services (apache, redbull, and
    postgresql) with ``service.start``, etc.

11. States can be written to target the proxy. Feel free to experiment with
    them.
@ -544,8 +544,8 @@ Ink servers in the master configuration.

The above will also return the highstate result data using the `smtp_return`
returner (use virtualname like when using from the command line with `--return`).
The returner needs to be configured on the minion for this to work.
See :mod:`salt.returners.smtp_return <salt.returners.smtp_return>` documentation
for that.

.. _minion-start-reactor:
@ -551,7 +551,7 @@ New Salt-Cloud Providers
Salt Call Change
================

When used with a returner, salt-call now contacts a master if ``--local``
is not specified.
@ -92,7 +92,7 @@ Changes:
  works. Fixing it requires either passing through the file twice (the
  first time only to search for content and set a flag), or rewriting
  `file.replace` so it doesn't use `fileinput`

- VCS fileserver fixes/optimizations

- Catch fileserver configuration errors on master start

@ -137,4 +137,4 @@ Known issues:
- In multimaster mode, a minion may become temporarily unresponsive
  if modules or pillars are refreshed at the same time that one
  or more masters are down. This can be worked around by setting
  'auth_timeout' and 'auth_tries' down to shorter periods.

@ -101,7 +101,7 @@ Changes:
  works. Fixing it requires either passing through the file twice (the
  first time only to search for content and set a flag), or rewriting
  `file.replace` so it doesn't use `fileinput`

- VCS fileserver fixes/optimizations

- Catch fileserver configuration errors on master start
@ -2,7 +2,7 @@
Salt 2014.7.5 Release Notes
===========================

:release: 2015-04-16

Version 2014.7.5 is a bugfix release for :doc:`2014.7.0
</topics/releases/2014.7.0>`.

@ -704,7 +704,7 @@ Extended Changelog Courtesy of Todd Stansell (https://github.com/tjstansell/salt
  * 8bb4664 Merge pull request `#23145`_ from rallytime/`bp-23089`_
  * 93c41af Stringify version number before lstrip

- **PR** `#23144`_: (*rallytime*) Backport `#23124`_ to 2014.7
  @ *2015-04-28T20:44:46Z*

  - **ISSUE** `#16188`_: (*drawks*) salt.modules.parted has various functions with bogus input validation.
@ -1365,7 +1365,7 @@ Changes:

- **ISSUE** `#26366`_: (*GreatSnoopy*) The development tree produces hanging, 100%cpu salt-master processes
  | refs: `#26443`_
- **ISSUE** `#26301`_: (*waynew*) CPU pegged out running salt-master (after running command)
  | refs: `#26443`_
- **ISSUE** `#25998`_: (*driskell*) Event subsystem discarding required events during --batch breaking it for slow running commands
  | refs: `#26000`_

@ -196,7 +196,7 @@ Changes:

- **PR** `#26817`_: (*jfindlay*) modify groupadd for rhel 5

- **PR** `#26824`_: (*pravka*) [salt-cloud] Fix creating droplet from snapshot in digital_ocean provider

- **PR** `#26823`_: (*joejulian*) use dbus instead of localectl

@ -104,7 +104,7 @@ Changes:

- **PR** `#28263`_: (*cachedout*) New channel for event.send

- **PR** `#28293`_: (*cachedout*) Minor grammar changes

- **PR** `#28271`_: (*gwaters*) Update tutorial documentation

@ -45,7 +45,7 @@ Changes:

- **PR** `#28782`_: (*rallytime*) Fixes to rabbitmq user state

- **PR** `#28789`_: (*nmadhok*) Provide ability to enable/disable customization for newly create VMs using VMware salt-cloud driver

- **PR** `#28768`_: (*mrosedale*) 2015.8

@ -123,7 +123,7 @@ Changes:

- **PR** `#28602`_: (*nasenbaer13*) Allow setting of custom dimensions in asg alarm specification

- **PR** `#28596`_: (*rallytime*) Merge branch '2015.5' into '2015.8'

- **PR** `#28593`_: (*blueyed*) doc: fix typo with salt.states.file: s/preseve/preserve/
@ -77,21 +77,21 @@ nodegroup`` on the line directly following the nodegroup name.
Using Nodegroups in SLS files
=============================

To use Nodegroups in Jinja logic for SLS files, the :conf_master:`pillar_opts` option in
``/etc/salt/master`` must be set to "True". This will pass the master's configuration as
Pillar data to each minion.

.. note::

    If the master's configuration contains any sensitive data, this will be passed to each minion.
    Do not enable this option if you have any configuration data that you do not want to get
    on your minions.

Also, if you make changes to your nodegroups, you might need to run
``salt '*' saltutil.refresh_pillar`` after restarting the master.

Once pillar_opts is enabled, you can find the nodegroups under the "master" pillar.
To make sure that only the correct minions are targeted,
you should use each matcher for the nodegroup definition.
For example, to check if a minion is in the 'webserver' nodegroup:

@ -102,13 +102,13 @@ For example, to check if a minion is in the 'webserver' nodegroup:

.. code-block:: yaml

    {% if grains.id in salt['pillar.get']('master:nodegroups:webserver', [])
    and grains.os in salt['pillar.get']('master:nodegroups:webserver', []) %}
    ...
    {% endif %}

.. note::

    If you do not include all of the matchers used to define a nodegroup,
    Salt might incorrectly target minions that meet some of the nodegroup
    requirements, but not all of them.
@ -150,20 +150,20 @@ and all changes made.

Note that in the :ref:`example <targeting-minions>` above, the SLS file
``webserver.sls`` was referred to simply as ``webserver``. The namespace
for SLS files when referenced in :conf_master:`top.sls <state_top>` or an :ref:`include-declaration`
follows a few simple rules (see the sketch after this list):

1. The ``.sls`` is discarded (i.e. ``webserver.sls`` becomes
   ``webserver``).
2. Subdirectories can be used for better organization.
   a. Each subdirectory can be represented with a dot (following the python
      import model) or a slash. ``webserver/dev.sls`` can also be referred to
      as ``webserver.dev``
   b. Because slashes can be represented as dots, SLS files can not contain
      dots in the name besides the dot for the SLS suffix. The SLS file
      webserver_1.0.sls can not be matched, and webserver_1.0 would match
      the directory/file webserver_1/0.sls

3. A file called ``init.sls`` in a subdirectory is referred to by the path
   of the directory. So, ``webserver/init.sls`` is referred to as
   ``webserver``.
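A hypothetical helper, not part of Salt, that restates the naming rules above in runnable form:

.. code-block:: python

    import os

    def sls_ref(path):
        '''Map an SLS file path under the file root to its dotted reference name.'''
        if path.endswith('.sls'):
            path = path[:-len('.sls')]        # rule 1: drop the suffix
        if os.path.basename(path) == 'init':
            path = os.path.dirname(path)      # rule 3: init.sls names its directory
        return path.replace('/', '.')         # rule 2a: slashes become dots

    assert sls_ref('webserver.sls') == 'webserver'
    assert sls_ref('webserver/dev.sls') == 'webserver.dev'
    assert sls_ref('webserver/init.sls') == 'webserver'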
@ -51,13 +51,13 @@ pkgver() {

#build() {
# cd "${srcdir}/${_gitname}"
# python2 setup.py build
# no need to build setup.py install will do this
#}

package() {
  cd "${srcdir}/${_gitname}"

  python2 setup.py install --root=${pkgdir}/ --optimize=1

  install -Dm644 ${srcdir}/salt/pkg/arch/salt-master.service ${pkgdir}/usr/lib/systemd/system/salt-master.service

@ -17,7 +17,7 @@ depends=('python2'
         'python2-psutil')
makedepends=('git')
provides=()
backup=('etc/salt/master'
        'etc/salt/minion')
options=()
srcdir="$PWD/../.."

@ -26,9 +26,9 @@ package() {
  cd "$srcdir"

  python2 setup.py install --root=$pkgdir/ --optimize=1

  mkdir -p $pkgdir/etc/rc.d/
  cp $srcdir/pkg/arch/salt-master $pkgdir/etc/rc.d/
  cp $srcdir/pkg/arch/salt-minion $pkgdir/etc/rc.d/
  chmod +x $pkgdir/etc/rc.d/*
}
@ -54,7 +54,7 @@ pkgver() {

#build() {
# cd "${srcdir}/${_gitname}"
# python2 setup.py build
# no need to build setup.py install will do this
#}

@ -26,7 +26,7 @@ md5sums=('e9239a7184ced5d426696735456ee829'
package() {
  cd ${srcdir}/${pkgname}-${pkgver}
  python2 setup.py install --root=${pkgdir}/ --optimize=1

  install -Dm644 ${srcdir}/salt-api.service ${pkgdir}/usr/lib/systemd/system/salt-api.service

}

@ -43,7 +43,7 @@ pkgver() {

#build() {
# cd "${srcdir}/${_gitname}"
# python2 setup.py build
# no need to build setup.py install will do this
#}

@ -51,7 +51,7 @@ package() {
  cd "${srcdir}/${_gitname}"
  export USE_SETUPTOOLS=true
  python2 setup.py install --root=${pkgdir}/ --optimize=1

  install -Dm644 ${srcdir}/salt-api/pkg/salt-api.service ${pkgdir}/usr/lib/systemd/system/salt-api.service

  # remove vcs leftovers
@ -3,4 +3,4 @@

# hack to load functions from salt_common completion
complete --do-complete='salt_common --' >/dev/null

@ -173,7 +173,7 @@ end

function __fish_salt_save_first_commandline_token_not_matching_args_to
    if status --is-interactive
        set -l cli (commandline -pco)
        for i in $cli
            if echo "$i" | grep -Ev (__fish_salt_join '|' $argv)
                set -g $argv[1] $i

@ -257,7 +257,7 @@ set -g __fish_salt_format_options --no-color --log-level=quiet

function __fish_salt_exec
    set -l program (__fish_salt_program)
    set -l exe $program $__fish_salt_format_options $__fish_salt_format_options_temp
    if [ $program = salt ]
        set exe $exe (__fish_salt_minion)
    end

@ -350,7 +350,7 @@ function __fish_salt_argspec_function
end

function __fish_salt_argspec_args
    __fish_salt_lines_between '^\s*args:' '^\s*defaults:' | grep -v ':'
end

function __fish_salt_list_arg_name

@ -431,7 +431,7 @@ function __fish_salt_prefix_with_arg_name
    if [ $arg_name != '_' ]
        sed "p;s/^/$arg_name=/g"
    else
        # leave stdout as is; don't remove this line, because if construction
        # clears stdout if condition fails
        tee
    end
@ -91,7 +91,7 @@ This port configures a LaunchItem for salt-minion.
It also installs LaunchItems for the salt-master and the salt-syndic.

To start the salt-master via launchd, run

    sudo launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.master.plist

To start the salt-syndic via launchd, run

@ -6,7 +6,7 @@ Salt runs well on the Mac, but does have some limitations.

In this directory you will find scripts and collateral to build an OSX
.pkg-style package that uses a custom-built Python. This process has
been tested on Mac OSX Lion (10.7) and following.

In addition, because of changes in launchd from version
to version of the OS, a simpler approach is taken for

@ -18,7 +18,7 @@
# This script can be passed 3 parameters
#   $1 : <package dir> : the staging area for the package
#        defaults to /tmp/salt-pkg
#   $2 : <version> : the version of salt to build
#        (a git tag, not a branch)
#        (defaults to git-repo state)
#

@ -127,7 +127,7 @@ cp $PKGRESOURCES/scripts/com.saltstack.salt.syndic.plist $PKGDIR/Library/LaunchD
cp $PKGRESOURCES/scripts/com.saltstack.salt.api.plist $PKGDIR/Library/LaunchDaemons

############################################################################
# Remove pkg-config files from the distro
############################################################################

echo -n -e "\033]0;Build_Pkg: Remove pkg-config files\007"
@ -1,11 +1,11 @@
These packages are *optional* dependencies for salt. By default, they are not included in the salt RPMs.
Install any of these packages to enable the functionality within salt.

MySQL-python
libvirt-python
python-mako
pymongo
python-redis / redis

A semi-canonical list of the optional salt modules can be found at
https://github.com/saltstack/salt/blob/develop/doc/conf.py#L30
@ -20,7 +20,7 @@

# chkconfig header

# chkconfig: 345 99 99
# description: This is a daemon that controls the Salt API.
#
# processname: /usr/bin/salt-api

@ -40,7 +40,7 @@ DEBIAN_VERSION=/etc/debian_version
SUSE_RELEASE=/etc/SuSE-release
# Source function library.
if [ -f $DEBIAN_VERSION ]; then
    break
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
    . /etc/rc.status
else

@ -61,17 +61,17 @@ start() {
        rc_status -v
    elif [ -e $DEBIAN_VERSION ]; then
        if [ -f $LOCKFILE ]; then
            echo -n "already started, lock file found"
            RETVAL=1
        elif $PYTHON $SALTAPI; then
            echo -n "OK"
            RETVAL=0
        fi
    else
        if status $PROCESS &> /dev/null; then
            failure "Already running."
            RETVAL=1
        else
            daemon --pidfile=$PID_FILE --check $SERVICE $SALTAPI $CONFIG_ARGS
            RETVAL=$?
            [ $RETVAL -eq 0 ] && touch /var/lock/subsys/$SERVICE
|
@ -28,7 +28,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -56,7 +56,7 @@ start() {
|
|||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif $PYTHON $SALTMASTER -d $MASTER_ARGS >& /dev/null; then
|
||||
echo -n "OK"
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
### BEGIN INIT INFO
|
||||
# Provides: salt-syndic
|
||||
# Required-Start: $all
|
||||
# Required-Stop:
|
||||
# Required-Stop:
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: Salt syndic master-minion passthrough daemon
|
||||
|
@ -19,7 +19,7 @@
|
|||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: - 99 99
|
||||
# chkconfig: - 99 99
|
||||
# description: This is a the Salt syndic daemon that enables Salt master-minion remote control passthrough.
|
||||
#
|
||||
# processname: /usr/bin/salt-syndic
|
||||
|
@ -29,7 +29,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -57,7 +57,7 @@ start() {
|
|||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif $PYTHON $SALTSYNDIC -d $SYNDIC_ARGS >& /dev/null; then
|
||||
echo -n "OK"
|
||||
|
|
|
@ -117,15 +117,15 @@ Requires: systemd-python
|
|||
%endif
|
||||
|
||||
%description
|
||||
Salt is a distributed remote execution system used to execute commands and
|
||||
query data. It was developed in order to bring the best solutions found in
|
||||
the world of remote execution together and make them better, faster and more
|
||||
malleable. Salt accomplishes this via its ability to handle larger loads of
|
||||
information, and not just dozens, but hundreds or even thousands of individual
|
||||
Salt is a distributed remote execution system used to execute commands and
|
||||
query data. It was developed in order to bring the best solutions found in
|
||||
the world of remote execution together and make them better, faster and more
|
||||
malleable. Salt accomplishes this via its ability to handle larger loads of
|
||||
information, and not just dozens, but hundreds or even thousands of individual
|
||||
servers, handle them quickly and through a simple and manageable interface.
|
||||
|
||||
%package master
|
||||
Summary: Management component for salt, a parallel remote execution system
|
||||
Summary: Management component for salt, a parallel remote execution system
|
||||
Group: System Environment/Daemons
|
||||
Requires: %{name} = %{version}-%{release}
|
||||
%if (0%{?rhel} >= 7 || 0%{?fedora} >= 15)
|
||||
|
@ -136,7 +136,7 @@ Requires: systemd-python
|
|||
The Salt master is the central server to which all minions connect.
|
||||
|
||||
%package minion
|
||||
Summary: Client component for Salt, a parallel remote execution system
|
||||
Summary: Client component for Salt, a parallel remote execution system
|
||||
Group: System Environment/Daemons
|
||||
Requires: %{name} = %{version}-%{release}
|
||||
|
||||
|
@ -145,7 +145,7 @@ The Salt minion is the agent component of Salt. It listens for instructions
|
|||
from the master, runs jobs, and returns results back to the master.
|
||||
|
||||
%package syndic
|
||||
Summary: Master-of-master component for Salt, a parallel remote execution system
|
||||
Summary: Master-of-master component for Salt, a parallel remote execution system
|
||||
Group: System Environment/Daemons
|
||||
Requires: %{name} = %{version}-%{release}
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
|
||||
|
||||
_salt_get_grains(){
|
||||
if [ "$1" = 'local' ] ; then
|
||||
if [ "$1" = 'local' ] ; then
|
||||
salt-call --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
|
||||
else
|
||||
salt '*' --timeout 2 --out=txt -- grains.ls | sed 's/^.*\[//' | tr -d ",']" |sed 's:\([a-z0-9]\) :\1\: :g'
|
||||
|
@ -22,15 +22,15 @@ _salt_get_grains(){
|
|||
|
||||
_salt_get_grain_values(){
|
||||
if [ "$1" = 'local' ] ; then
|
||||
salt-call --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
|
||||
salt-call --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
|
||||
else
|
||||
salt '*' --timeout 2 --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
|
||||
salt '*' --timeout 2 --out=txt -- grains.item $1 |sed 's/^\S*:\s//' |grep -v '^\s*$'
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
_salt(){
|
||||
local cur prev opts _salt_grains _salt_coms pprev ppprev
|
||||
local cur prev opts _salt_grains _salt_coms pprev ppprev
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
|
@ -62,7 +62,7 @@ _salt(){
|
|||
return 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
esac
|
||||
case "${ppprev}" in
|
||||
-G|--grain|--grain-pcre)
|
||||
if [ "${prev}" = ":" ]; then
|
||||
|
@ -70,17 +70,17 @@ _salt(){
|
|||
return 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
esac
|
||||
|
||||
if [ "${cur}" = "=" ] && [[ "${prev}" == --* ]]; then
|
||||
cur=""
|
||||
cur=""
|
||||
fi
|
||||
if [ "${prev}" = "=" ] && [[ "${pprev}" == --* ]]; then
|
||||
prev="${pprev}"
|
||||
fi
|
||||
|
||||
|
||||
case "${prev}" in
|
||||
|
||||
|
||||
-c|--config)
|
||||
COMPREPLY=($(compgen -f -- ${cur}))
|
||||
return 0
|
||||
|
@ -89,12 +89,12 @@ _salt(){
|
|||
COMPREPLY=($(compgen -W "\'*\' ${opts} `salt-key --no-color -l acc`" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-E|--pcre)
|
||||
-E|--pcre)
|
||||
COMPREPLY=($(compgen -W "`salt-key --no-color -l acc`" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-G|--grain|--grain-pcre)
|
||||
COMPREPLY=($(compgen -W "$(_salt_get_grains)" -- ${cur}))
|
||||
COMPREPLY=($(compgen -W "$(_salt_get_grains)" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
-C|--compound)
|
||||
|
@ -109,10 +109,10 @@ _salt(){
|
|||
COMPREPLY=($(compgen -W "1 2 3 4 5 6 7 8 9 10 15 20 30 40 50 60 70 80 90 100 120 150 200"))
|
||||
return 0
|
||||
;;
|
||||
-N|--nodegroup)
|
||||
-N|--nodegroup)
|
||||
MASTER_CONFIG='/etc/salt/master'
|
||||
COMPREPLY=($(compgen -W "`awk -F ':' 'BEGIN {print_line = 0}; /^nodegroups/ {print_line = 1;getline } print_line && /^ */ {print $1} /^[^ ]/ {print_line = 0}' <${MASTER_CONFIG}`" -- ${cur}))
|
||||
return 0
|
||||
COMPREPLY=($(compgen -W "`awk -F ':' 'BEGIN {print_line = 0}; /^nodegroups/ {print_line = 1;getline } print_line && /^ */ {print $1} /^[^ ]/ {print_line = 0}' <${MASTER_CONFIG}`" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
|
@ -133,8 +133,8 @@ _saltkey(){
|
|||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
opts="-c --config-dir= -h --help --version --versions-report -q --quiet \
|
||||
-y --yes --gen-keys= --gen-keys-dir= --keysize= --key-logfile= \
|
||||
-l --list= -L --list-all -a --accept= -A --accept-all \
|
||||
-r --reject= -R --reject-all -p --print= -P --print-all \
|
||||
-l --list= -L --list-all -a --accept= -A --accept-all \
|
||||
-r --reject= -R --reject-all -p --print= -P --print-all \
|
||||
-d --delete= -D --delete-all -f --finger= -F --finger-all \
|
||||
--out=pprint --out=yaml --out=overstatestage --out=json --out=raw \
|
||||
--out=highstate --out=key --out=txt --no-color --out-indent= "
|
||||
|
@ -150,13 +150,13 @@ _saltkey(){
|
|||
fi
|
||||
|
||||
if [ "${cur}" = "=" ] && [[ "${prev}" == --* ]]; then
|
||||
cur=""
|
||||
cur=""
|
||||
fi
|
||||
if [ "${prev}" = "=" ] && [[ "${pprev}" == --* ]]; then
|
||||
prev="${pprev}"
|
||||
fi
|
||||
|
||||
case "${prev}" in
|
||||
case "${prev}" in
|
||||
-a|--accept)
|
||||
COMPREPLY=($(compgen -W "$(salt-key -l un --no-color; salt-key -l rej --no-color)" -- ${cur}))
|
||||
return 0
|
||||
|
@ -177,7 +177,7 @@ _saltkey(){
|
|||
COMPREPLY=($(compgen -W "2048 3072 4096 5120 6144" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
--gen-keys)
|
||||
--gen-keys)
|
||||
return 0
|
||||
;;
|
||||
--gen-keys-dir)
|
||||
|
@ -221,14 +221,14 @@ _saltcall(){
|
|||
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
if [ "${cur}" = "=" ] && [[ ${prev} == --* ]]; then
|
||||
cur=""
|
||||
fi
|
||||
if [ "${prev}" = "=" ] && [[ ${pprev} == --* ]]; then
|
||||
prev="${pprev}"
|
||||
fi
|
||||
|
||||
|
||||
case ${prev} in
|
||||
-m|--module-dirs)
|
||||
COMPREPLY=( $(compgen -d ${cur} ))
|
||||
|
@ -262,7 +262,7 @@ _saltcp(){
|
|||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
opts="-t --timeout= -s --static -b --batch= --batch-size= \
|
||||
-h --help --version --versions-report -c --config-dir= \
|
||||
-E --pcre -L --list -G --grain --grain-pcre -N --nodegroup \
|
||||
-E --pcre -L --list -G --grain --grain-pcre -N --nodegroup \
|
||||
-R --range -C --compound -I --pillar \
|
||||
--out=pprint --out=yaml --out=overstatestage --out=json --out=raw \
|
||||
--out=highstate --out=key --out=txt --no-color --out-indent= "
|
||||
|
@ -270,19 +270,19 @@ _saltcp(){
|
|||
COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
if [ "${cur}" = "=" ] && [[ "${prev}" == --* ]]; then
|
||||
cur=""
|
||||
cur=""
|
||||
fi
|
||||
if [ "${prev}" = "=" ] && [[ "${pprev}" == --* ]]; then
|
||||
prev=${pprev}
|
||||
fi
|
||||
|
||||
|
||||
case ${prev} in
|
||||
salt-cp)
|
||||
COMPREPLY=($(compgen -W "${opts} `salt-key -l acc --no-color`" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
;;
|
||||
-t|--timeout)
|
||||
# those numbers are just a hint
|
||||
COMPREPLY=($(compgen -W "2 3 4 8 10 15 20 25 30 40 60 90 120 180 240 300" -- ${cur} ))
|
||||
|
@ -303,7 +303,7 @@ _saltcp(){
|
|||
return 0
|
||||
;;
|
||||
-G|--grain|--grain-pcre)
|
||||
COMPREPLY=($(compgen -W "$(_salt_get_grains)" -- ${cur}))
|
||||
COMPREPLY=($(compgen -W "$(_salt_get_grains)" -- ${cur}))
|
||||
return 0
|
||||
;;
|
||||
# FIXME
|
||||
|
@ -320,7 +320,7 @@ _saltcp(){
|
|||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# default is using opts:
|
||||
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
||||
}
|
||||
|
|
|
@ -2,5 +2,5 @@
|
|||
# firewall rules to systems with UFW. Activate with 'ufw allow salt'
|
||||
[Salt]
|
||||
title=salt
|
||||
description=fast and powerful configuration management and remote execution
|
||||
description=fast and powerful configuration management and remote execution
|
||||
ports=4505,4506/tcp
|
||||
|
|
|
@ -21,7 +21,7 @@ if test -z "$pyver"; then
|
|||
elif test -f /etc/arch-release; then
|
||||
python=python2
|
||||
fi
|
||||
|
||||
|
||||
if test -z "$pyver"; then
|
||||
test -z "$python" && python=python
|
||||
pyver=`/usr/bin/env $python -V 2>&1 | cut -f2 -d' ' | cut -f1,2 -d.`
|
||||
|
|
|
@ -28,4 +28,5 @@ Running the salt-master daemon as a root user is considers by some a security ri
|
|||
running as root, enables the pam external auth system, as this system needs root access to check authentication.
|
||||
|
||||
For more information:
|
||||
http://docs.saltstack.com/en/latest/ref/configuration/nonroot.html
|
||||
http://docs.saltstack.com/en/latest/ref/configuration/nonroot.html
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -58,7 +58,7 @@ start() {
|
|||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif $PYTHON $SALTMASTER -d $MASTER_ARGS >& /dev/null; then
|
||||
echo -n "OK"
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
|
||||
# chkconfig header
|
||||
|
||||
# chkconfig: - 99 99
|
||||
# chkconfig: - 99 99
|
||||
# description: This is a the Salt syndic daemon that enables Salt master-minion remote control passthrough.
|
||||
#
|
||||
# processname: /usr/bin/salt-syndic
|
||||
|
@ -31,7 +31,7 @@ DEBIAN_VERSION=/etc/debian_version
|
|||
SUSE_RELEASE=/etc/SuSE-release
|
||||
# Source function library.
|
||||
if [ -f $DEBIAN_VERSION ]; then
|
||||
break
|
||||
break
|
||||
elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
|
||||
. /etc/rc.status
|
||||
else
|
||||
|
@ -59,7 +59,7 @@ start() {
|
|||
rc_status -v
|
||||
elif [ -e $DEBIAN_VERSION ]; then
|
||||
if [ -f $LOCKFILE ]; then
|
||||
echo -n "already started, lock file found"
|
||||
echo -n "already started, lock file found"
|
||||
RETVAL=1
|
||||
elif $PYTHON $SALTSYNDIC -d $SYNDIC_ARGS >& /dev/null; then
|
||||
echo -n "OK"
|
||||
|
|
|
@ -251,9 +251,9 @@ Thu Jun 4 19:46:19 UTC 2015 - aboe76@gmail.com
|
|||
Sat May 23 18:31:44 UTC 2015 - aboe76@gmail.com
|
||||
|
||||
- New Bugfix release 2015.5.1
|
||||
salt.runners.cloud.action() has changed the fun keyword argument to func.
|
||||
salt.runners.cloud.action() has changed the fun keyword argument to func.
|
||||
Please update any calls to this function in the cloud runner.
|
||||
|
||||
|
||||
for more details:
|
||||
http://docs.saltstack.com/en/latest/topics/releases/2015.5.1.html
|
||||
|
||||
|
@ -341,8 +341,8 @@ Changes:
|
|||
+ Fixed argument passing to the reactor
|
||||
+ Fixed glibc caching to prevent bug where salt-minion getaddrinfo in dns_check() never got updated nameservers
|
||||
Known Issues:
|
||||
+ In multimaster mode, a minion may become temporarily unresponsive if modules or pillars are refreshed at the
|
||||
same time that one or more masters are down. This can be worked around by setting 'auth_timeout' and 'auth_tries'
|
||||
+ In multimaster mode, a minion may become temporarily unresponsive if modules or pillars are refreshed at the
|
||||
same time that one or more masters are down. This can be worked around by setting 'auth_timeout' and 'auth_tries'
|
||||
down to shorter periods.
|
||||
|
||||
-------------------------------------------------------------------
|
||||
|
@ -436,11 +436,11 @@ Thu Feb 12 19:35:34 UTC 2015 - aboe76@gmail.com
|
|||
- keep sle 11 sp3 support.
|
||||
+ Fix erroneous warnings for systemd service enabled check (issue 19606)
|
||||
+ Fix FreeBSD kernel module loading, listing, and persistence kmod (issue 197151, issue 19682)
|
||||
+ Allow case-sensitive npm package names in the npm state. This may break behavior
|
||||
for people expecting the state to lowercase their npm package names for them.
|
||||
+ Allow case-sensitive npm package names in the npm state. This may break behavior
|
||||
for people expecting the state to lowercase their npm package names for them.
|
||||
The npm module was never affected by mandatory lowercasing. (issue 20329)
|
||||
+ Deprecate the activate parameter for pip.install for both the module and the state.
|
||||
If bin_env is given and points to a virtualenv, there is no need to activate that virtualenv
|
||||
+ Deprecate the activate parameter for pip.install for both the module and the state.
|
||||
If bin_env is given and points to a virtualenv, there is no need to activate that virtualenv
|
||||
in a shell for pip to install to the virtualenv.
|
||||
+ Fix a file-locking bug in gitfs (issue 18839)
|
||||
|
||||
|
@ -510,7 +510,7 @@ Mon Nov 3 21:35:31 UTC 2014 - aboe76@gmail.com
|
|||
+ New hgfs Features
|
||||
+ mountpoints support
|
||||
+ New svnfs Features:
|
||||
+ mountpoints
|
||||
+ mountpoints
|
||||
+ minionfs Featuressupport
|
||||
+ mountpoints
|
||||
- New Salt Modules:
|
||||
|
@ -544,7 +544,7 @@ Thu Oct 16 19:26:57 UTC 2014 - aboe76@gmail.com
|
|||
- Updated to 2014.1.13 a bugfix release on 2014.1.12
|
||||
+ fix module run exit code (issue 16420)
|
||||
+ salt cloud Check the exit status code of scp before assuming it has failed. (issue 16599)
|
||||
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Fri Oct 10 18:47:07 UTC 2014 - aboe76@gmail.com
|
||||
|
@ -565,7 +565,7 @@ Wed Sep 10 18:10:50 UTC 2014 - aboe76@gmail.com
|
|||
+ Fix json outputter null case
|
||||
+ Fix for minion error if one of multiple masters are down (issue 14099)
|
||||
+ Updated the use-forking-daemon.patch with the right version
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Mon Aug 18 13:06:07 UTC 2014 - tserong@suse.com
|
||||
|
||||
|
@ -639,7 +639,7 @@ Sat Jul 19 07:58:18 UTC 2014 - aboe76@gmail.com
|
|||
Thu Jul 10 18:25:05 UTC 2014 - aboe76@gmail.com
|
||||
|
||||
- Update to 2014.7
|
||||
This release was a hotfix release for the regression listed above which was present in the 2014.1.6
|
||||
This release was a hotfix release for the regression listed above which was present in the 2014.1.6
|
||||
- Fix batch mode regression (issue 14046)
|
||||
|
||||
-------------------------------------------------------------------
|
||||
|
@ -759,11 +759,11 @@ Thu Apr 17 18:06:56 UTC 2014 - aboe76@gmail.com
|
|||
- Fix git.latest with test=True (issue 11595)
|
||||
- Fix file.check_perms hardcoded follow_symlinks (issue 11387)
|
||||
- Fix certain pkg states for RHEL5/Cent5 machines (issue 11719)
|
||||
- Packaging:
|
||||
- Packaging:
|
||||
- python-psutil depencies (more functional modules out of the box)
|
||||
- python-yaml depencies (more functional modules out of the box)
|
||||
- python-requests depencies (salt-cloud)
|
||||
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Wed Mar 19 19:29:13 UTC 2014 - aboe76@gmail.com
|
||||
|
@ -925,7 +925,7 @@ Thu Nov 14 22:05:06 UTC 2013 - aboe76@gmail.com
|
|||
- Fix for matching minions under syndics (issue 7671)
|
||||
- Improve exception handling for missing ID (issue 8259)
|
||||
- Add configuration option for minion_id_caching
|
||||
- Fix open mode auth errors (issue 8402)
|
||||
- Fix open mode auth errors (issue 8402)
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Sun Nov 10 07:52:54 UTC 2013 - aboe76@gmail.com
|
||||
|
@ -1044,30 +1044,30 @@ Thu Sep 19 17:18:06 UTC 2013 - aboe76@gmail.com
|
|||
Minor features:
|
||||
- 0.17.0 release wil be last release for 0.XX.X numbering system
|
||||
Next release will be <Year>.<Month>.<Minor>
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Sat Sep 7 22:44:41 UTC 2013 - aboe76@gmail.com
|
||||
|
||||
- Update 0.16.4 bugfix release:
|
||||
- Multiple documentation improvements/additions
|
||||
- Added the osfinger and osarch grains
|
||||
- Fix bug in :mod:`hg.latest <salt.states.hg.latest>` state
|
||||
- Fix bug in :mod:`hg.latest <salt.states.hg.latest>` state
|
||||
that would erroneously delete directories (:issue:`6661`)
|
||||
- Fix bug related to pid not existing for
|
||||
- Fix bug related to pid not existing for
|
||||
:mod:`ps.top <salt.modules.ps.top>` (:issue:`6679`)
|
||||
- Fix regression in :mod:`MySQL returner <salt.returners.mysql>`
|
||||
- Fix regression in :mod:`MySQL returner <salt.returners.mysql>`
|
||||
(:issue:`6695`)
|
||||
- Fix IP addresses grains (ipv4 and ipv6) to include all addresses
|
||||
(:issue:`6656`)
|
||||
- Fix regression preventing authenticated FTP (:issue:`6733`)
|
||||
- Fix :mod:`file.contains <salt.modules.file.contains>` on values
|
||||
- Fix :mod:`file.contains <salt.modules.file.contains>` on values
|
||||
YAML parses as non-string (:issue:`6817`)
|
||||
- Fix :mod:`file.get_gid <salt.modules.file.get_gid>`,
|
||||
:mod:`file.get_uid <salt.modules.file.get_uid>`, and
|
||||
:mod:`file.chown <salt.modules.file.chown>` for broken symlinks
|
||||
- Fix :mod:`file.get_gid <salt.modules.file.get_gid>`,
|
||||
:mod:`file.get_uid <salt.modules.file.get_uid>`, and
|
||||
:mod:`file.chown <salt.modules.file.chown>` for broken symlinks
|
||||
(:issue:`6826`)
|
||||
- Fix comment for service reloads in service state (:issue:`6851`)
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Fri Aug 9 18:08:12 UTC 2013 - aboe76@gmail.com
|
||||
|
||||
|
@ -1130,7 +1130,7 @@ Mon Jul 29 18:06:03 UTC 2013 - aboe76@gmail.com
|
|||
- http authentication issues fixed #6356
|
||||
- warning of deprecation runas in favor of user
|
||||
- more information at https://github.com/saltstack/salt/commits/v0.16.1
|
||||
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Fri Jul 5 21:24:21 UTC 2013 - aboe76@gmail.com
|
||||
|
||||
|
@ -1195,7 +1195,7 @@ Wed May 29 16:10:42 UTC 2013 - aboe76@gmail.com
|
|||
xinetd service name not appended
|
||||
virt-module uses qemu-img
|
||||
publish.publish returns same info as salt-master
|
||||
updated gitfs module
|
||||
updated gitfs module
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Mon May 27 20:42:06 UTC 2013 - aboe76@gmail.com
|
||||
|
@ -1295,7 +1295,7 @@ Wed Mar 13 22:04:43 UTC 2013 - aboe76@gmail.com
|
|||
-------------------------------------------------------------------
|
||||
Mon Feb 25 17:52:59 UTC 2013 - aboe76@gmail.com
|
||||
|
||||
- Updated spec file, postun removal of init.d files
|
||||
- Updated spec file, postun removal of init.d files
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Sat Feb 16 09:25:30 UTC 2013 - aboe76@gmail.com
|
||||
|
|
|
@ -91,7 +91,7 @@ pki_dir: /conf/pki/minion
|
|||
# seconds, between those reconnection attempts.
|
||||
#acceptance_wait_time: 10
|
||||
|
||||
# If this is set, the time between reconnection attempts will increase by
|
||||
# If this is set, the time between reconnection attempts will increase by
|
||||
# acceptance_wait_time seconds per iteration, up to this maximum. If this
|
||||
# is not set, the time between reconnection attempts will stay constant.
|
||||
#acceptance_wait_time_max: None
|
||||
|
@ -192,11 +192,11 @@ pki_dir: /conf/pki/minion
|
|||
# Run states when the minion daemon starts. To enable, set startup_states to:
|
||||
# 'highstate' -- Execute state.highstate
|
||||
# 'sls' -- Read in the sls_list option and execute the named sls files
|
||||
# 'top' -- Read top_file option and execute based on that file on the Master
|
||||
# 'top' -- Read top_file option and execute based on that file on the Master
|
||||
#startup_states: ''
|
||||
#
|
||||
# list of states to run when the minion starts up if startup_states is 'sls'
|
||||
#sls_list:
|
||||
#sls_list:
|
||||
# - edit.vim
|
||||
# - hyper
|
||||
#
|
||||
|
@ -377,12 +377,12 @@ pki_dir: /conf/pki/minion
|
|||
# without informing either party that their connection has been taken away.
|
||||
# Enabling TCP Keepalives prevents this from happening.
|
||||
#
|
||||
# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
|
||||
# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
|
||||
# or leave to the OS defaults (-1), on Linux, typically disabled. Default True, enabled.
|
||||
#tcp_keepalive: True
|
||||
#
|
||||
# How long before the first keepalive should be sent in seconds. Default 300
|
||||
# to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds
|
||||
# to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds
|
||||
# on Linux see /proc/sys/net/ipv4/tcp_keepalive_time.
|
||||
#tcp_keepalive_idle: 300
|
||||
#
|
||||
|
@ -390,8 +390,8 @@ pki_dir: /conf/pki/minion
|
|||
# to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes.
|
||||
#tcp_keepalive_cnt: -1
|
||||
#
|
||||
# How often, in seconds, to send keepalives after the first one. Default -1 to
|
||||
# use OS defaults, typically 75 seconds on Linux, see
|
||||
# How often, in seconds, to send keepalives after the first one. Default -1 to
|
||||
# use OS defaults, typically 75 seconds on Linux, see
|
||||
# /proc/sys/net/ipv4/tcp_keepalive_intvl.
|
||||
#tcp_keepalive_intvl: -1
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
Salt - Remote execution system
|
||||
Salt - Remote execution system
|
||||
|
||||
Copyright 2011 Thomas S Hatch
|
||||
|
||||
|
|
|
@ -32,10 +32,10 @@ modification, Salt can be fine-tuned to meet specific needs.
|
|||
Granular Controls
|
||||
=================
|
||||
|
||||
Salt also introduces powerful granular controls to the realm of remote execution.
|
||||
By default, commands are executed in parallel. However, using more advanced
|
||||
options, commands can be executed in batch groups or even in serial. By using
|
||||
simple built-in filters or regular expression matching, systems can be targeted by
|
||||
Salt also introduces powerful granular controls to the realm of remote execution.
|
||||
By default, commands are executed in parallel. However, using more advanced
|
||||
options, commands can be executed in batch groups or even in serial. By using
|
||||
simple built-in filters or regular expression matching, systems can be targeted by
|
||||
hostname, metadata or system properties such as number of cpus or OS type.
|
||||
|
||||
Building on Proven Technology
|
||||
|
|
|
@ -225,7 +225,7 @@ Section "MainSection" SEC01
|
|||
SetOverwrite off
|
||||
CreateDirectory $INSTDIR\conf\pki\minion
|
||||
File /r "..\buildenv\"
|
||||
Exec 'icacls c:\salt /inheritance:r /grant:r "BUILTIN\Administrators":(OI)(CI)F /grant:r "NT AUTHORITY\SYSTEM":(OI)(CI)F'
|
||||
Exec 'icacls c:\salt /inheritance:r /grant:r "*S-1-5-32-544":(OI)(CI)F /grant:r "*S-1-5-18":(OI)(CI)F'
|
||||
|
||||
SectionEnd
|
||||
|
||||
|
|
|
@ -122,7 +122,7 @@ def salt_refs(data, ret=None):
    return ret


def prep_trans_tar(file_client, chunks, file_refs, pillar=None):
def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None):
    '''
    Generate the execution package from the saltenv file refs and a low state
    data structure

@ -145,6 +145,7 @@ def prep_trans_tar(file_client, chunks, file_refs, pillar=None):
    if pillar:
        with salt.utils.fopen(pillarfn, 'w+') as fp_:
            fp_.write(json.dumps(pillar._dict()))
    cachedir = os.path.join('salt-ssh', id_)
    for saltenv in file_refs:
        file_refs[saltenv].extend(sync_refs)
        env_root = os.path.join(gendir, saltenv)

@ -153,7 +154,7 @@ def prep_trans_tar(file_client, chunks, file_refs, pillar=None):
        for ref in file_refs[saltenv]:
            for name in ref:
                short = salt.utils.url.parse(name)[0]
                path = file_client.cache_file(name, saltenv)
                path = file_client.cache_file(name, saltenv, cachedir=cachedir)
                if path:
                    tgt = os.path.join(env_root, short)
                    tgt_dir = os.path.dirname(tgt)

@ -161,7 +162,7 @@ def prep_trans_tar(file_client, chunks, file_refs, pillar=None):
                        os.makedirs(tgt_dir)
                    shutil.copy(path, tgt)
                    continue
                files = file_client.cache_dir(name, saltenv)
                files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
                if files:
                    for filename in files:
                        fn = filename[filename.find(short) + len(short):]
|
|
|
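To restate the change in this file in isolation: the tar-preparation path now derives a per-minion cache location from the new ``id_`` argument and threads it through the file-client calls. A minimal sketch of that composition follows; the minion ID value is hypothetical.

.. code-block:: python

    import os

    minion_id = 'web01'                              # hypothetical id_ value
    cachedir = os.path.join('salt-ssh', minion_id)   # relative to the minion cache dir

    # The relative cachedir is then passed through to the file client, e.g.:
    #   path  = file_client.cache_file(name, saltenv, cachedir=cachedir)
    #   files = file_client.cache_dir(name, saltenv, cachedir=cachedir)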
@ -7,6 +7,7 @@ from __future__ import absolute_import
# Import salt libs
import salt.client.ssh
import logging
import os
from salt.exceptions import CommandExecutionError

log = logging.getLogger(__name__)

@ -33,7 +34,10 @@ def get_file(path,
    if template is not None:
        (path, dest) = _render_filenames(path, dest, saltenv, template)

    src = __context__['fileclient'].cache_file(path, saltenv)
    src = __context__['fileclient'].cache_file(
            path,
            saltenv,
            cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
    single = salt.client.ssh.Single(
            __opts__,
            '',

@ -46,7 +50,10 @@ def get_dir(path, dest, saltenv='base'):
    '''
    Transfer a directory down
    '''
    src = __context__['fileclient'].cache_dir(path, saltenv)
    src = __context__['fileclient'].cache_dir(
            path,
            saltenv,
            cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
    src = ' '.join(src)
    single = salt.client.ssh.Single(
            __opts__,

@ -60,7 +67,10 @@ def get_url(path, dest, saltenv='base'):
    '''
    Retrieve a URL
    '''
    src = __context__['fileclient'].get_url(path, saltenv)
    src = __context__['fileclient'].get_url(
            path,
            saltenv,
            cachedir=os.path.join('salt-ssh', __salt__.kwargs['id_']))
    single = salt.client.ssh.Single(
            __opts__,
            '',
|
|
|
@ -94,11 +94,13 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
                __opts__.get('extra_filerefs', '')
            )
    )
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
            __opts__['thin_dir'],

@ -162,11 +164,13 @@ def low(data, **kwargs):
                __opts__.get('extra_filerefs', '')
            )
    )
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
            __opts__['thin_dir'],

@ -227,11 +231,13 @@ def high(data, **kwargs):
                __opts__.get('extra_filerefs', '')
            )
    )
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
            __opts__['thin_dir'],

@ -320,11 +326,13 @@ def highstate(test=None, **kwargs):
    for chunk in chunks:
        if not isinstance(chunk, dict):
            return chunks
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
            __opts__['thin_dir'],

@ -392,11 +400,13 @@ def top(topfn, test=None, **kwargs):
                __opts__.get('extra_filerefs', '')
            )
    )
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
    cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
            __opts__['thin_dir'],

@ -610,12 +620,13 @@ def single(fun, name, test=None, **kwargs):
            )
    )

    # Create the tar containing the state pkg and relevant files
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(
            __context__['fileclient'],
            chunks,
            file_refs,
            __pillar__)
            __pillar__,
            id_=st_kwargs['id_'])

    # Create a hash so we can verify the tar on the target system
    trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type'])
|
|
@ -2045,7 +2045,7 @@ class Map(Cloud):
                master_temp_pub = salt.utils.mkstemp()
                with salt.utils.fopen(master_temp_pub, 'w') as mtp:
                    mtp.write(pub)
                master_finger = salt.utils.pem_finger(master_temp_pub)
                master_finger = salt.utils.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
                os.unlink(master_temp_pub)

            if master_profile.get('make_minion', True) is True:

@ -2130,7 +2130,7 @@ class Map(Cloud):
        # mitigate man-in-the-middle attacks
        master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
        if os.path.isfile(master_pub):
            master_finger = salt.utils.pem_finger(master_pub)
            master_finger = salt.utils.pem_finger(master_pub, sum_type=self.opts['hash_type'])

        opts = self.opts.copy()
        if self.opts['parallel']:

@ -2583,6 +2583,10 @@ def create(vm_=None, call=None):
        transport=__opts__['transport']
    )

    # Ensure that the latest node data is returned
    node = _get_node(instance_id=vm_['instance_id'])
    ret.update(node)

    return ret
|
|
@ -15,7 +15,7 @@ EOF

# add-apt-repository requires an additional dep and is in different packages
# on different systems. Although seemingly ubiquitous it is not a standard,
# and is only a convenience script intended to accomplish the below two steps
# doing it this way is universal across all debian and ubuntu systems.
echo deb http://ppa.launchpad.net/saltstack/salt/ubuntu `lsb_release -sc` main | tee /etc/apt/sources.list.d/saltstack.list
wget -q -O- "http://keyserver.ubuntu.com:11371/pks/lookup?op=get&search=0x4759FA960E27C0A6" | apt-key add -

@ -15,7 +15,7 @@ EOF

# add-apt-repository requires an additional dep and is in different packages
# on different systems. Although seemingly ubiquitous it is not a standard,
# and is only a convenience script intended to accomplish the below two steps
# doing it this way is universal across all debian and ubuntu systems.
echo deb http://ppa.launchpad.net/saltstack/salt/ubuntu `lsb_release -sc` main | tee /etc/apt/sources.list.d/saltstack.list
wget -q -O- "http://keyserver.ubuntu.com:11371/pks/lookup?op=get&search=0x4759FA960E27C0A6" | apt-key add -
|
|
@ -4,7 +4,7 @@
|
|||
# This is a generic wrapper for the salt-bootstrap script at:
|
||||
#
|
||||
# https://github.com/saltstack/salt-bootstrap
|
||||
#
|
||||
#
|
||||
# It has been designed as an example, to be customized for your own needs.
|
||||
|
||||
curl -L https://bootstrap.saltstack.com | sudo sh -s -- "$@" git develop
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# This is a generic wrapper for the salt-bootstrap script at:
|
||||
#
|
||||
# https://github.com/saltstack/salt-bootstrap
|
||||
#
|
||||
#
|
||||
# It has been designed as an example, to be customized for your own needs.
|
||||
|
||||
curl -L https://bootstrap.saltstack.com | sudo sh -s -- "$@"
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# This is a generic wrapper for the salt-bootstrap script at:
|
||||
#
|
||||
# https://github.com/saltstack/salt-bootstrap
|
||||
#
|
||||
#
|
||||
# It has been designed as an example, to be customized for your own needs.
|
||||
|
||||
python -c 'import urllib; print urllib.urlopen("https://bootstrap.saltstack.com").read()' | sudo sh -s -- "$@"
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# This is a generic wrapper for the salt-bootstrap script at:
|
||||
#
|
||||
# https://github.com/saltstack/salt-bootstrap
|
||||
#
|
||||
#
|
||||
# It has been designed as an example, to be customized for your own needs.
|
||||
|
||||
wget --no-check-certificate -O - https://bootstrap.saltstack.com | sudo sh -s -- "$@"
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# This is a generic wrapper for the salt-bootstrap script at:
|
||||
#
|
||||
# https://github.com/saltstack/salt-bootstrap
|
||||
#
|
||||
#
|
||||
# It has been designed as an example, to be customized for your own needs.
|
||||
|
||||
wget -O - https://bootstrap.saltstack.com | sudo sh -s -- "$@"
|
||||
|
|
|
@ -1098,11 +1098,11 @@ class SAuth(AsyncAuth):
|
|||
if self.opts.get('syndic_master', False): # Is syndic
|
||||
syndic_finger = self.opts.get('syndic_finger', self.opts.get('master_finger', False))
|
||||
if syndic_finger:
|
||||
if salt.utils.pem_finger(m_pub_fn) != syndic_finger:
|
||||
if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != syndic_finger:
|
||||
self._finger_fail(syndic_finger, m_pub_fn)
|
||||
else:
|
||||
if self.opts.get('master_finger', False):
|
||||
if salt.utils.pem_finger(m_pub_fn) != self.opts['master_finger']:
|
||||
if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != self.opts['master_finger']:
|
||||
self._finger_fail(self.opts['master_finger'], m_pub_fn)
|
||||
auth['publish_port'] = payload['publish_port']
|
||||
return auth
|
||||
|
@ -1116,7 +1116,7 @@ class SAuth(AsyncAuth):
|
|||
'this minion is not subject to a man-in-the-middle attack.'
|
||||
.format(
|
||||
finger,
|
||||
salt.utils.pem_finger(master_key)
|
||||
salt.utils.pem_finger(master_key, sum_type=self.opts['hash_type'])
|
||||
)
|
||||
)
|
||||
sys.exit(42)
|
||||
|
|
|
@ -106,11 +106,16 @@ class Client(object):
|
|||
return filelist
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _cache_loc(self, path, saltenv='base'):
|
||||
def _cache_loc(self, path, saltenv='base', cachedir=None):
|
||||
'''
|
||||
Return the local location to cache the file, cache dirs will be made
|
||||
'''
|
||||
dest = salt.utils.path_join(self.opts['cachedir'],
|
||||
if cachedir is None:
|
||||
cachedir = self.opts['cachedir']
|
||||
elif not os.path.isabs(cachedir):
|
||||
cachedir = os.path.join(self.opts['cachedir'], cachedir)
|
||||
|
||||
dest = salt.utils.path_join(cachedir,
|
||||
'files',
|
||||
saltenv,
|
||||
path)
|
||||
|
@ -130,7 +135,8 @@ class Client(object):
|
|||
dest='',
|
||||
makedirs=False,
|
||||
saltenv='base',
|
||||
gzip=None):
|
||||
gzip=None,
|
||||
cachedir=None):
|
||||
'''
|
||||
Copies a file from the local files or master depending on
|
||||
implementation
|
||||
|
@ -143,14 +149,14 @@ class Client(object):
|
|||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def cache_file(self, path, saltenv='base'):
|
||||
def cache_file(self, path, saltenv='base', cachedir=None):
|
||||
'''
|
||||
Pull a file down from the file server and store it in the minion
|
||||
file cache
|
||||
'''
|
||||
return self.get_url(path, '', True, saltenv)
|
||||
return self.get_url(path, '', True, saltenv, cachedir=cachedir)
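
A minimal usage sketch for the new cachedir argument, assuming a standard minion config path: a relative cachedir is joined beneath opts['cachedir'], while an absolute one is used as-is.

    import salt.config
    import salt.fileclient

    opts = salt.config.minion_config('/etc/salt/minion')  # assumed config location
    client = salt.fileclient.get_file_client(opts)
    # lands in <cachedir>/alt/files/base/foo.txt because 'alt' is relative
    cached_path = client.cache_file('salt://foo.txt', saltenv='base', cachedir='alt')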
|
||||
|
||||
def cache_files(self, paths, saltenv='base'):
|
||||
def cache_files(self, paths, saltenv='base', cachedir=None):
|
||||
'''
|
||||
Download a list of files stored on the master and put them in the
|
||||
minion file cache
|
||||
|
@ -159,20 +165,23 @@ class Client(object):
|
|||
if isinstance(paths, str):
|
||||
paths = paths.split(',')
|
||||
for path in paths:
|
||||
ret.append(self.cache_file(path, saltenv))
|
||||
ret.append(self.cache_file(path, saltenv, cachedir=cachedir))
|
||||
return ret
|
||||
|
||||
def cache_master(self, saltenv='base'):
|
||||
def cache_master(self, saltenv='base', cachedir=None):
|
||||
'''
|
||||
Download and cache all files on a master in a specified environment
|
||||
'''
|
||||
ret = []
|
||||
for path in self.file_list(saltenv):
|
||||
ret.append(self.cache_file(salt.utils.url.create(path), saltenv))
|
||||
ret.append(
|
||||
self.cache_file(
|
||||
salt.utils.url.create(path), saltenv, cachedir=cachedir)
|
||||
)
|
||||
return ret
|
||||
|
||||
def cache_dir(self, path, saltenv='base', include_empty=False,
|
||||
include_pat=None, exclude_pat=None):
|
||||
include_pat=None, exclude_pat=None, cachedir=None):
|
||||
'''
|
||||
Download all of the files in a subdir of the master
|
||||
'''
|
||||
|
@ -197,7 +206,8 @@ class Client(object):
|
|||
if fn_.strip() and fn_.startswith(path):
|
||||
if salt.utils.check_include_exclude(
|
||||
fn_, include_pat, exclude_pat):
|
||||
fn_ = self.cache_file(salt.utils.url.create(fn_), saltenv)
|
||||
fn_ = self.cache_file(
|
||||
salt.utils.url.create(fn_), saltenv, cachedir=cachedir)
|
||||
if fn_:
|
||||
ret.append(fn_)
|
||||
|
||||
|
@ -211,11 +221,12 @@ class Client(object):
|
|||
# prefix = ''
|
||||
# else:
|
||||
# prefix = separated[0]
|
||||
dest = salt.utils.path_join(
|
||||
self.opts['cachedir'],
|
||||
'files',
|
||||
saltenv
|
||||
)
|
||||
if cachedir is None:
|
||||
cachedir = self.opts['cachedir']
|
||||
elif not os.path.isabs(cachedir):
|
||||
cachedir = os.path.join(self.opts['cachedir'], cachedir)
|
||||
|
||||
dest = salt.utils.path_join(cachedir, 'files', saltenv)
|
||||
for fn_ in self.file_list_emptydirs(saltenv):
|
||||
fn_ = sdecode(fn_)
|
||||
if fn_.startswith(path):
|
||||
|
@ -268,7 +279,7 @@ class Client(object):
|
|||
'''
|
||||
return {}
|
||||
|
||||
def is_cached(self, path, saltenv='base'):
|
||||
def is_cached(self, path, saltenv='base', cachedir=None):
|
||||
'''
|
||||
Returns the full path to a file if it is cached locally on the minion
|
||||
otherwise returns a blank string
|
||||
|
@ -285,12 +296,14 @@ class Client(object):
|
|||
self.opts['cachedir'], 'localfiles', path.lstrip('|/'))
|
||||
filesdest = os.path.join(
|
||||
self.opts['cachedir'], 'files', saltenv, path.lstrip('|/'))
|
||||
extrndest = self._extrn_path(path, saltenv)
|
||||
extrndest = self._extrn_path(path, saltenv, cachedir=cachedir)
|
||||
|
||||
if os.path.exists(filesdest):
|
||||
return salt.utils.url.escape(filesdest) if escaped else filesdest
|
||||
elif os.path.exists(localsfilesdest):
|
||||
return salt.utils.url.escape(localsfilesdest) if escaped else localsfilesdest
|
||||
return salt.utils.url.escape(localsfilesdest) \
|
||||
if escaped \
|
||||
else localsfilesdest
|
||||
elif os.path.exists(extrndest):
|
||||
return extrndest
|
||||
|
||||
|
@ -348,7 +361,7 @@ class Client(object):
|
|||
states.append(path.replace('/', '.')[:-4])
|
||||
return states
|
||||
|
||||
def get_state(self, sls, saltenv):
|
||||
def get_state(self, sls, saltenv, cachedir=None):
|
||||
'''
|
||||
Get a state file from the master and store it in the local minion
|
||||
cache; return the location of the file
|
||||
|
@ -358,12 +371,13 @@ class Client(object):
|
|||
sls_url = salt.utils.url.create(sls + '.sls')
|
||||
init_url = salt.utils.url.create(sls + '/init.sls')
|
||||
for path in [sls_url, init_url]:
|
||||
dest = self.cache_file(path, saltenv)
|
||||
dest = self.cache_file(path, saltenv, cachedir=cachedir)
|
||||
if dest:
|
||||
return {'source': path, 'dest': dest}
|
||||
return {}
|
||||
|
||||
def get_dir(self, path, dest='', saltenv='base', gzip=None):
|
||||
def get_dir(self, path, dest='', saltenv='base', gzip=None,
|
||||
cachedir=None):
|
||||
'''
|
||||
Get a directory recursively from the salt-master
|
||||
'''
|
||||
|
@ -421,7 +435,8 @@ class Client(object):
|
|||
ret.sort()
|
||||
return ret
|
||||
|
||||
def get_url(self, url, dest, makedirs=False, saltenv='base', no_cache=False):
|
||||
def get_url(self, url, dest, makedirs=False, saltenv='base',
|
||||
no_cache=False, cachedir=None):
|
||||
'''
|
||||
Get a single file from a URL.
|
||||
'''
|
||||
|
@ -436,7 +451,8 @@ class Client(object):
|
|||
return url_data.path
|
||||
|
||||
if url_data.scheme == 'salt':
|
||||
return self.get_file(url, dest, makedirs, saltenv)
|
||||
return self.get_file(
|
||||
url, dest, makedirs, saltenv, cachedir=cachedir)
|
||||
if dest:
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
|
@ -445,7 +461,7 @@ class Client(object):
|
|||
else:
|
||||
return ''
|
||||
elif not no_cache:
|
||||
dest = self._extrn_path(url, saltenv)
|
||||
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
os.makedirs(destdir)
|
||||
|
@ -473,7 +489,9 @@ class Client(object):
|
|||
location=s3_opt('location'))
|
||||
return dest
|
||||
except Exception as exc:
|
||||
raise MinionError('Could not fetch from {0}. Exception: {1}'.format(url, exc))
|
||||
raise MinionError(
|
||||
'Could not fetch from {0}. Exception: {1}'.format(url, exc)
|
||||
)
|
||||
if url_data.scheme == 'ftp':
|
||||
try:
|
||||
ftp = ftplib.FTP(url_data.hostname)
|
||||
|
@ -593,6 +611,7 @@ class Client(object):
|
|||
template='jinja',
|
||||
makedirs=False,
|
||||
saltenv='base',
|
||||
cachedir=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Cache a file then process it as a template
|
||||
|
@ -608,7 +627,7 @@ class Client(object):
|
|||
|
||||
kwargs['saltenv'] = saltenv
|
||||
url_data = urlparse(url)
|
||||
sfn = self.cache_file(url, saltenv)
|
||||
sfn = self.cache_file(url, saltenv, cachedir=cachedir)
|
||||
if not os.path.exists(sfn):
|
||||
return ''
|
||||
if template in salt.utils.templates.TEMPLATE_REGISTRY:
|
||||
|
@ -630,7 +649,7 @@ class Client(object):
|
|||
return ''
|
||||
if not dest:
|
||||
# No destination passed, set the dest as an extrn_files cache
|
||||
dest = self._extrn_path(url, saltenv)
|
||||
dest = self._extrn_path(url, saltenv, cachedir=cachedir)
|
||||
# If Salt generated the dest name, create any required dirs
|
||||
makedirs = True
|
||||
|
||||
|
@ -644,7 +663,7 @@ class Client(object):
|
|||
shutil.move(data['data'], dest)
|
||||
return dest
|
||||
|
||||
def _extrn_path(self, url, saltenv):
|
||||
def _extrn_path(self, url, saltenv, cachedir=None):
|
||||
'''
|
||||
Return the extrn_files path for a given url
|
||||
'''
|
||||
|
@ -654,8 +673,13 @@ class Client(object):
|
|||
else:
|
||||
netloc = url_data.netloc
|
||||
|
||||
if cachedir is None:
|
||||
cachedir = self.opts['cachedir']
|
||||
elif not os.path.isabs(cachedir):
|
||||
cachedir = os.path.join(self.opts['cachedir'], cachedir)
|
||||
|
||||
return salt.utils.path_join(
|
||||
self.opts['cachedir'],
|
||||
cachedir,
|
||||
'extrn_files',
|
||||
saltenv,
|
||||
netloc,
|
||||
|
@ -695,7 +719,8 @@ class LocalClient(Client):
|
|||
dest='',
|
||||
makedirs=False,
|
||||
saltenv='base',
|
||||
gzip=None):
|
||||
gzip=None,
|
||||
cachedir=None):
|
||||
'''
|
||||
Copies a file from the local files directory into :param:`dest`
|
||||
gzip compression settings are ignored for local files
|
||||
|
@ -847,14 +872,14 @@ class RemoteClient(Client):
|
|||
dest='',
|
||||
makedirs=False,
|
||||
saltenv='base',
|
||||
gzip=None):
|
||||
gzip=None,
|
||||
cachedir=None):
|
||||
'''
|
||||
Get a single file from the salt-master
|
||||
path must be a salt server location, aka, salt://path/to/file, if
|
||||
dest is omitted, then the downloaded file will be placed in the minion
|
||||
cache
|
||||
'''
|
||||
|
||||
path, senv = salt.utils.url.split_env(path)
|
||||
if senv:
|
||||
saltenv = senv
|
||||
|
@ -880,7 +905,8 @@ class RemoteClient(Client):
|
|||
'In saltenv \'{0}\', looking at rel_path \'{1}\' to resolve '
|
||||
'\'{2}\''.format(saltenv, rel_path, path)
|
||||
)
|
||||
with self._cache_loc(rel_path, saltenv) as cache_dest:
|
||||
with self._cache_loc(
|
||||
rel_path, saltenv, cachedir=cachedir) as cache_dest:
|
||||
dest2check = cache_dest
|
||||
|
||||
log.debug(
|
||||
|
@ -935,7 +961,10 @@ class RemoteClient(Client):
|
|||
if not data['data']:
|
||||
if not fn_ and data['dest']:
|
||||
# This is a 0 byte file on the master
|
||||
with self._cache_loc(data['dest'], saltenv) as cache_dest:
|
||||
with self._cache_loc(
|
||||
data['dest'],
|
||||
saltenv,
|
||||
cachedir=cachedir) as cache_dest:
|
||||
dest = cache_dest
|
||||
with salt.utils.fopen(cache_dest, 'wb+') as ofile:
|
||||
ofile.write(data['data'])
|
||||
|
@ -950,7 +979,10 @@ class RemoteClient(Client):
|
|||
continue
|
||||
break
|
||||
if not fn_:
|
||||
with self._cache_loc(data['dest'], saltenv) as cache_dest:
|
||||
with self._cache_loc(
|
||||
data['dest'],
|
||||
saltenv,
|
||||
cachedir=cachedir) as cache_dest:
|
||||
dest = cache_dest
|
||||
# If a directory was formerly cached at this path, then
|
||||
# remove it to avoid a traceback trying to write the file
|
||||
|
|
|
@ -1351,6 +1351,13 @@ def os_data():
|
|||
grains['osfullname'] = \
|
||||
grains.get('lsb_distrib_id', osname).strip()
|
||||
if 'osrelease' not in grains:
|
||||
# NOTE: This is a workaround for CentOS 7 os-release bug
|
||||
# https://bugs.centos.org/view.php?id=8359
|
||||
# /etc/os-release contains no minor distro release number, so we fall back to parsing the
|
||||
# /etc/centos-release file instead.
|
||||
# The commit introducing this comment should be reverted once the upstream fix is released.
|
||||
if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
|
||||
grains.pop('lsb_distrib_release', None)
|
||||
grains['osrelease'] = \
|
||||
grains.get('lsb_distrib_release', osrelease).strip()
|
||||
grains['oscodename'] = grains.get('lsb_distrib_codename',
|
||||
|
|
|
@ -1002,7 +1002,7 @@ class Key(object):
|
|||
path = os.path.join(self.opts['pki_dir'], key)
|
||||
else:
|
||||
path = os.path.join(self.opts['pki_dir'], status, key)
|
||||
ret[status][key] = salt.utils.pem_finger(path)
|
||||
ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
|
||||
return ret
|
||||
|
||||
def finger_all(self):
|
||||
|
@ -1017,7 +1017,7 @@ class Key(object):
|
|||
path = os.path.join(self.opts['pki_dir'], key)
|
||||
else:
|
||||
path = os.path.join(self.opts['pki_dir'], status, key)
|
||||
ret[status][key] = salt.utils.pem_finger(path)
|
||||
ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
|
||||
return ret
|
||||
|
||||
|
||||
|
|
|
@ -531,7 +531,12 @@ class SMinion(MinionBase):
|
|||
# Clean out the proc directory (default /var/cache/salt/minion/proc)
|
||||
if (self.opts.get('file_client', 'remote') == 'remote'
|
||||
or self.opts.get('use_master_when_local', False)):
|
||||
self.eval_master(self.opts, failed=True)
|
||||
if HAS_ZMQ:
|
||||
zmq.eventloop.ioloop.install()
|
||||
io_loop = LOOP_CLASS()
|
||||
io_loop.run_sync(
|
||||
lambda: self.eval_master(self.opts, failed=True)
|
||||
)
|
||||
self.gen_modules(initial_load=True)
|
||||
|
||||
# If configured, cache pillar data on the minion
|
||||
|
|
|
@ -9,11 +9,6 @@ import logging
|
|||
import sys
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
# Import 3rd-party libs
|
||||
# pylint: disable=import-error,redefined-builtin
|
||||
from salt.ext.six.moves import range
|
||||
# pylint: enable=import-error,redefined-builtin
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.utils.cloud as suc
|
||||
|
@ -36,8 +31,7 @@ def _get_minor_version():
|
|||
version = 6
|
||||
cmd = 'gluster --version'
|
||||
result = __salt__['cmd.run'](cmd).splitlines()
|
||||
for line_number in range(len(result)):
|
||||
line = result[line_number]
|
||||
for line in result:
|
||||
if line.startswith('glusterfs'):
|
||||
version = int(line.split()[1].split('.')[1])
|
||||
return version
|
||||
|
@ -50,6 +44,22 @@ def _gluster_ok(xml_data):
|
|||
return int(xml_data.find('opRet').text) == 0
|
||||
|
||||
|
||||
def _gluster_output_cleanup(result):
|
||||
'''
|
||||
Gluster versions prior to 6 have a bug that requires tricking
|
||||
isatty. This adds "gluster> " to the output. Strip it off and
|
||||
produce clean xml for ElementTree.
|
||||
'''
|
||||
ret = ''
|
||||
for line in result.splitlines():
|
||||
if line.startswith('gluster>'):
|
||||
ret += line[9:].strip()
|
||||
else:
|
||||
ret += line.strip()
|
||||
|
||||
return ret
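
A quick sketch of what the cleanup strips, using a made-up single-line fragment of gluster output (the "gluster> " prefix is what the isatty workaround injects):

    raw = 'gluster> <cliOutput><opRet>0</opRet></cliOutput>'
    assert _gluster_output_cleanup(raw) == '<cliOutput><opRet>0</opRet></cliOutput>'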
|
||||
|
||||
|
||||
def _gluster_xml(cmd):
|
||||
'''
|
||||
Perform a gluster --xml command and log result.
|
||||
|
@ -57,10 +67,15 @@ def _gluster_xml(cmd):
|
|||
# We will pass the command string as stdin to allow for much longer
|
||||
# command strings. This is especially useful for creating large volumes
|
||||
# where the list of bricks exceeds 128 characters.
|
||||
root = ET.fromstring(
|
||||
__salt__['cmd.run'](
|
||||
if _get_minor_version() < 6:
|
||||
result = __salt__['cmd.run'](
|
||||
'script -q -c "gluster --xml --mode=script"', stdin="{0}\n\004".format(cmd)
|
||||
)
|
||||
else:
|
||||
result = __salt__['cmd.run'](
|
||||
'gluster --xml --mode=script', stdin="{0}\n".format(cmd)
|
||||
).replace("\n", ""))
|
||||
)
|
||||
root = ET.fromstring(_gluster_output_cleanup(result))
|
||||
if _gluster_ok(root):
|
||||
output = root.find('output')
|
||||
if output:
|
||||
|
|
|
@ -22,7 +22,8 @@ def finger():
|
|||
salt '*' key.finger
|
||||
'''
|
||||
return salt.utils.pem_finger(
|
||||
os.path.join(__opts__['pki_dir'], 'minion.pub')
|
||||
os.path.join(__opts__['pki_dir'], 'minion.pub'),
|
||||
sum_type=__opts__['hash_type']
|
||||
)
|
||||
|
||||
|
||||
|
@ -37,5 +38,6 @@ def finger_master():
|
|||
salt '*' key.finger_master
|
||||
'''
|
||||
return salt.utils.pem_finger(
|
||||
os.path.join(__opts__['pki_dir'], 'minion_master.pub')
|
||||
os.path.join(__opts__['pki_dir'], 'minion_master.pub'),
|
||||
sum_type=__opts__['hash_type']
|
||||
)
|
||||
|
|
|
@ -39,11 +39,11 @@ def __virtual__():
|
|||
return (False, 'Failed to load the mac_service module:\n'
|
||||
'Required binary not found: "/bin/launchctl"')
|
||||
|
||||
if LooseVersion(__grains__['osmajorrelease']) >= '10.11':
|
||||
if LooseVersion(__grains__['osrelease']) >= LooseVersion('10.11'):
|
||||
return (False, 'Failed to load the mac_service module:\n'
|
||||
'Not available on El Capitan, uses mac_service.py')
|
||||
|
||||
if LooseVersion(__grains__['osmajorrelease']) >= '10.10':
|
||||
if LooseVersion(__grains__['osrelease']) >= LooseVersion('10.10'):
|
||||
global BEFORE_YOSEMITE
|
||||
BEFORE_YOSEMITE = False
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ def __virtual__():
|
|||
'''
|
||||
Only work on Mac OS
|
||||
'''
|
||||
if salt.utils.is_darwin() and LooseVersion(__grains__['osmajorrelease']) >= '10.9':
|
||||
if salt.utils.is_darwin() and LooseVersion(__grains__['osrelease']) >= LooseVersion('10.9'):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
|
@ -469,7 +469,7 @@ def config(name,
|
|||
r'''
|
||||
Modify the named service.
|
||||
|
||||
.. versionadded:: 2015.8.6
|
||||
.. versionadded:: 2015.8.8
|
||||
|
||||
Required parameters:
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@ except ImportError:
|
|||
# pylint: enable=import-error,redefined-builtin,no-name-in-module
|
||||
|
||||
from xml.dom import minidom as dom
|
||||
from xml.parsers.expat import ExpatError
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
|
@ -71,6 +72,53 @@ def _zypper(*opts):
|
|||
return cmd
|
||||
|
||||
|
||||
def _is_zypper_error(retcode):
|
||||
'''
|
||||
Return True in case the exit code indicates a zypper error.
|
||||
Otherwise False
|
||||
'''
|
||||
# see man zypper for existing exit codes
|
||||
return not int(retcode) in [0, 100, 101, 102, 103]
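
A minimal sketch of the intended behaviour, assuming the exit-code meanings documented in man zypper (100-103 are informational, e.g. 100 means updates are available; any other non-zero code is a real failure):

    assert not _is_zypper_error(0)    # clean run
    assert not _is_zypper_error(100)  # informational only, not an error
    assert _is_zypper_error(4)        # genuine zypper/libzypp error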
|
||||
|
||||
|
||||
def _zypper_check_result(result, xml=False):
|
||||
'''
|
||||
Check the result of a zypper command. In case of an error, it raises
|
||||
a CommandExecutionError. Otherwise it returns the stdout string of the
|
||||
command.
|
||||
|
||||
result
|
||||
The result of a zypper command called with cmd.run_all
|
||||
|
||||
xml
|
||||
Set to True if the zypper command was called with --xmlout.
|
||||
In this case it tries to read an error message out of the XML
|
||||
stream. Default is False.
|
||||
'''
|
||||
if _is_zypper_error(result['retcode']):
|
||||
msg = list()
|
||||
if not xml:
|
||||
msg.append(result['stderr'] and result['stderr'] or "")
|
||||
else:
|
||||
try:
|
||||
doc = dom.parseString(result['stdout'])
|
||||
except ExpatError as err:
|
||||
log.error(err)
|
||||
doc = None
|
||||
if doc:
|
||||
msg_nodes = doc.getElementsByTagName('message')
|
||||
for node in msg_nodes:
|
||||
if node.getAttribute('type') == 'error':
|
||||
msg.append(node.childNodes[0].nodeValue)
|
||||
elif result['stderr'].strip():
|
||||
msg.append(result['stderr'].strip())
|
||||
|
||||
raise CommandExecutionError("zypper command failed: {0}".format(
|
||||
msg and os.linesep.join(msg) or "Check zypper logs"))
|
||||
|
||||
return result['stdout']
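
A hedged sketch of the calling pattern the rest of this module switches to (the 'vim' search term is arbitrary; _zypper and __salt__ come from the module itself):

    run_data = __salt__['cmd.run_all'](
        _zypper('--xmlout', 'se', 'vim'),
        output_loglevel='trace',
        python_shell=False
    )
    stdout = _zypper_check_result(run_data, xml=True)  # raises CommandExecutionError on real errors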
|
||||
|
||||
|
||||
def list_upgrades(refresh=True):
|
||||
'''
|
||||
List all available package upgrades on this system
|
||||
|
@ -94,15 +142,7 @@ def list_upgrades(refresh=True):
|
|||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
if run_data['retcode'] != 0:
|
||||
msg = list()
|
||||
for chnl in ['stderr', 'stdout']:
|
||||
if run_data.get(chnl, ''):
|
||||
msg.append(run_data[chnl])
|
||||
raise CommandExecutionError(os.linesep.join(msg) or
|
||||
'Zypper returned non-zero system exit. See Zypper logs for more details.')
|
||||
|
||||
doc = dom.parseString(run_data['stdout'])
|
||||
doc = dom.parseString(_zypper_check_result(run_data, xml=True))
|
||||
for update_node in doc.getElementsByTagName('update'):
|
||||
if update_node.getAttribute('kind') == 'package':
|
||||
ret[update_node.getAttribute('name')] = update_node.getAttribute('edition')
|
||||
|
@ -519,13 +559,12 @@ def del_repo(repo):
|
|||
for alias in repos_cfg.sections():
|
||||
if alias == repo:
|
||||
cmd = _zypper('-x', 'rr', '--loose-auth', '--loose-query', alias)
|
||||
doc = dom.parseString(
|
||||
__salt__['cmd.run'](
|
||||
cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
ret = __salt__['cmd.run'](
|
||||
cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
msg = doc.getElementsByTagName('message')
|
||||
if doc.getElementsByTagName('progress') and msg:
|
||||
return {
|
||||
|
@ -619,13 +658,12 @@ def mod_repo(repo, **kwargs):
|
|||
try:
|
||||
# Try to parse the output and find the error,
|
||||
# but this not always working (depends on Zypper version)
|
||||
doc = dom.parseString(
|
||||
__salt__['cmd.run'](
|
||||
_zypper('-x', 'ar', url, repo),
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
ret = __salt__['cmd.run'](
|
||||
_zypper('-x', 'ar', url, repo),
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
except Exception:
|
||||
# No XML output available, but the result is still unknown
|
||||
pass
|
||||
|
@ -676,8 +714,10 @@ def mod_repo(repo, **kwargs):
|
|||
|
||||
if cmd_opt:
|
||||
cmd_opt.append(repo)
|
||||
__salt__['cmd.run'](_zypper('-x', 'mr', *cmd_opt),
|
||||
output_loglevel='trace', python_shell=False)
|
||||
ret = __salt__['cmd.run_all'](_zypper('-x', 'mr', *cmd_opt),
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
_zypper_check_result(ret, xml=True)
|
||||
|
||||
# If the repo was neither added nor modified, an error should be thrown
|
||||
if not added and not cmd_opt:
|
||||
|
@ -708,14 +748,7 @@ def refresh_db():
|
|||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
if call['retcode'] != 0:
|
||||
msg = 'Failed to refresh zypper'
|
||||
if call['stderr']:
|
||||
msg += ': ' + call['stderr']
|
||||
|
||||
raise CommandExecutionError(msg)
|
||||
else:
|
||||
out = call['stdout']
|
||||
out = _zypper_check_result(call)
|
||||
|
||||
for line in out.splitlines():
|
||||
if not line:
|
||||
|
@ -878,13 +911,11 @@ def install(name=None,
|
|||
cmd = cmd_install + targets[:500]
|
||||
targets = targets[500:]
|
||||
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
|
||||
if call['retcode'] != 0:
|
||||
raise CommandExecutionError(call['stderr']) # Fixme: This needs a proper report mechanism.
|
||||
else:
|
||||
for line in call['stdout'].splitlines():
|
||||
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
|
||||
if match:
|
||||
downgrades.append(match.group(1))
|
||||
out = _zypper_check_result(call)
|
||||
for line in out.splitlines():
|
||||
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
|
||||
if match:
|
||||
downgrades.append(match.group(1))
|
||||
|
||||
while downgrades:
|
||||
cmd = cmd_install + ['--force'] + downgrades[:500]
|
||||
|
@ -892,9 +923,7 @@ def install(name=None,
|
|||
out = __salt__['cmd.run_all'](cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
|
||||
if out['retcode'] != 0 and out['stderr']:
|
||||
errors.append(out['stderr'])
|
||||
_zypper_check_result(out)
|
||||
|
||||
__context__.pop('pkg.list_pkgs', None)
|
||||
new = list_pkgs()
|
||||
|
@ -955,7 +984,7 @@ def upgrade(refresh=True, skip_verify=False):
|
|||
redirect_stderr=True
|
||||
)
|
||||
|
||||
if call['retcode'] != 0:
|
||||
if _is_zypper_error(call['retcode']):
|
||||
ret['result'] = False
|
||||
if call['stdout']:
|
||||
ret['comment'] = call['stdout']
|
||||
|
@ -988,10 +1017,7 @@ def _uninstall(name=None, pkgs=None):
|
|||
out = __salt__['cmd.run_all'](cmd,
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
|
||||
if out['retcode'] != 0 and out['stderr']:
|
||||
errors.append(out['stderr'])
|
||||
|
||||
_zypper_check_result(out)
|
||||
targets = targets[500:]
|
||||
|
||||
__context__.pop('pkg.list_pkgs', None)
|
||||
|
@ -1113,7 +1139,10 @@ def clean_locks():
|
|||
if not os.path.exists("/etc/zypp/locks"):
|
||||
return out
|
||||
|
||||
doc = dom.parseString(__salt__['cmd.run'](_zypper('-x', 'cl'), output_loglevel='trace', python_shell=False))
|
||||
ret = __salt__['cmd.run_all'](_zypper('-x', 'cl'),
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
for node in doc.getElementsByTagName("message"):
|
||||
text = node.childNodes[0].nodeValue.lower()
|
||||
if text.startswith(LCK):
|
||||
|
@ -1151,7 +1180,9 @@ def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|||
missing.append(pkg)
|
||||
|
||||
if removed:
|
||||
__salt__['cmd.run'](_zypper('rl', *removed), output_loglevel='trace', python_shell=False)
|
||||
_zypper_check_result(__salt__['cmd.run_all'](_zypper('rl', *removed),
|
||||
output_loglevel='trace',
|
||||
python_shell=False))
|
||||
|
||||
return {'removed': len(removed), 'not_found': missing}
|
||||
|
||||
|
@ -1180,7 +1211,9 @@ def add_lock(packages, **kwargs): # pylint: disable=unused-argument
|
|||
added.append(pkg)
|
||||
|
||||
if added:
|
||||
__salt__['cmd.run'](_zypper('al', *added), output_loglevel='trace', python_shell=False)
|
||||
_zypper_check_result(__salt__['cmd.run_all'](_zypper('al', *added),
|
||||
output_loglevel='trace',
|
||||
python_shell=False))
|
||||
|
||||
return {'added': len(added), 'packages': added}
|
||||
|
||||
|
@ -1312,13 +1345,10 @@ def _get_patterns(installed_only=None):
|
|||
List all known patterns in repos.
|
||||
'''
|
||||
patterns = {}
|
||||
doc = dom.parseString(
|
||||
__salt__['cmd.run'](
|
||||
_zypper('--xmlout', 'se', '-t', 'pattern'),
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
)
|
||||
ret = __salt__['cmd.run_all'](_zypper('--xmlout', 'se', '-t', 'pattern'),
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
for element in doc.getElementsByTagName('solvable'):
|
||||
installed = element.getAttribute('status') == 'installed'
|
||||
if (installed_only and installed) or not installed_only:
|
||||
|
@ -1382,13 +1412,10 @@ def search(criteria, refresh=False):
|
|||
if refresh:
|
||||
refresh_db()
|
||||
|
||||
doc = dom.parseString(
|
||||
__salt__['cmd.run'](
|
||||
_zypper('--xmlout', 'se', criteria),
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
)
|
||||
ret = __salt__['cmd.run_all'](_zypper('--xmlout', 'se', criteria),
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
solvables = doc.getElementsByTagName('solvable')
|
||||
if not solvables:
|
||||
raise CommandExecutionError(
|
||||
|
@ -1449,7 +1476,9 @@ def list_products(all=False, refresh=False):
|
|||
cmd = _zypper('-x', 'products')
|
||||
if not all:
|
||||
cmd.append('-i')
|
||||
doc = dom.parseString(__salt__['cmd.run'](cmd, output_loglevel='trace'))
|
||||
|
||||
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
|
||||
doc = dom.parseString(_zypper_check_result(call, xml=True))
|
||||
for prd in doc.getElementsByTagName('product-list')[0].getElementsByTagName('product'):
|
||||
p_nfo = dict()
|
||||
for k_p_nfo, v_p_nfo in prd.attributes.items():
|
||||
|
@ -1496,13 +1525,10 @@ def download(*packages, **kwargs):
|
|||
if refresh:
|
||||
refresh_db()
|
||||
|
||||
doc = dom.parseString(
|
||||
__salt__['cmd.run'](
|
||||
_zypper('-x', 'download', *packages),
|
||||
output_loglevel='trace',
|
||||
python_shell=False
|
||||
)
|
||||
)
|
||||
ret = __salt__['cmd.run_all'](_zypper('-x', 'download', *packages),
|
||||
output_loglevel='trace',
|
||||
python_shell=False)
|
||||
doc = dom.parseString(_zypper_check_result(ret, xml=True))
|
||||
pkg_ret = {}
|
||||
for dld_result in doc.getElementsByTagName('download-result'):
|
||||
repo = dld_result.getElementsByTagName('repository')[0]
|
||||
|
|
|
@ -128,14 +128,12 @@ def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
|
|||
- web
|
||||
- dev
|
||||
'''
|
||||
|
||||
name = re.sub(delimiter, DEFAULT_TARGET_DELIM, name)
|
||||
ret = {'name': name,
|
||||
'changes': {},
|
||||
'result': True,
|
||||
'comment': ''}
|
||||
grain = __salt__['grains.get'](name)
|
||||
|
||||
if grain:
|
||||
# check whether grain is a list
|
||||
if not isinstance(grain, list):
|
||||
|
@ -146,6 +144,17 @@ def list_present(name, value, delimiter=DEFAULT_TARGET_DELIM):
|
|||
if set(value).issubset(set(__salt__['grains.get'](name))):
|
||||
ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
|
||||
return ret
|
||||
elif name in __context__.get('pending_grains', {}):
|
||||
# elements common to both
|
||||
intersection = set(value).intersection(__context__.get('pending_grains', {})[name])
|
||||
if intersection:
|
||||
value = list(set(value).difference(__context__['pending_grains'][name]))
|
||||
ret['comment'] = 'Removed value {0} from update due to context found in "{1}".\n'.format(value, name)
|
||||
if 'pending_grains' not in __context__:
|
||||
__context__['pending_grains'] = {}
|
||||
if name not in __context__['pending_grains']:
|
||||
__context__['pending_grains'][name] = set()
|
||||
__context__['pending_grains'][name].update(value)
|
||||
else:
|
||||
if value in grain:
|
||||
ret['comment'] = 'Value {1} is already in grain {0}'.format(name, value)
|
||||
|
|
|
@ -29,7 +29,7 @@ def __virtual__():
|
|||
'''
|
||||
Only work on Mac OS
|
||||
'''
|
||||
if salt.utils.is_darwin() and LooseVersion(__grains__['osmajorrelease']) >= '10.9':
|
||||
if salt.utils.is_darwin() and LooseVersion(__grains__['osrelease']) >= LooseVersion('10.9'):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
|
@ -1,24 +1,24 @@
|
|||
{%- set iface = data.data.inet -%}
|
||||
{% if iface['user'] %}user {{ iface['user']}}
|
||||
{% if iface['password']%}password {{ iface['password'] }} {% endif %}
|
||||
plugin rp-pppoe.so {{iface['pppoe_iface']|default('eth0')}}
|
||||
plugin rp-pppoe.so {{iface['pppoe_iface']|default('eth0')}}
|
||||
|
||||
{% if iface['noipdefault']!="False"|default("True") %}noipdefault{% endif %}
|
||||
{% if iface['usepeerdns']!="False"|default("True") %}usepeerdns {% endif %}
|
||||
{% if iface['noipdefault']!="False"|default("True") %}noipdefault{% endif %}
|
||||
{% if iface['usepeerdns']!="False"|default("True") %}usepeerdns {% endif %}
|
||||
{% if iface['defaultroute']!="False"|default("True") %}defaultroute{% endif %}
|
||||
|
||||
{% if iface['holdoff']!=""|default('15') %}holdoff {{iface['holdoff']|default('15')}}{% endif %}
|
||||
{% if iface['maxfail']!=""|default('0') %}maxfail {{iface['maxfail']|default('0')}}{% endif %}
|
||||
|
||||
{% if iface['hide-password']!="False"|default("True") %}hide-password{% endif %}
|
||||
{% if iface['lcp-echo-interval']!=""|default('20') %}lcp-echo-interval {{ iface['lcp-echo-interval']|default('20')}} {% endif %}
|
||||
{% if iface['lcp-echo-interval']!=""|default('20') %}lcp-echo-interval {{ iface['lcp-echo-interval']|default('20')}} {% endif %}
|
||||
{% if iface['lcp-echo-failure']!=""|default('3') %}lcp-echo-failure {{iface['lcp-echo-failure']|default('3')}} {% endif %}
|
||||
|
||||
{% if iface['noauth']!="False"|default("True") %}noauth{% endif %}
|
||||
{% if iface['persist']!="False"|default("True") %}persist{% endif %}
|
||||
{% if iface['mtu']!=""|default('1492') %}mtu {{iface['mtu']|default('1492')}} {% endif %}
|
||||
{% if iface['noaccomp']!="False"|default("True") %}noaccomp{% endif %}
|
||||
{% if iface['default-asyncmap'] %}default-asyncmap{% endif %}
|
||||
{% if iface['persist']!="False"|default("True") %}persist{% endif %}
|
||||
{% if iface['mtu']!=""|default('1492') %}mtu {{iface['mtu']|default('1492')}} {% endif %}
|
||||
{% if iface['noaccomp']!="False"|default("True") %}noaccomp{% endif %}
|
||||
{% if iface['default-asyncmap'] %}default-asyncmap{% endif %}
|
||||
{% if iface['linkname'] is defined %}
|
||||
linkname {{ iface['linkname'] }}
|
||||
{% endif %}
|
||||
|
|
|
@ -62,8 +62,9 @@ class SyncWrapper(object):
|
|||
def __getattribute__(self, key):
|
||||
try:
|
||||
return object.__getattribute__(self, key)
|
||||
except AttributeError:
|
||||
pass
|
||||
except AttributeError as ex:
|
||||
if key == 'async':
|
||||
raise ex
|
||||
attr = getattr(self.async, key)
|
||||
if hasattr(attr, '__call__'):
|
||||
def wrap(*args, **kwargs):
|
||||
|
|
334
tests/integration/fileclient_test.py
Normal file
|
@ -0,0 +1,334 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: :email:`Erik Johnson <erik@saltstack.com>`
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import skipIf
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON
|
||||
|
||||
ensure_in_syspath('../..')
|
||||
|
||||
# Import Python libs
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
|
||||
# Import salt libs
|
||||
import integration
|
||||
import salt.utils
|
||||
from salt import fileclient
|
||||
from salt.ext import six
|
||||
from salttesting.helpers import ensure_in_syspath, destructiveTest
|
||||
ensure_in_syspath('..')
|
||||
|
||||
SALTENVS = ('base', 'dev')
|
||||
FS_ROOT = os.path.join(integration.TMP, 'fileclient_fs_root')
|
||||
CACHE_ROOT = os.path.join(integration.TMP, 'fileclient_cache_root')
|
||||
SUBDIR = 'subdir'
|
||||
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
|
||||
|
||||
|
||||
def _get_file_roots():
|
||||
return dict(
|
||||
[(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
|
||||
)
|
||||
|
||||
|
||||
fileclient.__opts__ = {}
|
||||
MOCKED_OPTS = {
|
||||
'file_roots': _get_file_roots(),
|
||||
'fileserver_backend': ['roots'],
|
||||
'cachedir': CACHE_ROOT,
|
||||
'file_client': 'local',
|
||||
}
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
@destructiveTest
|
||||
class FileclientTest(integration.ModuleCase):
|
||||
'''
|
||||
Tests for the fileclient. The LocalClient is the only thing we can test as
|
||||
it is the only way we can mock the fileclient (the tests run from the
|
||||
minion process, so the master cannot be mocked from test code).
|
||||
'''
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
No need to add a dummy foo.txt to muddy up the github repo, just make
|
||||
our own fileserver root on-the-fly.
|
||||
'''
|
||||
def _new_dir(path):
|
||||
'''
|
||||
Add a new dir at ``path`` using os.makedirs. If the directory
|
||||
already exists, remove it recursively and then try to create it
|
||||
again.
|
||||
'''
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.EEXIST:
|
||||
# Just in case a previous test was interrupted, remove the
|
||||
# directory and try adding it again.
|
||||
shutil.rmtree(path)
|
||||
os.makedirs(path)
|
||||
else:
|
||||
raise
|
||||
|
||||
# Create the FS_ROOT
|
||||
for saltenv in SALTENVS:
|
||||
saltenv_root = os.path.join(FS_ROOT, saltenv)
|
||||
# Make sure we have a fresh root dir for this saltenv
|
||||
_new_dir(saltenv_root)
|
||||
|
||||
path = os.path.join(saltenv_root, 'foo.txt')
|
||||
with salt.utils.fopen(path, 'w') as fp_:
|
||||
fp_.write(
|
||||
'This is a test file in the \'{0}\' saltenv.\n'
|
||||
.format(saltenv)
|
||||
)
|
||||
|
||||
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
|
||||
os.makedirs(subdir_abspath)
|
||||
for subdir_file in SUBDIR_FILES:
|
||||
path = os.path.join(subdir_abspath, subdir_file)
|
||||
with salt.utils.fopen(path, 'w') as fp_:
|
||||
fp_.write(
|
||||
'This is file \'{0}\' in subdir \'{1}\' from saltenv '
|
||||
'\'{2}\''.format(subdir_file, SUBDIR, saltenv)
|
||||
)
|
||||
|
||||
# Create the CACHE_ROOT
|
||||
_new_dir(CACHE_ROOT)
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
Remove the directories created for these tests
|
||||
'''
|
||||
shutil.rmtree(FS_ROOT)
|
||||
shutil.rmtree(CACHE_ROOT)
|
||||
|
||||
def test_cache_dir(self):
|
||||
'''
|
||||
Ensure entire directory is cached to correct location
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_dir(
|
||||
'salt://{0}'.format(SUBDIR),
|
||||
saltenv,
|
||||
cachedir=None
|
||||
)
|
||||
)
|
||||
for subdir_file in SUBDIR_FILES:
|
||||
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
|
||||
'files',
|
||||
saltenv,
|
||||
SUBDIR,
|
||||
subdir_file)
|
||||
# Double check that the content of the cached file
|
||||
# identifies it as being from the correct saltenv. The
|
||||
# setUp function creates the file with the name of the
|
||||
# saltenv mentioned in the file, so a simple 'in' check is
|
||||
# sufficient here. If opening the file raises an exception,
|
||||
# this is a problem, so we are not catching the exception
|
||||
# and letting it be raised so that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(subdir_file in content)
|
||||
self.assertTrue(SUBDIR in content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
|
||||
'''
|
||||
Ensure entire directory is cached to correct location when an alternate
|
||||
cachedir is specified and that cachedir is an absolute path
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_dir(
|
||||
'salt://{0}'.format(SUBDIR),
|
||||
saltenv,
|
||||
cachedir=alt_cachedir
|
||||
)
|
||||
)
|
||||
for subdir_file in SUBDIR_FILES:
|
||||
cache_loc = os.path.join(alt_cachedir,
|
||||
'files',
|
||||
saltenv,
|
||||
SUBDIR,
|
||||
subdir_file)
|
||||
# Double check that the content of the cached file
|
||||
# identifies it as being from the correct saltenv. The
|
||||
# setUp function creates the file with the name of the
|
||||
# saltenv mentioned in the file, so a simple 'in' check is
|
||||
# sufficient here. If opening the file raises an exception,
|
||||
# this is a problem, so we are not catching the exception
|
||||
# and letting it be raised so that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(subdir_file in content)
|
||||
self.assertTrue(SUBDIR in content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
|
||||
'''
|
||||
Ensure entire directory is cached to correct location when an alternate
|
||||
cachedir is specified and that cachedir is a relative path
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
alt_cachedir = 'foo'
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_dir(
|
||||
'salt://{0}'.format(SUBDIR),
|
||||
saltenv,
|
||||
cachedir=alt_cachedir
|
||||
)
|
||||
)
|
||||
for subdir_file in SUBDIR_FILES:
|
||||
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
|
||||
alt_cachedir,
|
||||
'files',
|
||||
saltenv,
|
||||
SUBDIR,
|
||||
subdir_file)
|
||||
# Double check that the content of the cached file
|
||||
# identifies it as being from the correct saltenv. The
|
||||
# setUp function creates the file with the name of the
|
||||
# saltenv mentioned in the file, so a simple 'in' check is
|
||||
# sufficient here. If opening the file raises an exception,
|
||||
# this is a problem, so we are not catching the exception
|
||||
# and letting it be raised so that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(subdir_file in content)
|
||||
self.assertTrue(SUBDIR in content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
def test_cache_file(self):
|
||||
'''
|
||||
Ensure file is cached to correct location
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
|
||||
)
|
||||
cache_loc = os.path.join(
|
||||
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
|
||||
# Double check that the content of the cached file identifies
|
||||
# it as being from the correct saltenv. The setUp function
|
||||
# creates the file with the name of the saltenv mentioned in
|
||||
# the file, so a simple 'in' check is sufficient here. If
|
||||
# opening the file raises an exception, this is a problem, so
|
||||
# we are not catching the exception and letting it be raised so
|
||||
# that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
|
||||
'''
|
||||
Ensure file is cached to correct location when an alternate cachedir is
|
||||
specified and that cachedir is an absolute path
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
alt_cachedir = os.path.join(integration.TMP, 'abs_cachedir')
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_file('salt://foo.txt',
|
||||
saltenv,
|
||||
cachedir=alt_cachedir)
|
||||
)
|
||||
cache_loc = os.path.join(alt_cachedir,
|
||||
'files',
|
||||
saltenv,
|
||||
'foo.txt')
|
||||
# Double check that the content of the cached file identifies
|
||||
# it as being from the correct saltenv. The setUp function
|
||||
# creates the file with the name of the saltenv mentioned in
|
||||
# the file, so a simple 'in' check is sufficient here. If
|
||||
# opening the file raises an exception, this is a problem, so
|
||||
# we are not catching the exception and letting it be raised so
|
||||
# that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
|
||||
'''
|
||||
Ensure file is cached to correct location when an alternate cachedir is
|
||||
specified and that cachedir is a relative path
|
||||
'''
|
||||
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
|
||||
patched_opts.update(MOCKED_OPTS)
|
||||
alt_cachedir = 'foo'
|
||||
|
||||
with patch.dict(fileclient.__opts__, patched_opts):
|
||||
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
|
||||
for saltenv in SALTENVS:
|
||||
self.assertTrue(
|
||||
client.cache_file('salt://foo.txt',
|
||||
saltenv,
|
||||
cachedir=alt_cachedir)
|
||||
)
|
||||
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
|
||||
alt_cachedir,
|
||||
'files',
|
||||
saltenv,
|
||||
'foo.txt')
|
||||
# Double check that the content of the cached file identifies
|
||||
# it as being from the correct saltenv. The setUp function
|
||||
# creates the file with the name of the saltenv mentioned in
|
||||
# the file, so a simple 'in' check is sufficient here. If
|
||||
# opening the file raises an exception, this is a problem, so
|
||||
# we are not catching the exception and letting it be raised so
|
||||
# that the test fails.
|
||||
with salt.utils.fopen(cache_loc) as fp_:
|
||||
content = fp_.read()
|
||||
log.debug('cache_loc = %s', cache_loc)
|
||||
log.debug('content = %s', content)
|
||||
self.assertTrue(saltenv in content)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
integration.run_tests(FileclientTest)
|
|
@ -5,4 +5,4 @@
|
|||
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
|
||||
debian_chroot=$(cat /etc/debian_chroot)
|
||||
fi
|
||||
|
||||
|
||||
|
|
|
@ -5,4 +5,4 @@
|
|||
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
|
||||
. /etc/bash_completion
|
||||
fi
|
||||
|
||||
|
||||
|
|
15
tests/integration/files/file/base/issue-31427.sls
Normal file
|
@ -0,0 +1,15 @@
|
|||
roles:
|
||||
grains.list_present:
|
||||
- name: roles
|
||||
- value:
|
||||
- elliptic
|
||||
- parabolic
|
||||
- hyperbolic
|
||||
|
||||
additional-roles:
|
||||
grains.list_present:
|
||||
- name: roles
|
||||
- value:
|
||||
- hyperbolic
|
||||
- diabolic
|
||||
|
|
@ -11,7 +11,7 @@ A:
|
|||
- connection_pass: poney
|
||||
- connection_use_unicode: True
|
||||
- connection_charset: utf8
|
||||
- saltenv:
|
||||
- saltenv:
|
||||
- LC_ALL: "en_US.utf8"
|
||||
B:
|
||||
mysql_database.absent:
|
||||
|
@ -22,7 +22,7 @@ B:
|
|||
- connection_pass: poney
|
||||
- connection_use_unicode: True
|
||||
- connection_charset: utf8
|
||||
- saltenv:
|
||||
- saltenv:
|
||||
- LC_ALL: "en_US.utf8"
|
||||
- require:
|
||||
- mysql_database: A
|
||||
|
|
|
@ -4,7 +4,7 @@ fail_stage:
|
|||
- failparse
|
||||
req_fail:
|
||||
match: '*'
|
||||
sls:
|
||||
sls:
|
||||
- fail
|
||||
require:
|
||||
- fail_stage
|
||||
|
|
Some files were not shown because too many files have changed in this diff.