Mirror of https://github.com/saltstack/salt.git (synced 2025-04-17 10:10:20 +00:00)

Merge branch 'master' into master-port/53508

Commit f8ab0f1865
285 changed files with 11889 additions and 3134 deletions
@@ -304,6 +304,207 @@ repos:
          - --py-version=3.7
          - --platform=linux

      - id: pip-tools-compile
        alias: compile-linux-py3.8-zmq-requirements
        name: Linux Py3.8 ZeroMQ Requirements
        files: ^requirements/((base|zeromq|pytest)\.txt|static/linux\.in)$
        exclude: ^requirements/static/(centos-6|amzn-2018\.03|lint|cloud|docs|darwin|windows)\.in$
        args:
          - -v
          - --py-version=3.8
          - --platform=linux
          - --include=requirements/base.txt
          - --include=requirements/zeromq.txt
          - --include=requirements/pytest.txt

      - id: pip-tools-compile
        alias: compile-darwin-py3.8-zmq-requirements
        name: Darwin Py3.8 ZeroMQ Requirements
        files: ^(pkg/osx/(req|req_ext)\.txt|requirements/((base|zeromq|pytest)\.txt|static/darwin\.in))$
        args:
          - -v
          - --py-version=3.8
          - --platform=darwin
          - --include=pkg/osx/req.txt
          - --include=pkg/osx/req_ext.txt
          - --include=requirements/base.txt
          - --include=requirements/zeromq.txt
          - --include=requirements/pytest.txt
          - --passthrough-line-from-input=^pyobjc(.*)$

      # Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
      # - id: pip-tools-compile
      #   alias: compile-windows-py3.8-zmq-requirements
      #   name: Windows Py3.8 ZeroMQ Requirements
      #   files: ^(pkg/windows/(req|req_win)\.txt|requirements/((base|zeromq|pytest)\.txt|static/windows\.in))$
      #   args:
      #     - -v
      #     - --py-version=3.8
      #     - --platform=windows
      #     - --include=pkg/windows/req.txt
      #     - --include=pkg/windows/req_win.txt
      #     - --include=requirements/base.txt
      #     - --include=requirements/zeromq.txt
      #     - --include=requirements/pytest.txt

      - id: pip-tools-compile
        alias: compile-cloud-py3.8-requirements
        name: Cloud Py3.8 Requirements
        files: ^requirements/(static/cloud\.in)$
        args:
          - -v
          - --py-version=3.8

      - id: pip-tools-compile
        alias: compile-doc-requirements
        name: Docs Py3.8 Requirements
        files: ^requirements/((base|zeromq|pytest)\.txt|static/docs\.in)$
        args:
          - -v
          - --py-version=3.8
          - --platform=linux

      - id: pip-tools-compile
        alias: compile-linux-crypto-py3.8-requirements
        name: Linux Py3.8 Crypto Requirements
        files: ^requirements/(crypto\.txt|static/crypto\.in)$
        args:
          - -v
          - --py-version=3.8
          - --platform=linux
          - --out-prefix=linux

      - id: pip-tools-compile
        alias: compile-darwin-crypto-py3.8-requirements
        name: Darwin Py3.8 Crypto Requirements
        files: ^requirements/(crypto\.txt|static/crypto\.in)$
        args:
          - -v
          - --py-version=3.8
          - --platform=darwin
          - --out-prefix=darwin

      # Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
      # - id: pip-tools-compile
      #   alias: compile-windows-crypto-py3.8-requirements
      #   name: Windows Py3.8 Crypto Requirements
      #   files: ^requirements/(crypto\.txt|static/crypto\.in)$
      #   args:
      #     - -v
      #     - --py-version=3.8
      #     - --platform=windows
      #     - --out-prefix=windows

      - id: pip-tools-compile
        alias: compile-lint-py3.8-requirements
        name: Lint Py3.8 Requirements
        files: ^requirements/static/lint\.in$
        args:
          - -v
          - --py-version=3.8
          - --platform=linux


      - id: pip-tools-compile
        alias: compile-linux-py3.9-zmq-requirements
        name: Linux Py3.9 ZeroMQ Requirements
        files: ^requirements/((base|zeromq|pytest)\.txt|static/linux\.in)$
        exclude: ^requirements/static/(centos-6|amzn-2018\.03|lint|cloud|docs|darwin|windows)\.in$
        args:
          - -v
          - --py-version=3.9
          - --platform=linux
          - --include=requirements/base.txt
          - --include=requirements/zeromq.txt
          - --include=requirements/pytest.txt

      - id: pip-tools-compile
        alias: compile-darwin-py3.9-zmq-requirements
        name: Darwin Py3.9 ZeroMQ Requirements
        files: ^(pkg/osx/(req|req_ext)\.txt|requirements/((base|zeromq|pytest)\.txt|static/darwin\.in))$
        args:
          - -v
          - --py-version=3.9
          - --platform=darwin
          - --include=pkg/osx/req.txt
          - --include=pkg/osx/req_ext.txt
          - --include=requirements/base.txt
          - --include=requirements/zeromq.txt
          - --include=requirements/pytest.txt
          - --passthrough-line-from-input=^pyobjc(.*)$

      # Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
      # - id: pip-tools-compile
      #   alias: compile-windows-py3.9-zmq-requirements
      #   name: Windows Py3.9 ZeroMQ Requirements
      #   files: ^(pkg/windows/(req|req_win)\.txt|requirements/((base|zeromq|pytest)\.txt|static/windows\.in))$
      #   args:
      #     - -v
      #     - --py-version=3.9
      #     - --platform=windows
      #     - --include=pkg/windows/req.txt
      #     - --include=pkg/windows/req_win.txt
      #     - --include=requirements/base.txt
      #     - --include=requirements/zeromq.txt
      #     - --include=requirements/pytest.txt

      - id: pip-tools-compile
        alias: compile-cloud-py3.9-requirements
        name: Cloud Py3.9 Requirements
        files: ^requirements/(static/cloud\.in)$
        args:
          - -v
          - --py-version=3.9

      - id: pip-tools-compile
        alias: compile-doc-requirements
        name: Docs Py3.9 Requirements
        files: ^requirements/((base|zeromq|pytest)\.txt|static/docs\.in)$
        args:
          - -v
          - --py-version=3.9
          - --platform=linux

      - id: pip-tools-compile
        alias: compile-linux-crypto-py3.9-requirements
        name: Linux Py3.9 Crypto Requirements
        files: ^requirements/(crypto\.txt|static/crypto\.in)$
        args:
          - -v
          - --py-version=3.9
          - --platform=linux
          - --out-prefix=linux

      - id: pip-tools-compile
        alias: compile-darwin-crypto-py3.9-requirements
        name: Darwin Py3.9 Crypto Requirements
        files: ^requirements/(crypto\.txt|static/crypto\.in)$
        args:
          - -v
          - --py-version=3.9
          - --platform=darwin
          - --out-prefix=darwin

      # Commented out since pywin32 and pymssql do not have packages or support for Py >= 3.8
      # - id: pip-tools-compile
      #   alias: compile-windows-crypto-py3.9-requirements
      #   name: Windows Py3.9 Crypto Requirements
      #   files: ^requirements/(crypto\.txt|static/crypto\.in)$
      #   args:
      #     - -v
      #     - --py-version=3.9
      #     - --platform=windows
      #     - --out-prefix=windows

      - id: pip-tools-compile
        alias: compile-lint-py3.9-requirements
        name: Lint Py3.9 Requirements
        files: ^requirements/static/lint\.in$
        args:
          - -v
          - --py-version=3.9
          - --platform=linux

  - repo: https://github.com/timothycrosley/isort
    rev: "1e78a9acf3110e1f9721feb591f89a451fc9876a"
    hooks:
@@ -319,7 +520,7 @@ repos:
        )$

  - repo: https://github.com/psf/black
    rev: 19.10b0
    rev: stable
    hooks:
      - id: black
        # This tells pre-commit not to pass files to black.

@@ -331,6 +532,7 @@ repos:
            tests/kitchen/.*
        )$


  - repo: https://github.com/saltstack/salt-nox-pre-commit
    rev: master
    hooks:
@@ -13,6 +13,10 @@ Versions are `MAJOR.PATCH`.
### Deprecated

### Changed
- [#56751](https://github.com/saltstack/salt/pull/56751) - Backport 49981

- [#56731](https://github.com/saltstack/salt/pull/56731) - Backport #53994
- [#56753](https://github.com/saltstack/salt/pull/56753) - Backport 51095

### Fixed
- [#56237](https://github.com/saltstack/salt/pull/56237) - Fix alphabetical ordering and remove duplicates across all documentation indexes - [@myii](https://github.com/myii)

@@ -20,6 +24,7 @@ Versions are `MAJOR.PATCH`.

### Added
- [#56627](https://github.com/saltstack/salt/pull/56627) - Add new salt-ssh set_path option
- [#51379](https://github.com/saltstack/salt/pull/56792) - Backport 51379 : Adds .set_domain_workgroup to win_system

## 3000.1

@@ -28,6 +33,7 @@ Versions are `MAJOR.PATCH`.
### Deprecated

### Changed
- [#56730](https://github.com/saltstack/salt/pull/56730) - Backport #52992

### Fixed
COPYING (158 lines; file deleted)
@@ -1,158 +0,0 @@
|
|||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: salt
|
||||
Upstream-Contact: salt-users@googlegroups.com
|
||||
Source: https://github.com/saltstack/salt
|
||||
|
||||
Files: *
|
||||
Copyright: 2014 SaltStack Team
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
Files: debian/*
|
||||
Copyright: 2013 Joe Healy <joehealy@gmail.com>
|
||||
2012 Michael Prokop <mika@debian.org>
|
||||
2012 Christian Hofstaedtler <christian@hofstaedtler.name>
|
||||
2012 Ulrich Dangel <mru@spamt.net>
|
||||
2012 Corey Quinn <corey@sequestered.net>
|
||||
2011 Aaron Toponce <aaron.toponce@gmail.com>
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
Files: salt/auth/pam.py
|
||||
Copyright: 2007 Chris AtLee <chris@atlee.ca>
|
||||
License: MIT License
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
Files: doc/_ext/youtube.py
|
||||
Copyright: 2009 Chris Pickel <sfiera@gmail.com>
|
||||
License: BSD-2-clause
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
.
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
Files: salt/ext/six.py
|
||||
Copyright: 2010-2014 Benjamin Peterson
|
||||
License: MIT License
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
Files: doc/_ext/images
|
||||
Copyright: 2013 SaltStack Team
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
.
|
||||
Files in this directory were created in-house.
|
||||
|
||||
Files: tests/utils/cptestcase.py
|
||||
Copyright: (c) 2014 Adam Hajari
|
||||
The MIT License (MIT)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
LICENSE (353 changed lines)
@@ -1,6 +1,192 @@
|
|||
Salt - Remote execution system
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Copyright 2014-2019 SaltStack Team
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@@ -14,3 +200,166 @@
|
|||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
===========================================================================
|
||||
|
||||
Below is a summary of the licensing used by external modules that are
|
||||
bundled with SaltStack.
|
||||
|
||||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: salt
|
||||
Upstream-Contact: salt-users@googlegroups.com
|
||||
Source: https://github.com/saltstack/salt
|
||||
|
||||
Files: *
|
||||
Copyright: 2014 SaltStack Team
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
Files: debian/*
|
||||
Copyright: 2013 Joe Healy <joehealy@gmail.com>
|
||||
2012 Michael Prokop <mika@debian.org>
|
||||
2012 Christian Hofstaedtler <christian@hofstaedtler.name>
|
||||
2012 Ulrich Dangel <mru@spamt.net>
|
||||
2012 Corey Quinn <corey@sequestered.net>
|
||||
2011 Aaron Toponce <aaron.toponce@gmail.com>
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
Files: salt/auth/pam.py
|
||||
Copyright: 2007 Chris AtLee <chris@atlee.ca>
|
||||
License: MIT License
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
Files: doc/_ext/youtube.py
|
||||
Copyright: 2009 Chris Pickel <sfiera@gmail.com>
|
||||
License: BSD-2-clause
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
.
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
Files: salt/ext/six.py
|
||||
Copyright: 2010-2014 Benjamin Peterson
|
||||
License: MIT License
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
Files: doc/_ext/images
|
||||
Copyright: 2013 SaltStack Team
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License, Version 2.0 can be
|
||||
found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
.
|
||||
Files in this directory were created in-house.
|
||||
|
||||
Files: tests/utils/cptestcase.py
|
||||
Copyright: (c) 2014 Adam Hajari
|
||||
The MIT License (MIT)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
@@ -78,3 +78,10 @@ services`_ offerings.
.. _PyCryptodome: https://pypi.org/project/pycryptodome/
.. _Issue #52674: https://github.com/saltstack/salt/issues/52674
.. _Issue #54115: https://github.com/saltstack/salt/issues/54115

License
=======

SaltStack is licensed by the SaltStack Team under the Apache 2.0 license. Please see the
LICENSE file for the full text of the Apache license, followed by a full summary
of the licensing used by external modules.
changelog/56186.fixed (new file, 1 line)
@@ -0,0 +1 @@
Pillar data is correctly included from `init.sls` file.

changelog/7424.added (new file, 1 line)
@@ -0,0 +1 @@
Added `validate` to tls module.

changelog/8875.added (new file, 1 line)
@@ -0,0 +1 @@
Pillar relative includes.
@@ -677,7 +677,9 @@

# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
#master_roots: /srv/salt-master
#master_roots:
#  base:
#    - /srv/salt-master

# When using multiple environments, each with their own top file, the
# default behaviour is an unordered merge. To prevent top files from
@@ -161,6 +161,8 @@ MOCK_MODULES = [
    "jnpr.junos.utils.config",
    "jnpr.junos.utils.sw",
    "keyring",
    "kubernetes",
    "kubernetes.config",
    "libvirt",
    "lxml",
    "lxml.etree",
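The two new entries ("kubernetes" and "kubernetes.config") follow the usual Sphinx trick of stubbing out heavy imports at docs-build time. A minimal sketch of that pattern (illustrative only, not the exact contents of doc/conf.py):

.. code-block:: python

    import sys
    from unittest import mock

    MOCK_MODULES = ["kubernetes", "kubernetes.config", "libvirt"]

    # Register a dummy module object for each name so that code doing
    # ``import kubernetes.config`` still imports cleanly while autodoc
    # builds the documentation on a machine without those packages.
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = mock.MagicMock()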
@@ -284897,7 +284897,7 @@ new
all
.TP
.B note
If you see the following error, you\(aqll need to upgrade \fBrequests\fP to atleast 2.4.2
If you see the following error, you\(aqll need to upgrade \fBrequests\fP to at least 2.4.2
.UNINDENT
.INDENT 0.0
.INDENT 3.5
@@ -2654,14 +2654,18 @@ nothing is ignored.
``master_roots``
----------------

Default: ``/srv/salt-master``
Default: ``''``

A master-only copy of the :conf_master:`file_roots` dictionary, used by the
state compiler.

Example:

.. code-block:: yaml

    master_roots: /srv/salt-master
    master_roots:
      base:
        - /srv/salt-master

roots: Master's Local File Server
---------------------------------
@@ -66,6 +66,7 @@ state modules
    boto_sqs
    boto_vpc
    bower
    btrfs
    cabal
    ceph
    chef

doc/ref/states/all/salt.states.btrfs.rst (new file, 6 lines)
@@ -0,0 +1,6 @@
=================
salt.states.btrfs
=================

.. automodule:: salt.states.btrfs
    :members:
@@ -232,6 +232,10 @@ There are several corresponding requisite_any statements:
* ``onchanges_any``
* ``onfail_any``

Lastly, onfail has one special ``onfail_all`` form to account for when `AND`
logic is desired instead of the default `OR` logic of onfail/onfail_any (which
are equivalent).

All of the requisites define specific relationships and always work with the
dependency logic defined above.

@@ -374,6 +378,63 @@ exactly like the ``require`` requisite (the watching state will execute if

.. note::

    If the watching state ``changes`` key contains values, then ``mod_watch``
    will not be called. If you're using ``watch`` or ``watch_in`` then it's a
    good idea to have a state that only enforces one attribute - such as
    splitting out ``service.running`` into its own state and have
    ``service.enabled`` in another.

    One common source of confusion is expecting ``mod_watch`` to be called for
    every necessary change. You might be tempted to write something like this:

    .. code-block:: yaml

        httpd:
          service.running:
            - enable: True
            - watch:
              - file: httpd-config

        httpd-config:
          file.managed:
            - name: /etc/httpd/conf/httpd.conf
            - source: salt://httpd/files/apache.conf

    If your service is already running but not enabled, you might expect that Salt
    will be able to tell that since the config file changed your service needs to
    be restarted. This is not the case. Because the service needs to be enabled,
    that change will be made and ``mod_watch`` will never be triggered. In this
    case, changes to your ``apache.conf`` will fail to be loaded. If you want to
    ensure that your service always reloads the correct way to handle this is
    either ensure that your service is not running before applying your state, or
    simply make sure that ``service.running`` is in a state on its own:

    .. code-block:: yaml

        enable-httpd:
          service.enabled:
            - name: httpd

        start-httpd:
          service.running:
            - name: httpd
            - watch:
              - file: httpd-config

        httpd-config:
          file.managed:
            - name: /etc/httpd/conf/httpd.conf
            - source: salt://httpd/files/apache.conf

    Now that ``service.running`` is its own state, changes to ``service.enabled``
    will no longer prevent ``mod_watch`` from getting triggered, so your ``httpd``
    service will get restarted like you want.

.. _requisites-listen:

listen
~~~~~~

Not all state modules contain ``mod_watch``. If ``mod_watch`` is absent
from the watching state module, the ``watch`` requisite behaves exactly
like a ``require`` requisite.

@@ -521,6 +582,46 @@ The ``onfail`` requisite is applied in the same way as ``require`` as ``watch``:
      - onfail:
        - mount: primary_mount

.. code-block:: yaml

    build_site:
      cmd.run:
        - name: /srv/web/app/build_site

    notify-build_failure:
      hipchat.send_message:
        - room_id: 123456
        - message: "Building website fail on {{ salt.grains.get('id') }}"


The default behavior of the ``onfail`` when multiple requisites are listed is
the opposite of other requisites in the salt state engine, it acts by default
like ``any()`` instead of ``all()``. This means that when you list multiple
onfail requisites on a state, if *any* fail the requisite will be satisfied.
If you instead need *all* logic to be applied, you can use ``onfail_all``
form:

.. code-block:: yaml

    test_site_a:
      cmd.run:
        - name: ping -c1 10.0.0.1

    test_site_b:
      cmd.run:
        - name: ping -c1 10.0.0.2

    notify_site_down:
      hipchat.send_message:
        - room_id: 123456
        - message: "Both primary and backup sites are down!"
        - onfail_all:
          - cmd: test_site_a
          - cmd: test_site_b

In this contrived example `notify_site_down` will run when both 10.0.0.1 and
10.0.0.2 fail to respond to ping.

.. note::

    Setting failhard (:ref:`globally <global-failhard>` or in

@@ -535,6 +636,8 @@ The ``onfail`` requisite is applied in the same way as ``require`` as ``watch``:
Beginning in the ``2016.11.0`` release of Salt, ``onfail`` uses OR logic for
multiple listed ``onfail`` requisites. Prior to the ``2016.11.0`` release,
``onfail`` used AND logic. See `Issue #22370`_ for more information.
Beginning in the ``Neon`` release of Salt, a new ``onfail_all`` requisite
form is available if AND logic is desired.

.. _Issue #22370: https://github.com/saltstack/salt/issues/22370
@@ -69,16 +69,6 @@ dynamic modules when states are run. To disable this behavior set
When dynamic modules are autoloaded via states, only the modules defined in the
same saltenvs as the states currently being run.

Also it is possible to use the explicit ``saltutil.sync_*`` :py:mod:`state functions <salt.states.saltutil>`
to sync the modules (previously it was necessary to use the ``module.run`` state):

.. code-block::yaml

    synchronize_modules:
      saltutil.sync_modules:
        - refresh: True


Sync Via the saltutil Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -350,7 +340,7 @@ SDB

* :ref:`Writing SDB Modules <sdb-writing-modules>`

SDB is a way to store data that's not associated with a minion. See
SDB is a way to store data that's not associated with a minion. See
:ref:`Storing Data in Other Databases <sdb>`.

Serializer

@@ -394,6 +384,12 @@ pkgfiles modules handle the actual installation.
SSH Wrapper
-----------

.. toctree::
    :maxdepth: 1
    :glob:

    ssh_wrapper

Replacement execution modules for :ref:`Salt SSH <salt-ssh>`.

Thorium

@@ -420,7 +416,7 @@ the state system.
Util
----

Just utility modules to use with other modules via ``__utils__`` (see
Just utility modules to use with other modules via ``__utils__`` (see
:ref:`Dunder Dictionaries <dunder-dictionaries>`).

Wheel
doc/topics/development/modules/ssh_wrapper.rst (new file, 63 lines)
@@ -0,0 +1,63 @@
.. _ssh-wrapper:

===========
SSH Wrapper
===========

Salt-SSH Background
===================

Salt-SSH works by creating a tar ball of salt, a bunch of python modules, and a generated
short minion config. It then copies this onto the destination host over ssh, then
uses that host's local python install to run ``salt-client --local`` with any requested modules.
It does not automatically copy over states or cache files, and since it uses a local file_client,
modules that rely on :py:func:`cp.cache* <salt.modules.cp>` functionality do not work.

SSH Wrapper modules
===================

To support cp modules or other functionality which might not otherwise work in the remote environment,
a wrapper module can be created. These modules are run from the salt-master initiating the salt-ssh
command and can include logic to support the needed functionality. SSH Wrapper modules are located in
/salt/client/ssh/wrapper/ and are named the same as the execution module being extended. Any functions
defined inside of the wrapper module are called from the ``salt-ssh module.function argument``
command rather than executing on the minion.

State Module example
--------------------

Running salt states on a salt-ssh minion obviously requires the state files themselves. To support this,
a state module wrapper script exists at salt/client/ssh/wrapper/state.py, and includes standard state
functions like :py:func:`apply <salt.modules.state.apply>`, :py:func:`sls <salt.modules.state.sls>`,
and :py:func:`highstate <salt.modules.state.highstate>`. When executing ``salt-ssh minion state.highstate``,
these wrapper functions are used and include the logic to walk the low_state output for that minion to
determine files used, gather needed files, tar them together, transfer the tar file to the minion over
ssh, and run a state on the ssh minion. This state then extracts the tar file, applies the needed states
and data, and cleans up the transferred files.

Wrapper Handling
----------------

From the wrapper script, any invocations of ``__salt__['some.module']()`` do not run on the master
which is running the wrapper, but instead are magically invoked on the minion over ssh.
Should the function being called exist in the wrapper, the wrapper function will be
used instead.

One way of supporting this workflow may be to create a wrapper function which performs the needed file
copy operations. Now that files are resident on the ssh minion, the next step is to run the original
execution module function. But since that function name was already overridden by the wrapper, a
function alias can be created in the original execution module, which can then be called from the
wrapper.

Example
```````

The saltcheck module needs sls and tst files on the minion to function. The invocation of
:py:func:`saltcheck.run_state_tests <salt.modules.saltcheck.run_state_tests>` is run from
the wrapper module, and is responsible for performing the needed file copy. The
:py:func:`saltcheck <salt.modules.saltcheck>` execution module includes an alias line of
``run_state_tests_ssh = salt.utils.functools.alias_function(run_state_tests, 'run_state_tests_ssh')``,
which creates an alias of ``run_state_tests`` with the name ``run_state_tests_ssh``. At the end of
the ``run_state_tests`` function in the wrapper module, it then calls
``__salt__['saltcheck.run_state_tests_ssh']()``. Since this function does not exist in the wrapper script,
the call is made on the remote minion, which, then having the needed files, runs as expected.
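To make the pattern above concrete, here is a minimal sketch of what such a wrapper could look like. It is illustrative only: the ``_copy_test_files`` helper is hypothetical, the signatures are simplified, and this is not the actual source of the saltcheck wrapper.

.. code-block:: python

    # Sketch of an SSH wrapper module (illustrative, not the real salt source).
    # ``__salt__`` is injected by the Salt loader; it is not a normal import.


    def _copy_test_files(state, saltenv):
        """Hypothetical helper: gather the state's .sls/.tst files and push
        them to the target minion (details omitted in this sketch)."""


    def run_state_tests(state, saltenv="base", check_all=False):
        """Wrapper: stage the needed files, then run the tests remotely."""
        _copy_test_files(state, saltenv)
        # 'saltcheck.run_state_tests_ssh' is the alias created in the execution
        # module; it does not exist in this wrapper, so the call is routed to
        # the remote minion over SSH rather than handled locally.
        return __salt__["saltcheck.run_state_tests_ssh"](
            state, saltenv=saltenv, check_all=check_all
        )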
@@ -250,7 +250,7 @@ done at the CLI:

    caller = salt.client.Caller()

    ret = called.cmd('event.send',
    ret = caller.cmd('event.send',
                     'myco/event/success'
                     { 'success': True,
                       'message': "It works!" })
@@ -87,7 +87,7 @@ the context into the included file is required:
.. code-block:: jinja

    {% from 'lib.sls' import test with context %}


Includes must use full paths, like so:

.. code-block:: jinja
@@ -649,6 +649,56 @@ Returns:
    1, 4


.. jinja_ref:: method_call

``method_call``
---------------

.. versionadded:: Sodium

Returns the result of an object's method call.

Example #1:

.. code-block:: jinja

    {{ [1, 2, 1, 3, 4] | method_call('index', 1, 1, 3) }}

Returns:

.. code-block:: text

    2

This filter can be used with the `map filter`_ to apply object methods without
using loop constructs or temporary variables.

Example #2:

.. code-block:: jinja

    {% set host_list = ['web01.example.com', 'db01.example.com'] %}
    {% set host_list_split = [] %}
    {% for item in host_list %}
      {% do host_list_split.append(item.split('.', 1)) %}
    {% endfor %}
    {{ host_list_split }}

Example #3:

.. code-block:: jinja

    {{ host_list|map('method_call', 'split', '.', 1)|list }}

Return of examples #2 and #3:

.. code-block:: text

    [[web01, example.com], [db01, example.com]]

.. _`map filter`: http://jinja.pocoo.org/docs/2.10/templates/#map


.. jinja_ref:: is_sorted

``is_sorted``
@@ -17,6 +17,16 @@ The old syntax for the mine_function - as a dict, or as a list with dicts that
contain more than exactly one key - is still supported but discouraged in favor
of the more uniform syntax of module.run.

State Execution Module
======================

The :mod:`state.test <salt.modules.state.test>` function
can be used to test a state on a minion. This works by executing the
:mod:`state.apply <salt.modules.state.apply>` function while forcing the ``test`` kwarg
to ``True`` so that the ``state.apply`` function is not required to be called by the
user directly. This also allows you to add the ``state.test`` function to a minion's
``minion_blackout_whitelist`` pillar if you wish to be able to test a state while a
minion is in blackout.
|
||||
==========
|
||||
|
|
|
@@ -180,6 +180,7 @@ Results can then be analyzed with `kcachegrind`_ or similar tool.

.. _`kcachegrind`: http://kcachegrind.sourceforge.net/html/Home.html

Make sure you have yappi installed.

On Windows, in the absence of kcachegrind, a simple file-based workflow to create
profiling graphs could use `gprof2dot`_, `graphviz`_ and this batch file:
@@ -272,7 +272,7 @@ system, such as a database.
data using a returner (instead of the local job cache on disk).

If a master has many accepted keys, it may take a long time to publish a job
because the master much first determine the matching minions and deliver
because the master must first determine the matching minions and deliver
that information back to the waiting client before the job can be published.

To mitigate this, a key cache may be enabled. This will reduce the load
noxfile.py (14 changed lines)
@@ -45,7 +45,7 @@ SITECUSTOMIZE_DIR = os.path.join(REPO_ROOT, "tests", "support", "coverage")
IS_DARWIN = sys.platform.lower().startswith("darwin")
IS_WINDOWS = sys.platform.lower().startswith("win")
# Python versions to run against
_PYTHON_VERSIONS = ("2", "2.7", "3", "3.4", "3.5", "3.6", "3.7")
_PYTHON_VERSIONS = ("2", "2.7", "3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9")

# Nox options
# Reuse existing virtualenvs

@@ -889,9 +889,11 @@ def _pytest(session, coverage, cmd_args):

    try:
        if coverage is True:
            _run_with_coverage(session, "coverage", "run", "-m", "py.test", *cmd_args)
            _run_with_coverage(
                session, "python", "-m", "coverage", "run", "-m", "pytest", *cmd_args
            )
        else:
            session.run("py.test", *cmd_args, env=env)
            session.run("python", "-m", "pytest", *cmd_args, env=env)
    except CommandFailed:  # pylint: disable=try-except-raise
        # Not rerunning failed tests for now
        raise

@@ -905,9 +907,11 @@ def _pytest(session, coverage, cmd_args):
                cmd_args[idx] = parg.replace(".xml", "-rerun-failed.xml")
        cmd_args.append("--lf")
        if coverage is True:
            _run_with_coverage(session, "coverage", "run", "-m", "py.test", *cmd_args)
            _run_with_coverage(
                session, "python", "-m", "coverage", "run", "-m", "pytest", *cmd_args
            )
        else:
            session.run("py.test", *cmd_args, env=env)
            session.run("python", "-m", "pytest", *cmd_args, env=env)
        # pylint: enable=unreachable
@@ -1,6 +1,6 @@
-r req_win.txt
backports-abc==0.5; python_version < '3.0'
backports.ssl-match-hostname==3.7.0.1
backports.ssl-match-hostname==3.7.0.1; python_version < '3.7'
certifi
cffi==1.12.2
CherryPy==17.4.1

@@ -1,12 +0,0 @@
mock
boto
boto3
moto
SaltPyLint>=v2017.6.22
apache-libcloud
virtualenv

# Needed for archive, which is gated for Redhat
# rarfile
# Needed for keystone
# python-keystoneclient

@@ -1,2 +1,2 @@
pywin32==224
pywin32==227
WMI==1.4.9
|
@ -10,7 +10,7 @@ atomicwrites==1.3.0 # via pytest
|
|||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
backports.ssl-match-hostname==3.7.0.1 ; python_version < "3.7"
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
|
@ -88,7 +88,7 @@ python-jose==2.0.2 # via moto
|
|||
pythonnet==2.3.0
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pywin32==224
|
||||
pywin32==227
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
|
|
|
@ -10,7 +10,7 @@ atomicwrites==1.3.0 # via pytest
|
|||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
backports.ssl-match-hostname==3.7.0.1 ; python_version < "3.7"
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
|
@ -87,7 +87,7 @@ python-jose==2.0.2 # via moto
|
|||
pythonnet==2.3.0
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pywin32==224
|
||||
pywin32==227
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
|
|
|
@ -10,7 +10,6 @@ atomicwrites==1.3.0 # via pytest
|
|||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
|
@ -87,7 +86,7 @@ python-jose==2.0.2 # via moto
|
|||
pythonnet==2.3.0
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pywin32==224
|
||||
pywin32==227
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
|
|
115  requirements/static/py3.8/cloud.txt  Normal file
@ -0,0 +1,115 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/cloud.txt -v requirements/static/cloud.in
|
||||
#
|
||||
adal==1.2.1 # via azure-datalake-store, msrestazure
|
||||
asn1crypto==0.24.0 # via cryptography
|
||||
azure-applicationinsights==0.1.0 # via azure
|
||||
azure-batch==4.1.3 # via azure
|
||||
azure-common==1.1.18 # via azure-applicationinsights, azure-batch, azure-cosmosdb-table, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-loganalytics, azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cdn, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-compute, azure-mgmt-consumption, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-analytics, azure-mgmt-datalake-store, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-eventhub, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-media, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-network, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-rdbms, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web, azure-servicebus, azure-servicefabric, azure-servicemanagement-legacy, azure-storage-blob, azure-storage-common, azure-storage-file, azure-storage-queue
|
||||
azure-cosmosdb-nspkg==2.0.2 # via azure-cosmosdb-table
|
||||
azure-cosmosdb-table==1.0.5 # via azure
|
||||
azure-datalake-store==0.0.44 # via azure
|
||||
azure-eventgrid==1.2.0 # via azure
|
||||
azure-graphrbac==0.40.0 # via azure
|
||||
azure-keyvault==1.1.0 # via azure
|
||||
azure-loganalytics==0.1.0 # via azure
|
||||
azure-mgmt-advisor==1.0.1 # via azure-mgmt
|
||||
azure-mgmt-applicationinsights==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-authorization==0.50.0 # via azure-mgmt
|
||||
azure-mgmt-batch==5.0.1 # via azure-mgmt
|
||||
azure-mgmt-batchai==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-billing==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-cdn==3.1.0 # via azure-mgmt
|
||||
azure-mgmt-cognitiveservices==3.0.0 # via azure-mgmt
|
||||
azure-mgmt-commerce==1.0.1 # via azure-mgmt
|
||||
azure-mgmt-compute==4.6.0 # via azure-mgmt
|
||||
azure-mgmt-consumption==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-containerinstance==1.4.1 # via azure-mgmt
|
||||
azure-mgmt-containerregistry==2.7.0 # via azure-mgmt
|
||||
azure-mgmt-containerservice==4.4.0 # via azure-mgmt
|
||||
azure-mgmt-cosmosdb==0.4.1 # via azure-mgmt
|
||||
azure-mgmt-datafactory==0.6.0 # via azure-mgmt
|
||||
azure-mgmt-datalake-analytics==0.6.0 # via azure-mgmt
|
||||
azure-mgmt-datalake-nspkg==3.0.1 # via azure-mgmt-datalake-analytics, azure-mgmt-datalake-store
|
||||
azure-mgmt-datalake-store==0.5.0 # via azure-mgmt
|
||||
azure-mgmt-datamigration==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-devspaces==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-devtestlabs==2.2.0 # via azure-mgmt
|
||||
azure-mgmt-dns==2.1.0 # via azure-mgmt
|
||||
azure-mgmt-eventgrid==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-eventhub==2.5.0 # via azure-mgmt
|
||||
azure-mgmt-hanaonazure==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-iotcentral==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-iothub==0.5.0 # via azure-mgmt
|
||||
azure-mgmt-iothubprovisioningservices==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-keyvault==1.1.0 # via azure-mgmt
|
||||
azure-mgmt-loganalytics==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-logic==3.0.0 # via azure-mgmt
|
||||
azure-mgmt-machinelearningcompute==0.4.1 # via azure-mgmt
|
||||
azure-mgmt-managementgroups==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-managementpartner==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-maps==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-marketplaceordering==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-media==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-monitor==0.5.2 # via azure-mgmt
|
||||
azure-mgmt-msi==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-network==2.6.0 # via azure-mgmt
|
||||
azure-mgmt-notificationhubs==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-nspkg==3.0.2 # via azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-consumption, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-nspkg, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web
|
||||
azure-mgmt-policyinsights==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-powerbiembedded==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-rdbms==1.8.0 # via azure-mgmt
|
||||
azure-mgmt-recoveryservices==0.3.0 # via azure-mgmt
|
||||
azure-mgmt-recoveryservicesbackup==0.3.0 # via azure-mgmt
|
||||
azure-mgmt-redis==5.0.0 # via azure-mgmt
|
||||
azure-mgmt-relay==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-reservations==0.2.1 # via azure-mgmt
|
||||
azure-mgmt-resource==2.1.0 # via azure-mgmt
|
||||
azure-mgmt-scheduler==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-search==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-servicebus==0.5.3 # via azure-mgmt
|
||||
azure-mgmt-servicefabric==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-signalr==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-sql==0.9.1 # via azure-mgmt
|
||||
azure-mgmt-storage==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-subscription==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-trafficmanager==0.50.0 # via azure-mgmt
|
||||
azure-mgmt-web==0.35.0 # via azure-mgmt
|
||||
azure-mgmt==4.0.0 # via azure
|
||||
azure-nspkg==3.0.2 # via azure-applicationinsights, azure-batch, azure-cosmosdb-nspkg, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-loganalytics, azure-mgmt-nspkg, azure-servicebus, azure-servicefabric, azure-servicemanagement-legacy
|
||||
azure-servicebus==0.21.1 # via azure
|
||||
azure-servicefabric==6.3.0.0 # via azure
|
||||
azure-servicemanagement-legacy==0.20.6 # via azure
|
||||
azure-storage-blob==1.5.0 # via azure
|
||||
azure-storage-common==1.4.0 # via azure-cosmosdb-table, azure-storage-blob, azure-storage-file, azure-storage-queue
|
||||
azure-storage-file==1.4.0 # via azure
|
||||
azure-storage-queue==1.4.0 # via azure
|
||||
azure==4.0.0
|
||||
certifi==2019.3.9 # via msrest, requests
|
||||
cffi==1.12.2 # via azure-datalake-store, cryptography
|
||||
chardet==3.0.4 # via requests
|
||||
cryptography==2.6.1 # via adal, azure-cosmosdb-table, azure-keyvault, azure-storage-common, requests-ntlm, smbprotocol
|
||||
idna==2.8 # via requests
|
||||
isodate==0.6.0 # via msrest
|
||||
msrest==0.6.6 # via azure-applicationinsights, azure-eventgrid, azure-keyvault, azure-loganalytics, azure-mgmt-cdn, azure-mgmt-compute, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-dns, azure-mgmt-eventhub, azure-mgmt-keyvault, azure-mgmt-media, azure-mgmt-network, azure-mgmt-rdbms, azure-mgmt-resource, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-servicefabric, msrestazure
|
||||
msrestazure==0.6.0 # via azure-batch, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cdn, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-compute, azure-mgmt-consumption, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-analytics, azure-mgmt-datalake-store, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-eventhub, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-media, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-network, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-rdbms, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web
|
||||
netaddr==0.7.19
|
||||
ntlm-auth==1.3.0 # via requests-ntlm, smbprotocol
|
||||
oauthlib==3.0.1 # via requests-oauthlib
|
||||
profitbricks==4.1.3
|
||||
pyasn1==0.4.5 # via smbprotocol
|
||||
pycparser==2.19 # via cffi
|
||||
pyjwt==1.7.1 # via adal
|
||||
pypsexec==0.1.0
|
||||
python-dateutil==2.8.0 # via adal, azure-cosmosdb-table, azure-storage-common
|
||||
pywinrm==0.3.0
|
||||
requests-ntlm==1.1.0 # via pywinrm
|
||||
requests-oauthlib==1.2.0 # via msrest
|
||||
requests==2.21.0 # via adal, azure-cosmosdb-table, azure-datalake-store, azure-keyvault, azure-servicebus, azure-servicemanagement-legacy, azure-storage-common, msrest, profitbricks, pywinrm, requests-ntlm, requests-oauthlib
|
||||
six==1.12.0 # via cryptography, isodate, profitbricks, pypsexec, python-dateutil, pywinrm, smbprotocol
|
||||
smbprotocol==0.1.1 # via pypsexec
|
||||
urllib3==1.24.2 # via requests
|
||||
xmltodict==0.12.0 # via pywinrm
|
8  requirements/static/py3.8/darwin-crypto.txt  Normal file
@ -0,0 +1,8 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/darwin-crypto.txt -v requirements/static/crypto.in
|
||||
#
|
||||
m2crypto==0.35.2
|
||||
pycryptodomex==3.9.0
|
123  requirements/static/py3.8/darwin.txt  Normal file
@ -0,0 +1,123 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/darwin.txt -v pkg/osx/req.txt pkg/osx/req_ext.txt requirements/base.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/darwin.in
|
||||
#
|
||||
apache-libcloud==2.4.0
|
||||
appdirs==1.4.3 # via virtualenv
|
||||
argh==0.26.2 # via watchdog
|
||||
asn1crypto==1.3.0 # via certvalidator, cryptography, oscrypto
|
||||
atomicwrites==1.3.0 # via pytest
|
||||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
backports.ssl_match_hostname==3.7.0.1
|
||||
bcrypt==3.1.6 # via paramiko
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
cachetools==3.1.0 # via google-auth
|
||||
certifi==2019.3.9
|
||||
certvalidator==0.11.1 # via vcert
|
||||
cffi==1.12.2
|
||||
chardet==3.0.4 # via requests
|
||||
cheetah3==3.1.0
|
||||
cheroot==6.5.5 # via cherrypy
|
||||
cherrypy==17.4.1
|
||||
click==7.0
|
||||
clustershell==1.8.1
|
||||
contextlib2==0.5.5 # via cherrypy
|
||||
croniter==0.3.29
|
||||
cryptography==2.6.1
|
||||
distlib==0.3.0 # via virtualenv
|
||||
dnspython==1.16.0
|
||||
docker-pycreds==0.4.0 # via docker
|
||||
docker==3.7.2
|
||||
docutils==0.14 # via botocore
|
||||
ecdsa==0.13.3 # via python-jose
|
||||
enum34==1.1.6
|
||||
filelock==3.0.12 # via virtualenv
|
||||
future==0.17.1 # via python-jose
|
||||
genshi==0.7.3
|
||||
gitdb2==2.0.5 # via gitpython
|
||||
gitpython==2.1.15
|
||||
google-auth==1.6.3 # via kubernetes
|
||||
idna==2.8
|
||||
ipaddress==1.0.22
|
||||
jaraco.functools==2.0 # via tempora
|
||||
jinja2==2.10.1
|
||||
jmespath==0.9.4
|
||||
jsondiff==1.1.1 # via moto
|
||||
jsonpickle==1.1 # via aws-xray-sdk
|
||||
jsonschema==2.6.0
|
||||
junos-eznc==2.2.0
|
||||
jxmlease==1.0.1
|
||||
keyring==5.7.1
|
||||
kubernetes==3.0.0
|
||||
linode-python==1.1.1
|
||||
lxml==4.3.3 # via junos-eznc, ncclient
|
||||
mako==1.0.7
|
||||
markupsafe==1.1.1
|
||||
mock==3.0.5
|
||||
more-itertools==5.0.0
|
||||
moto==1.3.7
|
||||
msgpack-python==0.5.6
|
||||
msgpack==0.5.6
|
||||
ncclient==0.6.4 # via junos-eznc
|
||||
netaddr==0.7.19 # via junos-eznc
|
||||
oscrypto==1.2.0 # via certvalidator
|
||||
packaging==19.2 # via pytest
|
||||
paramiko==2.4.2 # via junos-eznc, ncclient, scp
|
||||
pathtools==0.1.2 # via watchdog
|
||||
pluggy==0.13.1 # via pytest
|
||||
portend==2.4 # via cherrypy
|
||||
psutil==5.6.6
|
||||
py==1.8.0 # via pytest
|
||||
pyaml==19.4.1 # via moto
|
||||
pyasn1-modules==0.2.4 # via google-auth
|
||||
pyasn1==0.4.5
|
||||
pycparser==2.19
|
||||
pycryptodome==3.8.1
|
||||
pynacl==1.3.0 # via paramiko
|
||||
pyopenssl==19.0.0
|
||||
pyparsing==2.4.5 # via packaging
|
||||
pyserial==3.4 # via junos-eznc
|
||||
pytest-helpers-namespace==2019.1.8
|
||||
pytest-salt-runtests-bridge==2019.7.10
|
||||
pytest-salt==2019.12.27
|
||||
pytest-tempdir==2019.10.12
|
||||
pytest==4.6.6
|
||||
python-dateutil==2.8.0
|
||||
python-etcd==0.4.5
|
||||
python-gnupg==0.4.4
|
||||
python-jose==2.0.2 # via moto
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
responses==0.10.6 # via moto
|
||||
rfc3987==1.3.8
|
||||
rsa==4.0 # via google-auth
|
||||
s3transfer==0.2.0 # via boto3
|
||||
salttesting==2017.6.1
|
||||
scp==0.13.2 # via junos-eznc
|
||||
setproctitle==1.1.10
|
||||
six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, mock, more-itertools, moto, ncclient, packaging, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, tempora, vcert, virtualenv, websocket-client
|
||||
smmap2==2.0.5 # via gitdb2
|
||||
strict-rfc3339==0.7
|
||||
tempora==1.14.1 # via portend
|
||||
timelib==0.2.4
|
||||
urllib3==1.24.2 # via botocore, kubernetes, python-etcd, requests
|
||||
vcert==0.7.3
|
||||
virtualenv==20.0.10
|
||||
vultr==1.0.1
|
||||
watchdog==0.9.0
|
||||
wcwidth==0.1.7 # via pytest
|
||||
websocket-client==0.40.0 # via docker, kubernetes
|
||||
werkzeug==0.15.6 # via moto
|
||||
wrapt==1.11.1 # via aws-xray-sdk
|
||||
xmltodict==0.12.0 # via moto
|
||||
yamlordereddictloader==0.4.0
|
||||
zc.lockfile==1.4 # via cherrypy
|
30  requirements/static/py3.8/docs.txt  Normal file
@ -0,0 +1,30 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/docs.txt -v requirements/static/docs.in
|
||||
#
|
||||
alabaster==0.7.12 # via sphinx
|
||||
babel==2.7.0 # via sphinx
|
||||
certifi==2019.3.9 # via requests
|
||||
chardet==3.0.4 # via requests
|
||||
docutils==0.14 # via sphinx
|
||||
idna==2.8 # via requests
|
||||
imagesize==1.1.0 # via sphinx
|
||||
jinja2==2.10.1 # via sphinx
|
||||
markupsafe==1.1.1 # via jinja2
|
||||
packaging==19.0 # via sphinx
|
||||
pygments==2.4.2 # via sphinx
|
||||
pyparsing==2.4.0 # via packaging
|
||||
pytz==2019.1 # via babel
|
||||
requests==2.22.0 # via sphinx
|
||||
six==1.12.0 # via packaging
|
||||
snowballstemmer==1.2.1 # via sphinx
|
||||
sphinx==2.0.1
|
||||
sphinxcontrib-applehelp==1.0.1 # via sphinx
|
||||
sphinxcontrib-devhelp==1.0.1 # via sphinx
|
||||
sphinxcontrib-htmlhelp==1.0.2 # via sphinx
|
||||
sphinxcontrib-jsmath==1.0.1 # via sphinx
|
||||
sphinxcontrib-qthelp==1.0.2 # via sphinx
|
||||
sphinxcontrib-serializinghtml==1.1.3 # via sphinx
|
||||
urllib3==1.25.3 # via requests
|
16  requirements/static/py3.8/lint.txt  Normal file
@ -0,0 +1,16 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/lint.txt -v requirements/static/lint.in
|
||||
#
|
||||
astroid==2.3.3 # via pylint
|
||||
isort==4.3.17 # via pylint
|
||||
lazy-object-proxy==1.4.3 # via astroid
|
||||
mccabe==0.6.1 # via pylint
|
||||
modernize==0.5 # via saltpylint
|
||||
pycodestyle==2.5.0 # via saltpylint
|
||||
pylint==2.4.4
|
||||
saltpylint==2019.11.14
|
||||
six==1.12.0 # via astroid
|
||||
wrapt==1.11.1 # via astroid
|
8  requirements/static/py3.8/linux-crypto.txt  Normal file
@ -0,0 +1,8 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/linux-crypto.txt -v requirements/static/crypto.in
|
||||
#
|
||||
m2crypto==0.35.2
|
||||
pycryptodomex==3.9.3
|
119  requirements/static/py3.8/linux.txt  Normal file
@ -0,0 +1,119 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.8/linux.txt -v requirements/base.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/linux.in
|
||||
#
|
||||
apache-libcloud==2.0.0
|
||||
argh==0.26.2 # via watchdog
|
||||
asn1crypto==1.3.0 # via certvalidator, cryptography, oscrypto
|
||||
atomicwrites==1.3.0 # via pytest
|
||||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
bcrypt==3.1.6 # via paramiko
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
cachetools==3.1.0 # via google-auth
|
||||
certifi==2019.3.9
|
||||
certvalidator==0.11.1 # via vcert
|
||||
cffi==1.12.2
|
||||
chardet==3.0.4 # via requests
|
||||
cheetah3==3.1.0
|
||||
cheroot==6.5.4 # via cherrypy
|
||||
cherrypy==17.3.0
|
||||
contextlib2==0.5.5 # via cherrypy
|
||||
croniter==0.3.29
|
||||
cryptography==2.6.1 # via moto, paramiko, pyopenssl, vcert
|
||||
dnspython==1.16.0
|
||||
docker-pycreds==0.4.0 # via docker
|
||||
docker==3.7.2
|
||||
docutils==0.14 # via botocore
|
||||
ecdsa==0.13.3 # via python-jose
|
||||
future==0.17.1 # via python-jose
|
||||
genshi==0.7.3
|
||||
gitdb2==2.0.5 # via gitpython
|
||||
gitpython==2.1.11
|
||||
google-auth==1.6.3 # via kubernetes
|
||||
hgtools==8.1.1
|
||||
idna==2.8 # via requests
|
||||
ipaddress==1.0.22 # via kubernetes
|
||||
jaraco.functools==2.0 # via tempora
|
||||
jinja2==2.10.1
|
||||
jmespath==0.9.4
|
||||
jsondiff==1.1.1 # via moto
|
||||
jsonpickle==1.1 # via aws-xray-sdk
|
||||
jsonschema==2.6.0
|
||||
junos-eznc==2.2.0
|
||||
jxmlease==1.0.1
|
||||
kazoo==2.6.1
|
||||
keyring==5.7.1
|
||||
kubernetes==3.0.0
|
||||
libnacl==1.7.1
|
||||
lxml==4.3.3 # via junos-eznc, ncclient
|
||||
mako==1.1.0
|
||||
markupsafe==1.1.1
|
||||
mock==3.0.5
|
||||
more-itertools==5.0.0
|
||||
moto==1.3.7
|
||||
msgpack==0.5.6
|
||||
ncclient==0.6.4 # via junos-eznc
|
||||
netaddr==0.7.19 # via junos-eznc
|
||||
oscrypto==1.2.0 # via certvalidator
|
||||
packaging==19.2 # via pytest
|
||||
paramiko==2.4.2
|
||||
pathtools==0.1.2 # via watchdog
|
||||
pluggy==0.13.0 # via pytest
|
||||
portend==2.4 # via cherrypy
|
||||
psutil==5.6.1
|
||||
py==1.8.0 # via pytest
|
||||
pyaml==19.4.1 # via moto
|
||||
pyasn1-modules==0.2.4 # via google-auth
|
||||
pyasn1==0.4.5 # via paramiko, pyasn1-modules, rsa
|
||||
pycparser==2.19 # via cffi
|
||||
pycrypto==2.6.1 ; sys_platform not in "win32,darwin"
|
||||
pycryptodome==3.8.1 # via python-jose
|
||||
pygit2==0.28.2
|
||||
pyinotify==0.9.6
|
||||
pynacl==1.3.0 # via paramiko
|
||||
pyopenssl==19.0.0
|
||||
pyparsing==2.4.5 # via packaging
|
||||
pyserial==3.4 # via junos-eznc
|
||||
pytest-helpers-namespace==2019.1.8
|
||||
pytest-salt-runtests-bridge==2019.7.10
|
||||
pytest-salt==2019.12.27
|
||||
pytest-tempdir==2019.10.12
|
||||
pytest==4.6.6
|
||||
python-dateutil==2.8.0 # via botocore, croniter, kubernetes, moto, vcert
|
||||
python-etcd==0.4.5
|
||||
python-gnupg==0.4.4
|
||||
python-jose==2.0.2 # via moto
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
responses==0.10.6 # via moto
|
||||
rfc3987==1.3.8
|
||||
rsa==4.0 # via google-auth
|
||||
s3transfer==0.2.0 # via boto3
|
||||
salttesting==2017.6.1
|
||||
scp==0.13.2 # via junos-eznc
|
||||
setproctitle==1.1.10
|
||||
setuptools-scm==3.2.0
|
||||
six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kazoo, kubernetes, mock, more-itertools, moto, ncclient, packaging, pygit2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, tempora, vcert, websocket-client
|
||||
smmap2==2.0.5 # via gitdb2
|
||||
strict-rfc3339==0.7
|
||||
tempora==1.14.1 # via portend
|
||||
timelib==0.2.4
|
||||
urllib3==1.24.2 # via botocore, kubernetes, python-etcd, requests
|
||||
vcert==0.7.3
|
||||
virtualenv==16.4.3
|
||||
watchdog==0.9.0
|
||||
wcwidth==0.1.7 # via pytest
|
||||
websocket-client==0.40.0 # via docker, kubernetes
|
||||
werkzeug==0.15.6 # via moto
|
||||
wrapt==1.11.1 # via aws-xray-sdk
|
||||
xmltodict==0.12.0 # via moto
|
||||
zc.lockfile==1.4 # via cherrypy
|
115  requirements/static/py3.9/cloud.txt  Normal file
@ -0,0 +1,115 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/cloud.txt -v requirements/static/cloud.in
|
||||
#
|
||||
adal==1.2.1 # via azure-datalake-store, msrestazure
|
||||
asn1crypto==0.24.0 # via cryptography
|
||||
azure-applicationinsights==0.1.0 # via azure
|
||||
azure-batch==4.1.3 # via azure
|
||||
azure-common==1.1.18 # via azure-applicationinsights, azure-batch, azure-cosmosdb-table, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-loganalytics, azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cdn, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-compute, azure-mgmt-consumption, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-analytics, azure-mgmt-datalake-store, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-eventhub, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-media, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-network, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-rdbms, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web, azure-servicebus, azure-servicefabric, azure-servicemanagement-legacy, azure-storage-blob, azure-storage-common, azure-storage-file, azure-storage-queue
|
||||
azure-cosmosdb-nspkg==2.0.2 # via azure-cosmosdb-table
|
||||
azure-cosmosdb-table==1.0.5 # via azure
|
||||
azure-datalake-store==0.0.44 # via azure
|
||||
azure-eventgrid==1.2.0 # via azure
|
||||
azure-graphrbac==0.40.0 # via azure
|
||||
azure-keyvault==1.1.0 # via azure
|
||||
azure-loganalytics==0.1.0 # via azure
|
||||
azure-mgmt-advisor==1.0.1 # via azure-mgmt
|
||||
azure-mgmt-applicationinsights==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-authorization==0.50.0 # via azure-mgmt
|
||||
azure-mgmt-batch==5.0.1 # via azure-mgmt
|
||||
azure-mgmt-batchai==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-billing==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-cdn==3.1.0 # via azure-mgmt
|
||||
azure-mgmt-cognitiveservices==3.0.0 # via azure-mgmt
|
||||
azure-mgmt-commerce==1.0.1 # via azure-mgmt
|
||||
azure-mgmt-compute==4.6.0 # via azure-mgmt
|
||||
azure-mgmt-consumption==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-containerinstance==1.4.1 # via azure-mgmt
|
||||
azure-mgmt-containerregistry==2.7.0 # via azure-mgmt
|
||||
azure-mgmt-containerservice==4.4.0 # via azure-mgmt
|
||||
azure-mgmt-cosmosdb==0.4.1 # via azure-mgmt
|
||||
azure-mgmt-datafactory==0.6.0 # via azure-mgmt
|
||||
azure-mgmt-datalake-analytics==0.6.0 # via azure-mgmt
|
||||
azure-mgmt-datalake-nspkg==3.0.1 # via azure-mgmt-datalake-analytics, azure-mgmt-datalake-store
|
||||
azure-mgmt-datalake-store==0.5.0 # via azure-mgmt
|
||||
azure-mgmt-datamigration==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-devspaces==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-devtestlabs==2.2.0 # via azure-mgmt
|
||||
azure-mgmt-dns==2.1.0 # via azure-mgmt
|
||||
azure-mgmt-eventgrid==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-eventhub==2.5.0 # via azure-mgmt
|
||||
azure-mgmt-hanaonazure==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-iotcentral==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-iothub==0.5.0 # via azure-mgmt
|
||||
azure-mgmt-iothubprovisioningservices==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-keyvault==1.1.0 # via azure-mgmt
|
||||
azure-mgmt-loganalytics==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-logic==3.0.0 # via azure-mgmt
|
||||
azure-mgmt-machinelearningcompute==0.4.1 # via azure-mgmt
|
||||
azure-mgmt-managementgroups==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-managementpartner==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-maps==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-marketplaceordering==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-media==1.0.0 # via azure-mgmt
|
||||
azure-mgmt-monitor==0.5.2 # via azure-mgmt
|
||||
azure-mgmt-msi==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-network==2.6.0 # via azure-mgmt
|
||||
azure-mgmt-notificationhubs==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-nspkg==3.0.2 # via azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-consumption, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-nspkg, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web
|
||||
azure-mgmt-policyinsights==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-powerbiembedded==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-rdbms==1.8.0 # via azure-mgmt
|
||||
azure-mgmt-recoveryservices==0.3.0 # via azure-mgmt
|
||||
azure-mgmt-recoveryservicesbackup==0.3.0 # via azure-mgmt
|
||||
azure-mgmt-redis==5.0.0 # via azure-mgmt
|
||||
azure-mgmt-relay==0.1.0 # via azure-mgmt
|
||||
azure-mgmt-reservations==0.2.1 # via azure-mgmt
|
||||
azure-mgmt-resource==2.1.0 # via azure-mgmt
|
||||
azure-mgmt-scheduler==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-search==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-servicebus==0.5.3 # via azure-mgmt
|
||||
azure-mgmt-servicefabric==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-signalr==0.1.1 # via azure-mgmt
|
||||
azure-mgmt-sql==0.9.1 # via azure-mgmt
|
||||
azure-mgmt-storage==2.0.0 # via azure-mgmt
|
||||
azure-mgmt-subscription==0.2.0 # via azure-mgmt
|
||||
azure-mgmt-trafficmanager==0.50.0 # via azure-mgmt
|
||||
azure-mgmt-web==0.35.0 # via azure-mgmt
|
||||
azure-mgmt==4.0.0 # via azure
|
||||
azure-nspkg==3.0.2 # via azure-applicationinsights, azure-batch, azure-cosmosdb-nspkg, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-loganalytics, azure-mgmt-nspkg, azure-servicebus, azure-servicefabric, azure-servicemanagement-legacy
|
||||
azure-servicebus==0.21.1 # via azure
|
||||
azure-servicefabric==6.3.0.0 # via azure
|
||||
azure-servicemanagement-legacy==0.20.6 # via azure
|
||||
azure-storage-blob==1.5.0 # via azure
|
||||
azure-storage-common==1.4.0 # via azure-cosmosdb-table, azure-storage-blob, azure-storage-file, azure-storage-queue
|
||||
azure-storage-file==1.4.0 # via azure
|
||||
azure-storage-queue==1.4.0 # via azure
|
||||
azure==4.0.0
|
||||
certifi==2019.3.9 # via msrest, requests
|
||||
cffi==1.12.2 # via azure-datalake-store, cryptography
|
||||
chardet==3.0.4 # via requests
|
||||
cryptography==2.6.1 # via adal, azure-cosmosdb-table, azure-keyvault, azure-storage-common, requests-ntlm, smbprotocol
|
||||
idna==2.8 # via requests
|
||||
isodate==0.6.0 # via msrest
|
||||
msrest==0.6.6 # via azure-applicationinsights, azure-eventgrid, azure-keyvault, azure-loganalytics, azure-mgmt-cdn, azure-mgmt-compute, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-dns, azure-mgmt-eventhub, azure-mgmt-keyvault, azure-mgmt-media, azure-mgmt-network, azure-mgmt-rdbms, azure-mgmt-resource, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-servicefabric, msrestazure
|
||||
msrestazure==0.6.0 # via azure-batch, azure-eventgrid, azure-graphrbac, azure-keyvault, azure-mgmt-advisor, azure-mgmt-applicationinsights, azure-mgmt-authorization, azure-mgmt-batch, azure-mgmt-batchai, azure-mgmt-billing, azure-mgmt-cdn, azure-mgmt-cognitiveservices, azure-mgmt-commerce, azure-mgmt-compute, azure-mgmt-consumption, azure-mgmt-containerinstance, azure-mgmt-containerregistry, azure-mgmt-containerservice, azure-mgmt-cosmosdb, azure-mgmt-datafactory, azure-mgmt-datalake-analytics, azure-mgmt-datalake-store, azure-mgmt-datamigration, azure-mgmt-devspaces, azure-mgmt-devtestlabs, azure-mgmt-dns, azure-mgmt-eventgrid, azure-mgmt-eventhub, azure-mgmt-hanaonazure, azure-mgmt-iotcentral, azure-mgmt-iothub, azure-mgmt-iothubprovisioningservices, azure-mgmt-keyvault, azure-mgmt-loganalytics, azure-mgmt-logic, azure-mgmt-machinelearningcompute, azure-mgmt-managementgroups, azure-mgmt-managementpartner, azure-mgmt-maps, azure-mgmt-marketplaceordering, azure-mgmt-media, azure-mgmt-monitor, azure-mgmt-msi, azure-mgmt-network, azure-mgmt-notificationhubs, azure-mgmt-policyinsights, azure-mgmt-powerbiembedded, azure-mgmt-rdbms, azure-mgmt-recoveryservices, azure-mgmt-recoveryservicesbackup, azure-mgmt-redis, azure-mgmt-relay, azure-mgmt-reservations, azure-mgmt-resource, azure-mgmt-scheduler, azure-mgmt-search, azure-mgmt-servicebus, azure-mgmt-servicefabric, azure-mgmt-signalr, azure-mgmt-sql, azure-mgmt-storage, azure-mgmt-subscription, azure-mgmt-trafficmanager, azure-mgmt-web
|
||||
netaddr==0.7.19
|
||||
ntlm-auth==1.3.0 # via requests-ntlm, smbprotocol
|
||||
oauthlib==3.0.1 # via requests-oauthlib
|
||||
profitbricks==4.1.3
|
||||
pyasn1==0.4.5 # via smbprotocol
|
||||
pycparser==2.19 # via cffi
|
||||
pyjwt==1.7.1 # via adal
|
||||
pypsexec==0.1.0
|
||||
python-dateutil==2.8.0 # via adal, azure-cosmosdb-table, azure-storage-common
|
||||
pywinrm==0.3.0
|
||||
requests-ntlm==1.1.0 # via pywinrm
|
||||
requests-oauthlib==1.2.0 # via msrest
|
||||
requests==2.21.0 # via adal, azure-cosmosdb-table, azure-datalake-store, azure-keyvault, azure-servicebus, azure-servicemanagement-legacy, azure-storage-common, msrest, profitbricks, pywinrm, requests-ntlm, requests-oauthlib
|
||||
six==1.12.0 # via cryptography, isodate, profitbricks, pypsexec, python-dateutil, pywinrm, smbprotocol
|
||||
smbprotocol==0.1.1 # via pypsexec
|
||||
urllib3==1.24.2 # via requests
|
||||
xmltodict==0.12.0 # via pywinrm
|
8  requirements/static/py3.9/darwin-crypto.txt  Normal file
@ -0,0 +1,8 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/darwin-crypto.txt -v requirements/static/crypto.in
|
||||
#
|
||||
m2crypto==0.35.2
|
||||
pycryptodomex==3.9.0
|
123  requirements/static/py3.9/darwin.txt  Normal file
@ -0,0 +1,123 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/darwin.txt -v pkg/osx/req.txt pkg/osx/req_ext.txt requirements/base.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/darwin.in
|
||||
#
|
||||
apache-libcloud==2.4.0
|
||||
appdirs==1.4.3 # via virtualenv
|
||||
argh==0.26.2 # via watchdog
|
||||
asn1crypto==1.3.0 # via certvalidator, cryptography, oscrypto
|
||||
atomicwrites==1.3.0 # via pytest
|
||||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
backports.ssl_match_hostname==3.7.0.1
|
||||
bcrypt==3.1.6 # via paramiko
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
cachetools==3.1.0 # via google-auth
|
||||
certifi==2019.3.9
|
||||
certvalidator==0.11.1 # via vcert
|
||||
cffi==1.12.2
|
||||
chardet==3.0.4 # via requests
|
||||
cheetah3==3.1.0
|
||||
cheroot==6.5.5 # via cherrypy
|
||||
cherrypy==17.4.1
|
||||
click==7.0
|
||||
clustershell==1.8.1
|
||||
contextlib2==0.5.5 # via cherrypy
|
||||
croniter==0.3.29
|
||||
cryptography==2.6.1
|
||||
distlib==0.3.0 # via virtualenv
|
||||
dnspython==1.16.0
|
||||
docker-pycreds==0.4.0 # via docker
|
||||
docker==3.7.2
|
||||
docutils==0.14 # via botocore
|
||||
ecdsa==0.13.3 # via python-jose
|
||||
enum34==1.1.6
|
||||
filelock==3.0.12 # via virtualenv
|
||||
future==0.17.1 # via python-jose
|
||||
genshi==0.7.3
|
||||
gitdb2==2.0.5 # via gitpython
|
||||
gitpython==2.1.15
|
||||
google-auth==1.6.3 # via kubernetes
|
||||
idna==2.8
|
||||
ipaddress==1.0.22
|
||||
jaraco.functools==2.0 # via tempora
|
||||
jinja2==2.10.1
|
||||
jmespath==0.9.4
|
||||
jsondiff==1.1.1 # via moto
|
||||
jsonpickle==1.1 # via aws-xray-sdk
|
||||
jsonschema==2.6.0
|
||||
junos-eznc==2.2.0
|
||||
jxmlease==1.0.1
|
||||
keyring==5.7.1
|
||||
kubernetes==3.0.0
|
||||
linode-python==1.1.1
|
||||
lxml==4.3.3 # via junos-eznc, ncclient
|
||||
mako==1.0.7
|
||||
markupsafe==1.1.1
|
||||
mock==3.0.5
|
||||
more-itertools==5.0.0
|
||||
moto==1.3.7
|
||||
msgpack-python==0.5.6
|
||||
msgpack==0.5.6
|
||||
ncclient==0.6.4 # via junos-eznc
|
||||
netaddr==0.7.19 # via junos-eznc
|
||||
oscrypto==1.2.0 # via certvalidator
|
||||
packaging==19.2 # via pytest
|
||||
paramiko==2.4.2 # via junos-eznc, ncclient, scp
|
||||
pathtools==0.1.2 # via watchdog
|
||||
pluggy==0.13.1 # via pytest
|
||||
portend==2.4 # via cherrypy
|
||||
psutil==5.6.6
|
||||
py==1.8.0 # via pytest
|
||||
pyaml==19.4.1 # via moto
|
||||
pyasn1-modules==0.2.4 # via google-auth
|
||||
pyasn1==0.4.5
|
||||
pycparser==2.19
|
||||
pycryptodome==3.8.1
|
||||
pynacl==1.3.0 # via paramiko
|
||||
pyopenssl==19.0.0
|
||||
pyparsing==2.4.5 # via packaging
|
||||
pyserial==3.4 # via junos-eznc
|
||||
pytest-helpers-namespace==2019.1.8
|
||||
pytest-salt-runtests-bridge==2019.7.10
|
||||
pytest-salt==2019.12.27
|
||||
pytest-tempdir==2019.10.12
|
||||
pytest==4.6.6
|
||||
python-dateutil==2.8.0
|
||||
python-etcd==0.4.5
|
||||
python-gnupg==0.4.4
|
||||
python-jose==2.0.2 # via moto
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
responses==0.10.6 # via moto
|
||||
rfc3987==1.3.8
|
||||
rsa==4.0 # via google-auth
|
||||
s3transfer==0.2.0 # via boto3
|
||||
salttesting==2017.6.1
|
||||
scp==0.13.2 # via junos-eznc
|
||||
setproctitle==1.1.10
|
||||
six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, mock, more-itertools, moto, ncclient, packaging, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, tempora, vcert, virtualenv, websocket-client
|
||||
smmap2==2.0.5 # via gitdb2
|
||||
strict-rfc3339==0.7
|
||||
tempora==1.14.1 # via portend
|
||||
timelib==0.2.4
|
||||
urllib3==1.24.2 # via botocore, kubernetes, python-etcd, requests
|
||||
vcert==0.7.3
|
||||
virtualenv==20.0.10
|
||||
vultr==1.0.1
|
||||
watchdog==0.9.0
|
||||
wcwidth==0.1.7 # via pytest
|
||||
websocket-client==0.40.0 # via docker, kubernetes
|
||||
werkzeug==0.15.6 # via moto
|
||||
wrapt==1.11.1 # via aws-xray-sdk
|
||||
xmltodict==0.12.0 # via moto
|
||||
yamlordereddictloader==0.4.0
|
||||
zc.lockfile==1.4 # via cherrypy
|
30  requirements/static/py3.9/docs.txt  Normal file
@ -0,0 +1,30 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/docs.txt -v requirements/static/docs.in
|
||||
#
|
||||
alabaster==0.7.12 # via sphinx
|
||||
babel==2.7.0 # via sphinx
|
||||
certifi==2019.3.9 # via requests
|
||||
chardet==3.0.4 # via requests
|
||||
docutils==0.14 # via sphinx
|
||||
idna==2.8 # via requests
|
||||
imagesize==1.1.0 # via sphinx
|
||||
jinja2==2.10.1 # via sphinx
|
||||
markupsafe==1.1.1 # via jinja2
|
||||
packaging==19.0 # via sphinx
|
||||
pygments==2.4.2 # via sphinx
|
||||
pyparsing==2.4.0 # via packaging
|
||||
pytz==2019.1 # via babel
|
||||
requests==2.22.0 # via sphinx
|
||||
six==1.12.0 # via packaging
|
||||
snowballstemmer==1.2.1 # via sphinx
|
||||
sphinx==2.0.1
|
||||
sphinxcontrib-applehelp==1.0.1 # via sphinx
|
||||
sphinxcontrib-devhelp==1.0.1 # via sphinx
|
||||
sphinxcontrib-htmlhelp==1.0.2 # via sphinx
|
||||
sphinxcontrib-jsmath==1.0.1 # via sphinx
|
||||
sphinxcontrib-qthelp==1.0.2 # via sphinx
|
||||
sphinxcontrib-serializinghtml==1.1.3 # via sphinx
|
||||
urllib3==1.25.3 # via requests
|
16  requirements/static/py3.9/lint.txt  Normal file
@ -0,0 +1,16 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/lint.txt -v requirements/static/lint.in
|
||||
#
|
||||
astroid==2.3.3 # via pylint
|
||||
isort==4.3.17 # via pylint
|
||||
lazy-object-proxy==1.4.3 # via astroid
|
||||
mccabe==0.6.1 # via pylint
|
||||
modernize==0.5 # via saltpylint
|
||||
pycodestyle==2.5.0 # via saltpylint
|
||||
pylint==2.4.4
|
||||
saltpylint==2019.11.14
|
||||
six==1.12.0 # via astroid
|
||||
wrapt==1.11.1 # via astroid
|
8  requirements/static/py3.9/linux-crypto.txt  Normal file
@ -0,0 +1,8 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/linux-crypto.txt -v requirements/static/crypto.in
|
||||
#
|
||||
m2crypto==0.35.2
|
||||
pycryptodomex==3.9.3
|
119  requirements/static/py3.9/linux.txt  Normal file
@ -0,0 +1,119 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile -o requirements/static/py3.9/linux.txt -v requirements/base.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/linux.in
|
||||
#
|
||||
apache-libcloud==2.0.0
|
||||
argh==0.26.2 # via watchdog
|
||||
asn1crypto==1.3.0 # via certvalidator, cryptography, oscrypto
|
||||
atomicwrites==1.3.0 # via pytest
|
||||
attrs==19.1.0 # via pytest
|
||||
aws-xray-sdk==0.95 # via moto
|
||||
backports.functools-lru-cache==1.5 # via cheroot
|
||||
bcrypt==3.1.6 # via paramiko
|
||||
boto3==1.9.132
|
||||
boto==2.49.0
|
||||
botocore==1.12.132 # via boto3, moto, s3transfer
|
||||
cachetools==3.1.0 # via google-auth
|
||||
certifi==2019.3.9
|
||||
certvalidator==0.11.1 # via vcert
|
||||
cffi==1.12.2
|
||||
chardet==3.0.4 # via requests
|
||||
cheetah3==3.1.0
|
||||
cheroot==6.5.4 # via cherrypy
|
||||
cherrypy==17.3.0
|
||||
contextlib2==0.5.5 # via cherrypy
|
||||
croniter==0.3.29
|
||||
cryptography==2.6.1 # via moto, paramiko, pyopenssl, vcert
|
||||
dnspython==1.16.0
|
||||
docker-pycreds==0.4.0 # via docker
|
||||
docker==3.7.2
|
||||
docutils==0.14 # via botocore
|
||||
ecdsa==0.13.3 # via python-jose
|
||||
future==0.17.1 # via python-jose
|
||||
genshi==0.7.3
|
||||
gitdb2==2.0.5 # via gitpython
|
||||
gitpython==2.1.11
|
||||
google-auth==1.6.3 # via kubernetes
|
||||
hgtools==8.1.1
|
||||
idna==2.8 # via requests
|
||||
ipaddress==1.0.22 # via kubernetes
|
||||
jaraco.functools==2.0 # via tempora
|
||||
jinja2==2.10.1
|
||||
jmespath==0.9.4
|
||||
jsondiff==1.1.1 # via moto
|
||||
jsonpickle==1.1 # via aws-xray-sdk
|
||||
jsonschema==2.6.0
|
||||
junos-eznc==2.2.0
|
||||
jxmlease==1.0.1
|
||||
kazoo==2.6.1
|
||||
keyring==5.7.1
|
||||
kubernetes==3.0.0
|
||||
libnacl==1.7.1
|
||||
lxml==4.3.3 # via junos-eznc, ncclient
|
||||
mako==1.1.0
|
||||
markupsafe==1.1.1
|
||||
mock==3.0.5
|
||||
more-itertools==5.0.0
|
||||
moto==1.3.7
|
||||
msgpack==0.5.6
|
||||
ncclient==0.6.4 # via junos-eznc
|
||||
netaddr==0.7.19 # via junos-eznc
|
||||
oscrypto==1.2.0 # via certvalidator
|
||||
packaging==19.2 # via pytest
|
||||
paramiko==2.4.2
|
||||
pathtools==0.1.2 # via watchdog
|
||||
pluggy==0.13.0 # via pytest
|
||||
portend==2.4 # via cherrypy
|
||||
psutil==5.6.1
|
||||
py==1.8.0 # via pytest
|
||||
pyaml==19.4.1 # via moto
|
||||
pyasn1-modules==0.2.4 # via google-auth
|
||||
pyasn1==0.4.5 # via paramiko, pyasn1-modules, rsa
|
||||
pycparser==2.19 # via cffi
|
||||
pycrypto==2.6.1 ; sys_platform not in "win32,darwin"
|
||||
pycryptodome==3.8.1 # via python-jose
|
||||
pygit2==0.28.2
|
||||
pyinotify==0.9.6
|
||||
pynacl==1.3.0 # via paramiko
|
||||
pyopenssl==19.0.0
|
||||
pyparsing==2.4.5 # via packaging
|
||||
pyserial==3.4 # via junos-eznc
|
||||
pytest-helpers-namespace==2019.1.8
|
||||
pytest-salt-runtests-bridge==2019.7.10
|
||||
pytest-salt==2019.12.27
|
||||
pytest-tempdir==2019.10.12
|
||||
pytest==4.6.6
|
||||
python-dateutil==2.8.0 # via botocore, croniter, kubernetes, moto, vcert
|
||||
python-etcd==0.4.5
|
||||
python-gnupg==0.4.4
|
||||
python-jose==2.0.2 # via moto
|
||||
pytz==2019.1 # via moto, tempora
|
||||
pyvmomi==6.7.1.2018.12
|
||||
pyyaml==5.1.2
|
||||
pyzmq==18.0.1 ; python_version != "3.4"
|
||||
requests==2.21.0
|
||||
responses==0.10.6 # via moto
|
||||
rfc3987==1.3.8
|
||||
rsa==4.0 # via google-auth
|
||||
s3transfer==0.2.0 # via boto3
|
||||
salttesting==2017.6.1
|
||||
scp==0.13.2 # via junos-eznc
|
||||
setproctitle==1.1.10
|
||||
setuptools-scm==3.2.0
|
||||
six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kazoo, kubernetes, mock, more-itertools, moto, ncclient, packaging, pygit2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, tempora, vcert, websocket-client
|
||||
smmap2==2.0.5 # via gitdb2
|
||||
strict-rfc3339==0.7
|
||||
tempora==1.14.1 # via portend
|
||||
timelib==0.2.4
|
||||
urllib3==1.24.2 # via botocore, kubernetes, python-etcd, requests
|
||||
vcert==0.7.3
|
||||
virtualenv==16.4.3
|
||||
watchdog==0.9.0
|
||||
wcwidth==0.1.7 # via pytest
|
||||
websocket-client==0.40.0 # via docker, kubernetes
|
||||
werkzeug==0.15.6 # via moto
|
||||
wrapt==1.11.1 # via aws-xray-sdk
|
||||
xmltodict==0.12.0 # via moto
|
||||
zc.lockfile==1.4 # via cherrypy
|
|
@ -1620,7 +1620,12 @@ class LocalClient(object):
yield {
id_: {
"out": "no_return",
"ret": "Minion did not return. [No response]",
"ret": "Minion did not return. [No response]"
"\nThe minions may not have all finished running and any "
"remaining minions will return upon completion. To look "
"up the return data for this job later, run the following "
"command:\n\n"
"salt-run jobs.lookup_jid {0}".format(jid),
"retcode": salt.defaults.exitcodes.EX_GENERIC,
}
}

@ -79,6 +79,11 @@ def communicator(func):
queue.put("ERROR")
queue.put("Exception")
queue.put("{0}\n{1}\n".format(ex, trace))
except SystemExit as ex:
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("System exit")
queue.put("{0}\n{1}\n".format(ex, trace))
return ret

return _call

@ -331,20 +331,39 @@ def get_resources_vms(call=None, resFilter=None, includeConfig=True):

salt-cloud -f get_resources_vms my-proxmox-config
"""
log.debug("Getting resource: vms.. (filter: %s)", resFilter)
resources = query("get", "cluster/resources")
timeoutTime = time.time() + 60
while True:
log.debug("Getting resource: vms.. (filter: %s)", resFilter)
resources = query("get", "cluster/resources")
ret = {}
badResource = False
for resource in resources:
if "type" in resource and resource["type"] in ["openvz", "qemu", "lxc"]:
try:
name = resource["name"]
except KeyError:
badResource = True
log.debug("No name in VM resource %s", repr(resource))
break

ret = {}
for resource in resources:
if "type" in resource and resource["type"] in ["openvz", "qemu", "lxc"]:
name = resource["name"]
ret[name] = resource
ret[name] = resource

if includeConfig:
# Requested to include the detailed configuration of a VM
ret[name]["config"] = get_vmconfig(
ret[name]["vmid"], ret[name]["node"], ret[name]["type"]
)
if includeConfig:
# Requested to include the detailed configuration of a VM
ret[name]["config"] = get_vmconfig(
ret[name]["vmid"], ret[name]["node"], ret[name]["type"]
)

if time.time() > timeoutTime:
raise SaltCloudExecutionTimeout(
"FAILED to get the proxmox " "resources vms"
)

# Carry on if there wasn't a bad resource return from Proxmox
if not badResource:
break

time.sleep(0.5)

if resFilter is not None:
log.debug("Filter given: %s, returning requested " "resource: nodes", resFilter)

@ -905,6 +924,13 @@ def create_node(vm_, newid):
): # if the property is set, use it for the VM request
postParams[prop] = vm_["clone_" + prop]

try:
int(vm_["clone_from"])
except ValueError:
if ":" in vm_["clone_from"]:
vmhost = vm_["clone_from"].split(":")[0]
vm_["clone_from"] = vm_["clone_from"].split(":")[1]

node = query(
"post",
"nodes/{0}/qemu/{1}/clone".format(vmhost, vm_["clone_from"]),

@ -268,6 +268,10 @@ def create(vm_):
"deploy", vm_, __opts__, default=False
)

# If ssh_host is not set, default to the minion name
if not config.get_cloud_config_value("ssh_host", vm_, __opts__, default=""):
vm_["ssh_host"] = vm_["name"]

if deploy_config:
wol_mac = config.get_cloud_config_value(
"wake_on_lan_mac", vm_, __opts__, default=""

@ -4627,7 +4627,7 @@ def reboot_host(kwargs=None, call=None):
if not host_ref.capability.rebootSupported:
raise SaltCloudSystemExit("Specified host system does not support reboot.")

if not host_ref.runtime.inMaintenanceMode:
if not host_ref.runtime.inMaintenanceMode and not force:
raise SaltCloudSystemExit(
"Specified host system is not in maintenance mode. Specify force=True to "
"force reboot even if there are virtual machines running or other operations "

@ -4715,3 +4715,67 @@ def create_datastore_cluster(kwargs=None, call=None):
return False

return {datastore_cluster_name: "created"}


def shutdown_host(kwargs=None, call=None):
"""
Shut down the specified host system in this VMware environment

.. note::

If the host system is not in maintenance mode, it will not be shut down. If you
want to shut down the host system regardless of whether it is in maintenance mode,
set ``force=True``. Default is ``force=False``.

CLI Example:

.. code-block:: bash

salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True]
"""
if call != "function":
raise SaltCloudSystemExit(
"The shutdown_host function must be called with " "-f or --function."
)

host_name = kwargs.get("host") if kwargs and "host" in kwargs else None
force = _str_to_bool(kwargs.get("force")) if kwargs and "force" in kwargs else False

if not host_name:
raise SaltCloudSystemExit("You must specify name of the host system.")

# Get the service instance
si = _get_si()

host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name)
if not host_ref:
raise SaltCloudSystemExit("Specified host system does not exist.")

if host_ref.runtime.connectionState == "notResponding":
raise SaltCloudSystemExit(
"Specified host system cannot be shut down in its current state (not responding)."
)

if not host_ref.capability.rebootSupported:
raise SaltCloudSystemExit("Specified host system does not support shutdown.")

if not host_ref.runtime.inMaintenanceMode and not force:
raise SaltCloudSystemExit(
"Specified host system is not in maintenance mode. Specify force=True to "
"force reboot even if there are virtual machines running or other operations "
"in progress."
)

try:
host_ref.ShutdownHost_Task(force)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Error while shutting down host %s: %s",
host_name,
exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG,
)
return {host_name: "failed to shut down host"}

return {host_name: "shut down host"}

@ -1,6 +1,6 @@
# coding: utf-8 -*-
'''
This directory contains external modules shipping with Salt. They are governed
under their respective licenses. See the COPYING file included with this
under their respective licenses. See the LICENSE file included with this
distribution for more information.
'''

@ -46,7 +46,7 @@ from zope.interface import implementer # type: ignore
|
|||
from salt.ext.tornado.concurrent import Future
|
||||
from salt.ext.tornado.escape import utf8
|
||||
from salt.ext.tornado import gen
|
||||
import tornado.ioloop
|
||||
import salt.ext.tornado.ioloop
|
||||
from salt.ext.tornado.log import app_log
|
||||
from salt.ext.tornado.netutil import Resolver
|
||||
from salt.ext.tornado.stack_context import NullContext, wrap
|
||||
|
@ -128,7 +128,7 @@ class TornadoReactor(PosixReactorBase):
|
|||
"""
|
||||
def __init__(self, io_loop=None):
|
||||
if not io_loop:
|
||||
io_loop = tornado.ioloop.IOLoop.current()
|
||||
io_loop = salt.ext.tornado.ioloop.IOLoop.current()
|
||||
self._io_loop = io_loop
|
||||
self._readers = {} # map of reader objects to fd
|
||||
self._writers = {} # map of writer objects to fd
|
||||
|
@ -352,7 +352,7 @@ def install(io_loop=None):
|
|||
|
||||
"""
|
||||
if not io_loop:
|
||||
io_loop = tornado.ioloop.IOLoop.current()
|
||||
io_loop = salt.ext.tornado.ioloop.IOLoop.current()
|
||||
reactor = TornadoReactor(io_loop)
|
||||
from twisted.internet.main import installReactor # type: ignore
|
||||
installReactor(reactor)
|
||||
|
@@ -374,22 +374,22 @@ class _FD(object):

    def doRead(self):
        if not self.lost:
            self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
            self.handler(self.fileobj, salt.ext.tornado.ioloop.IOLoop.READ)

    def doWrite(self):
        if not self.lost:
            self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
            self.handler(self.fileobj, salt.ext.tornado.ioloop.IOLoop.WRITE)

    def connectionLost(self, reason):
        if not self.lost:
            self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
            self.handler(self.fileobj, salt.ext.tornado.ioloop.IOLoop.ERROR)
        self.lost = True

    def logPrefix(self):
        return ''


class TwistedIOLoop(tornado.ioloop.IOLoop):
class TwistedIOLoop(salt.ext.tornado.ioloop.IOLoop):
    """IOLoop implementation that runs on Twisted.

    `TwistedIOLoop` implements the Tornado IOLoop interface on top of
@ -434,16 +434,16 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
|
|||
raise ValueError('fd %s added twice' % fd)
|
||||
fd, fileobj = self.split_fd(fd)
|
||||
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
|
||||
if events & tornado.ioloop.IOLoop.READ:
|
||||
if events & salt.ext.tornado.ioloop.IOLoop.READ:
|
||||
self.fds[fd].reading = True
|
||||
self.reactor.addReader(self.fds[fd])
|
||||
if events & tornado.ioloop.IOLoop.WRITE:
|
||||
if events & salt.ext.tornado.ioloop.IOLoop.WRITE:
|
||||
self.fds[fd].writing = True
|
||||
self.reactor.addWriter(self.fds[fd])
|
||||
|
||||
def update_handler(self, fd, events):
|
||||
fd, fileobj = self.split_fd(fd)
|
||||
if events & tornado.ioloop.IOLoop.READ:
|
||||
if events & salt.ext.tornado.ioloop.IOLoop.READ:
|
||||
if not self.fds[fd].reading:
|
||||
self.fds[fd].reading = True
|
||||
self.reactor.addReader(self.fds[fd])
|
||||
|
@ -451,7 +451,7 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
|
|||
if self.fds[fd].reading:
|
||||
self.fds[fd].reading = False
|
||||
self.reactor.removeReader(self.fds[fd])
|
||||
if events & tornado.ioloop.IOLoop.WRITE:
|
||||
if events & salt.ext.tornado.ioloop.IOLoop.WRITE:
|
||||
if not self.fds[fd].writing:
|
||||
self.fds[fd].writing = True
|
||||
self.reactor.addWriter(self.fds[fd])
|
||||
|
@ -534,7 +534,7 @@ class TwistedResolver(Resolver):
|
|||
self.io_loop = io_loop or IOLoop.current()
|
||||
# partial copy of twisted.names.client.createResolver, which doesn't
|
||||
# allow for a reactor to be passed in.
|
||||
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
|
||||
self.reactor = salt.ext.tornado.platform.twisted.TornadoReactor(io_loop)
|
||||
|
||||
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
|
||||
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
import tornado.escape
|
||||
import salt.ext.tornado.escape
|
||||
|
||||
from salt.ext.tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode, squeeze, recursive_unicode
|
||||
from salt.ext.tornado.util import unicode_type
|
||||
|
@ -136,7 +136,7 @@ linkify_tests = [
|
|||
class EscapeTestCase(unittest.TestCase):
|
||||
def test_linkify(self):
|
||||
for text, kwargs, html in linkify_tests:
|
||||
linked = tornado.escape.linkify(text, **kwargs)
|
||||
linked = salt.ext.tornado.escape.linkify(text, **kwargs)
|
||||
self.assertEqual(linked, html)
|
||||
|
||||
def test_xhtml_escape(self):
|
||||
|
|
|
@ -9,32 +9,32 @@ class ImportTest(unittest.TestCase):
|
|||
# Some of our modules are not otherwise tested. Import them
|
||||
# all (unless they have external dependencies) here to at
|
||||
# least ensure that there are no syntax errors.
|
||||
import tornado.auth
|
||||
import tornado.autoreload
|
||||
import tornado.concurrent
|
||||
import tornado.escape
|
||||
import tornado.gen
|
||||
import tornado.http1connection
|
||||
import tornado.httpclient
|
||||
import tornado.httpserver
|
||||
import tornado.httputil
|
||||
import tornado.ioloop
|
||||
import tornado.iostream
|
||||
import tornado.locale
|
||||
import tornado.log
|
||||
import tornado.netutil
|
||||
import tornado.options
|
||||
import tornado.process
|
||||
import tornado.simple_httpclient
|
||||
import tornado.stack_context
|
||||
import tornado.tcpserver
|
||||
import tornado.tcpclient
|
||||
import tornado.template
|
||||
import tornado.testing
|
||||
import tornado.util
|
||||
import tornado.web
|
||||
import tornado.websocket
|
||||
import tornado.wsgi
|
||||
import salt.ext.tornado.auth
|
||||
import salt.ext.tornado.autoreload
|
||||
import salt.ext.tornado.concurrent
|
||||
import salt.ext.tornado.escape
|
||||
import salt.ext.tornado.gen
|
||||
import salt.ext.tornado.http1connection
|
||||
import salt.ext.tornado.httpclient
|
||||
import salt.ext.tornado.httpserver
|
||||
import salt.ext.tornado.httputil
|
||||
import salt.ext.tornado.ioloop
|
||||
import salt.ext.tornado.iostream
|
||||
import salt.ext.tornado.locale
|
||||
import salt.ext.tornado.log
|
||||
import salt.ext.tornado.netutil
|
||||
import salt.ext.tornado.options
|
||||
import salt.ext.tornado.process
|
||||
import salt.ext.tornado.simple_httpclient
|
||||
import salt.ext.tornado.stack_context
|
||||
import salt.ext.tornado.tcpserver
|
||||
import salt.ext.tornado.tcpclient
|
||||
import salt.ext.tornado.template
|
||||
import salt.ext.tornado.testing
|
||||
import salt.ext.tornado.util
|
||||
import salt.ext.tornado.web
|
||||
import salt.ext.tornado.websocket
|
||||
import salt.ext.tornado.wsgi
|
||||
|
||||
# for modules with dependencies, if those dependencies can be loaded,
|
||||
# load them too.
|
||||
|
@ -45,4 +45,4 @@ class ImportTest(unittest.TestCase):
|
|||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
import tornado.curl_httpclient
|
||||
import salt.ext.tornado.curl_httpclient
|
||||
|
|
|
@ -6,7 +6,7 @@ import os
|
|||
import shutil
|
||||
import tempfile
|
||||
|
||||
import tornado.locale
|
||||
import salt.ext.tornado.locale
|
||||
from salt.ext.tornado.escape import utf8, to_unicode
|
||||
from salt.ext.tornado.test.util import unittest, skipOnAppEngine
|
||||
from salt.ext.tornado.util import unicode_type
|
||||
|
@ -17,25 +17,25 @@ class TranslationLoaderTest(unittest.TestCase):
|
|||
SAVE_VARS = ['_translations', '_supported_locales', '_use_gettext']
|
||||
|
||||
def clear_locale_cache(self):
|
||||
if hasattr(tornado.locale.Locale, '_cache'):
|
||||
del tornado.locale.Locale._cache
|
||||
if hasattr(salt.ext.tornado.locale.Locale, '_cache'):
|
||||
del salt.ext.tornado.locale.Locale._cache
|
||||
|
||||
def setUp(self):
|
||||
self.saved = {}
|
||||
for var in TranslationLoaderTest.SAVE_VARS:
|
||||
self.saved[var] = getattr(tornado.locale, var)
|
||||
self.saved[var] = getattr(salt.ext.tornado.locale, var)
|
||||
self.clear_locale_cache()
|
||||
|
||||
def tearDown(self):
|
||||
for k, v in self.saved.items():
|
||||
setattr(tornado.locale, k, v)
|
||||
setattr(salt.ext.tornado.locale, k, v)
|
||||
self.clear_locale_cache()
|
||||
|
||||
def test_csv(self):
|
||||
tornado.locale.load_translations(
|
||||
salt.ext.tornado.locale.load_translations(
|
||||
os.path.join(os.path.dirname(__file__), 'csv_translations'))
|
||||
locale = tornado.locale.get("fr_FR")
|
||||
self.assertTrue(isinstance(locale, tornado.locale.CSVLocale))
|
||||
locale = salt.ext.tornado.locale.get("fr_FR")
|
||||
self.assertTrue(isinstance(locale, salt.ext.tornado.locale.CSVLocale))
|
||||
self.assertEqual(locale.translate("school"), u"\u00e9cole")
|
||||
|
||||
# tempfile.mkdtemp is not available on app engine.
|
||||
|
@ -53,19 +53,19 @@ class TranslationLoaderTest(unittest.TestCase):
|
|||
try:
|
||||
with open(os.path.join(tmpdir, 'fr_FR.csv'), 'wb') as f:
|
||||
f.write(char_data.encode(encoding))
|
||||
tornado.locale.load_translations(tmpdir)
|
||||
locale = tornado.locale.get('fr_FR')
|
||||
self.assertIsInstance(locale, tornado.locale.CSVLocale)
|
||||
salt.ext.tornado.locale.load_translations(tmpdir)
|
||||
locale = salt.ext.tornado.locale.get('fr_FR')
|
||||
self.assertIsInstance(locale, salt.ext.tornado.locale.CSVLocale)
|
||||
self.assertEqual(locale.translate("school"), u"\u00e9cole")
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
def test_gettext(self):
|
||||
tornado.locale.load_gettext_translations(
|
||||
salt.ext.tornado.locale.load_gettext_translations(
|
||||
os.path.join(os.path.dirname(__file__), 'gettext_translations'),
|
||||
"tornado_test")
|
||||
locale = tornado.locale.get("fr_FR")
|
||||
self.assertTrue(isinstance(locale, tornado.locale.GettextLocale))
|
||||
locale = salt.ext.tornado.locale.get("fr_FR")
|
||||
self.assertTrue(isinstance(locale, salt.ext.tornado.locale.GettextLocale))
|
||||
self.assertEqual(locale.translate("school"), u"\u00e9cole")
|
||||
self.assertEqual(locale.pgettext("law", "right"), u"le droit")
|
||||
self.assertEqual(locale.pgettext("good", "right"), u"le bien")
|
||||
|
@ -77,7 +77,7 @@ class TranslationLoaderTest(unittest.TestCase):
|
|||
|
||||
class LocaleDataTest(unittest.TestCase):
|
||||
def test_non_ascii_name(self):
|
||||
name = tornado.locale.LOCALE_NAMES['es_LA']['name']
|
||||
name = salt.ext.tornado.locale.LOCALE_NAMES['es_LA']['name']
|
||||
self.assertTrue(isinstance(name, unicode_type))
|
||||
self.assertEqual(name, u'Espa\u00f1ol')
|
||||
self.assertEqual(utf8(name), b'Espa\xc3\xb1ol')
|
||||
|
@ -85,7 +85,7 @@ class LocaleDataTest(unittest.TestCase):
|
|||
|
||||
class EnglishTest(unittest.TestCase):
|
||||
def test_format_date(self):
|
||||
locale = tornado.locale.get('en_US')
|
||||
locale = salt.ext.tornado.locale.get('en_US')
|
||||
date = datetime.datetime(2013, 4, 28, 18, 35)
|
||||
self.assertEqual(locale.format_date(date, full_format=True),
|
||||
'April 28, 2013 at 6:35 pm')
|
||||
|
@ -114,18 +114,18 @@ class EnglishTest(unittest.TestCase):
|
|||
'%s %d, %d' % (locale._months[date.month - 1], date.day, date.year))
|
||||
|
||||
def test_friendly_number(self):
|
||||
locale = tornado.locale.get('en_US')
|
||||
locale = salt.ext.tornado.locale.get('en_US')
|
||||
self.assertEqual(locale.friendly_number(1000000), '1,000,000')
|
||||
|
||||
def test_list(self):
|
||||
locale = tornado.locale.get('en_US')
|
||||
locale = salt.ext.tornado.locale.get('en_US')
|
||||
self.assertEqual(locale.list([]), '')
|
||||
self.assertEqual(locale.list(['A']), 'A')
|
||||
self.assertEqual(locale.list(['A', 'B']), 'A and B')
|
||||
self.assertEqual(locale.list(['A', 'B', 'C']), 'A, B and C')
|
||||
|
||||
def test_format_day(self):
|
||||
locale = tornado.locale.get('en_US')
|
||||
locale = salt.ext.tornado.locale.get('en_US')
|
||||
date = datetime.datetime(2013, 4, 28, 18, 35)
|
||||
self.assertEqual(locale.format_day(date=date, dow=True), 'Sunday, April 28')
|
||||
self.assertEqual(locale.format_day(date=date, dow=False), 'April 28')
|
@ -165,7 +165,7 @@ def main():
|
|||
add_parse_callback(
|
||||
lambda: logging.getLogger().handlers[0].addFilter(log_counter))
|
||||
|
||||
import tornado.testing
|
||||
import salt.ext.tornado.testing
|
||||
kwargs = {}
|
||||
if sys.version_info >= (3, 2):
|
||||
# HACK: unittest.main will make its own changes to the warning
|
||||
|
@ -176,7 +176,7 @@ def main():
|
|||
kwargs['warnings'] = False
|
||||
kwargs['testRunner'] = TornadoTextTestRunner
|
||||
try:
|
||||
tornado.testing.main(**kwargs)
|
||||
salt.ext.tornado.testing.main(**kwargs)
|
||||
finally:
|
||||
# The tests should run clean; consider it a failure if they logged
|
||||
# any warnings or errors. We'd like to ban info logs too, but
|
||||
|
|
|
@ -5,7 +5,7 @@ import re
|
|||
import sys
|
||||
import datetime
|
||||
|
||||
import tornado.escape
|
||||
import salt.ext.tornado.escape
|
||||
from salt.ext.tornado.escape import utf8
|
||||
from salt.ext.tornado.util import raise_exc_info, Configurable, exec_in, ArgReplacer, timedelta_to_seconds, import_object, re_unescape, is_finalizing, PY3
|
||||
from salt.ext.tornado.test.util import unittest
|
||||
|
@ -194,13 +194,13 @@ class ImportObjectTest(unittest.TestCase):
|
|||
self.assertIs(import_object(u'tornado.escape.utf8'), utf8)
|
||||
|
||||
def test_import_module(self):
|
||||
self.assertIs(import_object('tornado.escape'), tornado.escape)
|
||||
self.assertIs(import_object('tornado.escape'), salt.ext.tornado.escape)
|
||||
|
||||
def test_import_module_unicode(self):
|
||||
# The internal implementation of __import__ differs depending on
|
||||
# whether the thing being imported is a module or not.
|
||||
# This variant requires a byte string in python 2.
|
||||
self.assertIs(import_object(u'tornado.escape'), tornado.escape)
|
||||
self.assertIs(import_object(u'tornado.escape'), salt.ext.tornado.escape)
|
||||
|
||||
|
||||
class ReUnescapeTest(unittest.TestCase):
|
||||
|
|
|
@ -15,7 +15,7 @@ from salt.ext.tornado.test.util import unittest, skipBefore35, exec_test
|
|||
from salt.ext.tornado.web import Application, RequestHandler
|
||||
|
||||
try:
|
||||
import tornado.websocket # noqa
|
||||
import salt.ext.tornado.websocket # noqa
|
||||
from salt.ext.tornado.util import _websocket_mask_python
|
||||
except ImportError:
|
||||
# The unittest module presents misleading errors on ImportError
|
||||
|
|
|
@ -75,12 +75,12 @@ import stat
|
|||
import sys
|
||||
import threading
|
||||
import time
|
||||
import salt.ext.tornado as tornado
|
||||
import traceback
|
||||
import types
|
||||
from inspect import isclass
|
||||
from io import BytesIO
|
||||
|
||||
import salt.ext.tornado
|
||||
from salt.ext.tornado.concurrent import Future
|
||||
from salt.ext.tornado import escape
|
||||
from salt.ext.tornado import gen
|
||||
|
@ -288,7 +288,7 @@ class RequestHandler(object):
|
|||
def clear(self):
|
||||
"""Resets all headers and content for this response."""
|
||||
self._headers = httputil.HTTPHeaders({
|
||||
"Server": "TornadoServer/%s" % tornado.version,
|
||||
"Server": "TornadoServer/%s" % salt.ext.tornado.version,
|
||||
"Content-Type": "text/html; charset=UTF-8",
|
||||
"Date": httputil.format_timestamp(time.time()),
|
||||
})
|
||||
|
|
|
@ -296,7 +296,7 @@ class WSGIContainer(object):
|
|||
if "content-type" not in header_set:
|
||||
headers.append(("Content-Type", "text/html; charset=UTF-8"))
|
||||
if "server" not in header_set:
|
||||
headers.append(("Server", "TornadoServer/%s" % tornado.version))
|
||||
headers.append(("Server", "TornadoServer/%s" % salt.ext.tornado.version))
|
||||
|
||||
start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
|
||||
header_obj = httputil.HTTPHeaders()
|
||||
|
|
|
@@ -280,7 +280,7 @@ def _linux_gpu_data():

    gpus = []
    for gpu in devs:
        vendor_strings = gpu["Vendor"].lower().split()
        vendor_strings = re.split("[^A-Za-z0-9]", gpu["Vendor"].lower())
        # default vendor to 'unknown', overwrite if we match a known one
        vendor = "unknown"
        for name in known_vendors:

@@ -13,7 +13,6 @@
    logger instance uses our ``salt.log.setup.SaltLoggingClass``.
"""

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals

import logging
@@ -26,7 +25,6 @@ import time
import traceback
import types

# Import salt libs
# pylint: disable=unused-import
from salt._logging import (
    LOG_COLORS,
@@ -50,12 +48,8 @@ from salt._logging.impl import (
    SaltLogRecord,
)
from salt._logging.impl import set_log_record_factory as setLogRecordFactory

# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import (  # pylint: disable=import-error,no-name-in-module
    urlparse,
)
from salt.ext.six.moves.urllib.parse import urlparse

# pylint: enable=unused-import

@@ -881,7 +875,14 @@ def __remove_temp_logging_handler():
    logging.captureWarnings(True)


def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
def __global_logging_exception_handler(
    exc_type,
    exc_value,
    exc_traceback,
    _logger=logging.getLogger(__name__),
    _stderr=sys.__stderr__,
    _format_exception=traceback.format_exception,
):
    """
    This function will log all un-handled python exceptions.
    """
@ -890,19 +891,37 @@ def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
|
|||
# Stop the logging queue listener thread
|
||||
if is_mp_logging_listener_configured():
|
||||
shutdown_multiprocessing_logging_listener()
|
||||
else:
|
||||
# Log the exception
|
||||
logging.getLogger(__name__).error(
|
||||
"An un-handled exception was caught by salt's global exception "
|
||||
"handler:\n%s: %s\n%s",
|
||||
return
|
||||
|
||||
# Log the exception
|
||||
msg = "An un-handled exception was caught by salt's global exception handler:"
|
||||
try:
|
||||
msg = "{}\n{}: {}\n{}".format(
|
||||
msg,
|
||||
exc_type.__name__,
|
||||
exc_value,
|
||||
"".join(
|
||||
traceback.format_exception(exc_type, exc_value, exc_traceback)
|
||||
).strip(),
|
||||
"".join(_format_exception(exc_type, exc_value, exc_traceback)).strip(),
|
||||
)
|
||||
# Call the original sys.excepthook
|
||||
except Exception: # pylint: disable=broad-except
|
||||
msg = "{}\n{}: {}\n(UNABLE TO FORMAT TRACEBACK)".format(
|
||||
msg, exc_type.__name__, exc_value,
|
||||
)
|
||||
try:
|
||||
_logger.error(msg)
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# Python is shutting down and logging has been set to None already
|
||||
try:
|
||||
_stderr.write(msg + "\n")
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# We have also lost reference to sys.__stderr__ ?!
|
||||
print(msg)
|
||||
|
||||
# Call the original sys.excepthook
|
||||
try:
|
||||
sys.__excepthook__(exc_type, exc_value, exc_traceback)
|
||||
except Exception: # pylint: disable=broad-except
|
||||
# Python is shutting down and sys has been set to None already
|
||||
pass
|
||||
|
||||
|
||||
# Set our own exception handler as the one to use
|
||||
|
|
|
@@ -58,6 +58,9 @@ LEA = salt.utils.path.which_bin(
)
LE_LIVE = "/etc/letsencrypt/live/"

if salt.utils.platform.is_freebsd():
    LE_LIVE = "/usr/local" + LE_LIVE


def __virtual__():
    """

@ -223,9 +223,7 @@ def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwa
|
|||
return {}
|
||||
|
||||
if pkgs:
|
||||
log.debug(
|
||||
"Removing these fileset(s)/rpm package(s) {0}: {1}".format(name, targets)
|
||||
)
|
||||
log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)
|
||||
|
||||
# Get a list of the currently installed pkgs.
|
||||
old = list_pkgs()
|
||||
|
@ -310,9 +308,7 @@ def remove(name=None, pkgs=None, **kwargs):
|
|||
return {}
|
||||
|
||||
if pkgs:
|
||||
log.debug(
|
||||
"Removing these fileset(s)/rpm package(s) {0}: {1}".format(name, targets)
|
||||
)
|
||||
log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)
|
||||
|
||||
errors = []
|
||||
|
||||
|
|
|
@@ -177,7 +177,7 @@ def latest_version(*names, **kwargs):
    """
    refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))

    if len(names) == 0:
    if not names:
        return ""

    ret = {}

@ -270,7 +270,7 @@ def latest_version(*names, **kwargs):
|
|||
fromrepo = kwargs.pop("fromrepo", None)
|
||||
cache_valid_time = kwargs.pop("cache_valid_time", 0)
|
||||
|
||||
if len(names) == 0:
|
||||
if not names:
|
||||
return ""
|
||||
ret = {}
|
||||
# Initialize the dict with empty strings
|
||||
|
@ -634,7 +634,7 @@ def install(
|
|||
if not fromrepo and repo:
|
||||
fromrepo = repo
|
||||
|
||||
if pkg_params is None or len(pkg_params) == 0:
|
||||
if not pkg_params:
|
||||
return {}
|
||||
|
||||
cmd_prefix = []
|
||||
|
|
|
@ -121,7 +121,7 @@ def _check_load_paths(load_path):
|
|||
else:
|
||||
log.info("Invalid augeas_cfg load_path entry: %s removed", _path)
|
||||
|
||||
if len(_paths) == 0:
|
||||
if not _paths:
|
||||
return None
|
||||
|
||||
return ":".join(_paths)
|
||||
|
|
|
@ -238,8 +238,7 @@ def delete_queue(name, region, opts=None, user=None):
|
|||
queues = list_queues(region, opts, user)
|
||||
url_map = _parse_queue_list(queues)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.debug("map %s", six.text_type(url_map))
|
||||
log.debug("map %s", url_map)
|
||||
if name in url_map:
|
||||
delete = {"queue-url": url_map[name]}
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ def default_security_rule_get(name, security_group, resource_group, **kwargs):
|
|||
"error": "Unable to find {0} in {1}!".format(name, security_group)
|
||||
}
|
||||
except KeyError as exc:
|
||||
log.error("Unable to find {0} in {1}!".format(name, security_group))
|
||||
log.error("Unable to find %s in %s!", name, security_group)
|
||||
result = {"error": str(exc)}
|
||||
|
||||
return result
|
||||
|
@ -227,7 +227,7 @@ def default_security_rules_list(security_group, resource_group, **kwargs):
|
|||
try:
|
||||
result = secgroup["default_security_rules"]
|
||||
except KeyError as exc:
|
||||
log.error("No default security rules found for {0}!".format(security_group))
|
||||
log.error("No default security rules found for %s!", security_group)
|
||||
result = {"error": str(exc)}
|
||||
|
||||
return result
|
||||
|
@ -362,9 +362,7 @@ def security_rule_create_or_update(
|
|||
# pylint: disable=eval-used
|
||||
if not eval(params[0]) and not eval(params[1]):
|
||||
log.error(
|
||||
"Either the {0} or {1} parameter must be provided!".format(
|
||||
params[0], params[1]
|
||||
)
|
||||
"Either the %s or %s parameter must be provided!", params[0], params[1]
|
||||
)
|
||||
return False
|
||||
# pylint: disable=eval-used
|
||||
|
|
|
@ -105,10 +105,7 @@ def attach_(dev=None):
|
|||
if "cache" in data:
|
||||
res[dev] = attach_(dev)
|
||||
|
||||
if res:
|
||||
return res
|
||||
else:
|
||||
return None
|
||||
return res if res else None
|
||||
|
||||
bcache = uuid(dev)
|
||||
if bcache:
|
||||
|
@ -158,10 +155,7 @@ def detach(dev=None):
|
|||
if "cache" in data:
|
||||
res[dev] = detach(dev)
|
||||
|
||||
if res:
|
||||
return res
|
||||
else:
|
||||
return None
|
||||
return res if res else None
|
||||
|
||||
log.debug("Detaching %s", dev)
|
||||
if not _bcsys(dev, "detach", "goaway", "error", "Error detaching {0}".format(dev)):
|
||||
|
@ -737,7 +731,7 @@ def _bdev(dev=None):
|
|||
if not dev:
|
||||
return False
|
||||
else:
|
||||
return _devbase(os.path.realpath(os.path.join(dev, "../")))
|
||||
return _devbase(os.path.dirname(dev))
|
||||
|
||||
|
||||
def _bcpath(dev):
|
||||
|
|
|
@ -163,7 +163,7 @@ def add(name, beacon_data, **kwargs):
|
|||
else:
|
||||
beacon_name = name
|
||||
|
||||
if beacon_name not in list_available(return_yaml=False):
|
||||
if beacon_name not in list_available(return_yaml=False, **kwargs):
|
||||
ret["comment"] = 'Beacon "{0}" is not available.'.format(beacon_name)
|
||||
return ret
|
||||
|
||||
|
@ -201,7 +201,9 @@ def add(name, beacon_data, **kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon add failed."
|
||||
return ret
|
||||
|
||||
try:
|
||||
with salt.utils.event.get_event(
|
||||
|
@ -234,6 +236,7 @@ def add(name, beacon_data, **kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon add failed."
|
||||
return ret
|
||||
|
||||
|
@ -262,7 +265,7 @@ def modify(name, beacon_data, **kwargs):
|
|||
|
||||
if "test" in kwargs and kwargs["test"]:
|
||||
ret["result"] = True
|
||||
ret["comment"] = "Beacon: {0} would be added.".format(name)
|
||||
ret["comment"] = "Beacon: {0} would be modified.".format(name)
|
||||
else:
|
||||
try:
|
||||
# Attempt to load the beacon module so we have access to the validate function
|
||||
|
@ -289,13 +292,15 @@ def modify(name, beacon_data, **kwargs):
|
|||
ret["result"] = False
|
||||
ret["comment"] = (
|
||||
"Beacon {0} configuration invalid, "
|
||||
"not adding.\n{1}".format(name, vcomment)
|
||||
"not modifying.\n{1}".format(name, vcomment)
|
||||
)
|
||||
return ret
|
||||
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon modify failed."
|
||||
return ret
|
||||
|
||||
if not valid:
|
||||
ret["result"] = False
|
||||
|
@ -364,7 +369,8 @@ def modify(name, beacon_data, **kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["comment"] = "Event module not available. Beacon add failed."
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon modify failed."
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -421,13 +427,14 @@ def delete(name, **kwargs):
|
|||
)
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon add failed."
|
||||
return ret
|
||||
|
||||
|
||||
def save(**kwargs):
|
||||
"""
|
||||
Save all beacons on the minion
|
||||
Save all configured beacons to the minion config
|
||||
|
||||
:return: Boolean and status message on success or failure of save.
|
||||
|
||||
|
@ -461,7 +468,7 @@ def save(**kwargs):
|
|||
except (IOError, OSError):
|
||||
ret[
|
||||
"comment"
|
||||
] = "Unable to write to beacons file at {0}. Check " "permissions.".format(sfn)
|
||||
] = "Unable to write to beacons file at {0}. Check permissions.".format(sfn)
|
||||
ret["result"] = False
|
||||
return ret
|
||||
|
||||
|
@ -513,6 +520,7 @@ def enable(**kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacons enable job failed."
|
||||
return ret
|
||||
|
||||
|
@ -564,6 +572,7 @@ def disable(**kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacons enable job failed."
|
||||
return ret
|
||||
|
||||
|
@ -650,13 +659,14 @@ def enable_beacon(name, **kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon enable job failed."
|
||||
return ret
|
||||
|
||||
|
||||
def disable_beacon(name, **kwargs):
|
||||
"""
|
||||
Disable beacon on the minion
|
||||
Disable a beacon on the minion
|
||||
|
||||
:name: Name of the beacon to disable.
|
||||
:return: Boolean and status message on success or failure of disable.
|
||||
|
@ -676,7 +686,7 @@ def disable_beacon(name, **kwargs):
|
|||
return ret
|
||||
|
||||
if "test" in kwargs and kwargs["test"]:
|
||||
ret["comment"] = "Beacons would be enabled."
|
||||
ret["comment"] = "Beacons would be disabled."
|
||||
else:
|
||||
_beacons = list_(return_yaml=False, **kwargs)
|
||||
if name not in _beacons:
|
||||
|
@ -724,6 +734,7 @@ def disable_beacon(name, **kwargs):
|
|||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon disable job failed."
|
||||
return ret
|
||||
|
||||
|
@ -762,9 +773,14 @@ def reset(**kwargs):
|
|||
if ret is not None:
|
||||
ret["comment"] = event_ret["comment"]
|
||||
else:
|
||||
ret["comment"] = "Beacon reset event never received"
|
||||
ret[
|
||||
"comment"
|
||||
] = "Did not receive the beacon reset event before the timeout of {}s".format(
|
||||
kwargs.get("timeout", default_event_wait)
|
||||
)
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret["comment"] = "Event module not available. Beacon disable job failed."
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Event module not available. Beacon reset job failed."
|
||||
return ret
|
||||
|
|
|
@@ -191,7 +191,7 @@ def _delete_resource(
    orig_wait = wait
    while wait > 0:
        r = s(name=name, conn=conn)
        if not r or (r and r[0].get(status_param) == status_gone):
        if not r or r[0].get(status_param) == status_gone:
            log.info("%s %s deleted.", desc.title(), name)
            return True
        sleep = wait if wait % 60 == wait else 60

@ -116,6 +116,14 @@ def describe_topic(name, region=None, key=None, keyid=None, profile=None):
|
|||
ret["Attributes"] = get_topic_attributes(
|
||||
arn, region=region, key=key, keyid=keyid, profile=profile
|
||||
)
|
||||
# Grab extended attributes for the above subscriptions
|
||||
for sub in range(len(ret["Subscriptions"])):
|
||||
sub_arn = ret["Subscriptions"][sub]["SubscriptionArn"]
|
||||
if not sub_arn.startswith("arn:aws:sns:"):
|
||||
# Sometimes a sub is in e.g. PendingAccept or other
|
||||
# wierd states and doesn't have an ARN yet
|
||||
log.debug("Subscription with invalid ARN %s skipped...", sub_arn)
|
||||
continue
|
||||
return ret
|
||||
|
||||
|
||||
|
@ -382,6 +390,17 @@ def unsubscribe(SubscriptionArn, region=None, key=None, keyid=None, profile=None
|
|||
|
||||
salt myminion boto3_sns.unsubscribe my_subscription_arn region=us-east-1
|
||||
"""
|
||||
if not SubscriptionArn.startswith("arn:aws:sns:"):
|
||||
# Grrr, AWS sent us an ARN that's NOT and ARN....
|
||||
# This can happen if, for instance, a subscription is left in PendingAcceptance or similar
|
||||
# Note that anything left in PendingConfirmation will be auto-deleted by AWS after 30 days
|
||||
# anyway, so this isn't as ugly a hack as it might seem at first...
|
||||
log.info(
|
||||
"Invalid subscription ARN `%s` passed - likely a PendingConfirmaton or such. "
|
||||
"Skipping unsubscribe attempt as it would almost certainly fail...",
|
||||
SubscriptionArn,
|
||||
)
|
||||
return True
|
||||
subs = list_subscriptions(region=region, key=key, keyid=keyid, profile=profile)
|
||||
sub = [s for s in subs if s.get("SubscriptionArn") == SubscriptionArn]
|
||||
if not sub:
|
||||
|
|
|
@ -485,7 +485,7 @@ def update(
|
|||
_asg.resume_processes()
|
||||
# suspend any that are specified. Note that the boto default of empty
|
||||
# list suspends all; don't do that.
|
||||
if suspended_processes is not None and len(suspended_processes) > 0:
|
||||
if suspended_processes:
|
||||
_asg.suspend_processes(suspended_processes)
|
||||
log.info("Updated ASG %s", name)
|
||||
# ### scaling policies
|
||||
|
|
|
@ -220,7 +220,7 @@ def describe(Name, region=None, key=None, keyid=None, profile=None):
|
|||
try:
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
trails = conn.describe_trails(trailNameList=[Name])
|
||||
if trails and len(trails.get("trailList", [])) > 0:
|
||||
if trails and trails.get("trailList"):
|
||||
keys = (
|
||||
"Name",
|
||||
"S3BucketName",
|
||||
|
|
|
@ -95,7 +95,7 @@ def get_alarm(name, region=None, key=None, keyid=None, profile=None):
|
|||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
|
||||
alarms = conn.describe_alarms(alarm_names=[name])
|
||||
if len(alarms) == 0:
|
||||
if not alarms:
|
||||
return None
|
||||
if len(alarms) > 1:
|
||||
log.error("multiple alarms matched name '%s'", name)
|
||||
|
|
|
@ -102,7 +102,7 @@ def exists(Name, region=None, key=None, keyid=None, profile=None):
|
|||
|
||||
try:
|
||||
events = conn.list_rules(NamePrefix=Name)
|
||||
if len(events) == 0:
|
||||
if not events:
|
||||
return {"exists": False}
|
||||
for rule in events.get("Rules", []):
|
||||
if rule.get("Name", None) == Name:
|
||||
|
|
|
@ -339,7 +339,7 @@ def extract_index(index_data, global_index=False):
|
|||
"read": parsed_data["read_capacity_units"],
|
||||
"write": parsed_data["write_capacity_units"],
|
||||
}
|
||||
if parsed_data["name"] and len(keys) > 0:
|
||||
if parsed_data["name"] and keys:
|
||||
if global_index:
|
||||
if parsed_data.get("keys_only") and parsed_data.get("includes"):
|
||||
raise SaltInvocationError(
|
||||
|
|
|
@ -942,7 +942,7 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):
|
|||
tags = []
|
||||
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
|
||||
result = client.get_all_tags(filters={"resource-id": instance_id})
|
||||
if len(result) > 0:
|
||||
if result:
|
||||
for tag in result:
|
||||
tags.append({tag.name: tag.value})
|
||||
else:
|
||||
|
@ -1538,7 +1538,7 @@ def get_attribute(
|
|||
if len(instances) > 1:
|
||||
log.error("Found more than one EC2 instance matching the criteria.")
|
||||
return False
|
||||
elif len(instances) < 1:
|
||||
elif not instances:
|
||||
log.error("Found no EC2 instance matching the criteria.")
|
||||
return False
|
||||
instance_id = instances[0]
|
||||
|
|
|
@ -264,6 +264,7 @@ def create_function(
|
|||
.. code-block:: bash
|
||||
|
||||
salt myminion boto_lamba.create_function my_function python2.7 my_role my_file.my_function my_function.zip
|
||||
salt myminion boto_lamba.create_function my_function python2.7 my_role my_file.my_function salt://files/my_function.zip
|
||||
|
||||
"""
|
||||
|
||||
|
@ -276,6 +277,13 @@ def create_function(
|
|||
"Either ZipFile must be specified, or "
|
||||
"S3Bucket and S3Key must be provided."
|
||||
)
|
||||
if "://" in ZipFile: # Looks like a remote URL to me...
|
||||
dlZipFile = __salt__["cp.cache_file"](path=ZipFile)
|
||||
if dlZipFile is False:
|
||||
ret["result"] = False
|
||||
ret["comment"] = "Failed to cache ZipFile `{0}`.".format(ZipFile)
|
||||
return ret
|
||||
ZipFile = dlZipFile
|
||||
code = {
|
||||
"ZipFile": _filedata(ZipFile),
|
||||
}
|
||||
|
@ -1180,9 +1188,8 @@ def describe_event_source_mapping(
|
|||
salt myminion boto_lambda.describe_event_source_mapping uuid
|
||||
|
||||
"""
|
||||
|
||||
ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName)
|
||||
if len(ids) < 1:
|
||||
if not ids:
|
||||
return {"event_source_mapping": None}
|
||||
|
||||
UUID = ids[0]
|
||||
|
|
|
@ -398,10 +398,20 @@ def convert_to_group_ids(
|
|||
)
|
||||
if not group_id:
|
||||
# Security groups are a big deal - need to fail if any can't be resolved...
|
||||
raise CommandExecutionError(
|
||||
"Could not resolve Security Group name "
|
||||
"{0} to a Group ID".format(group)
|
||||
)
|
||||
# But... if we're running in test mode, it may just be that the SG is scheduled
|
||||
# to be created, and thus WOULD have been there if running "for real"...
|
||||
if __opts__["test"]:
|
||||
log.warn(
|
||||
"Security Group `%s` could not be resolved to an ID. This may "
|
||||
"cause a failure when not running in test mode.",
|
||||
group,
|
||||
)
|
||||
return []
|
||||
else:
|
||||
raise CommandExecutionError(
|
||||
"Could not resolve Security Group name "
|
||||
"{0} to a Group ID".format(group)
|
||||
)
|
||||
else:
|
||||
group_ids.append(six.text_type(group_id))
|
||||
log.debug("security group contents %s post-conversion", group_ids)
|
||||
|
|
|
@ -2948,7 +2948,7 @@ def route_exists(
|
|||
"vpc_peering_connection_id": vpc_peering_connection_id,
|
||||
}
|
||||
route_comp = set(route_dict.items()) ^ set(route_check.items())
|
||||
if len(route_comp) == 0:
|
||||
if not route_comp:
|
||||
log.info("Route %s exists.", destination_cidr_block)
|
||||
return {"exists": True}
|
||||
|
||||
|
|
|
@@ -207,7 +207,7 @@ def _import_platform_generator(platform):
    The generator class is identified looking under the <platform> module
    for a class inheriting the `ACLGenerator` class.
    """
    log.debug("Using platform: {plat}".format(plat=platform))
    log.debug("Using platform: %s", platform)
    for mod_name, mod_obj in inspect.getmembers(capirca.aclgen):
        if mod_name == platform and inspect.ismodule(mod_obj):
            for plat_obj_name, plat_obj in inspect.getmembers(
@ -216,15 +216,9 @@ def _import_platform_generator(platform):
|
|||
if inspect.isclass(plat_obj) and issubclass(
|
||||
plat_obj, capirca.lib.aclgenerator.ACLGenerator
|
||||
):
|
||||
log.debug(
|
||||
"Identified Capirca class {cls} for {plat}".format(
|
||||
cls=plat_obj, plat=platform
|
||||
)
|
||||
)
|
||||
log.debug("Identified Capirca class %s for %s", plat_obj, platform)
|
||||
return plat_obj
|
||||
log.error(
|
||||
"Unable to identify any Capirca plaform class for {plat}".format(plat=platform)
|
||||
)
|
||||
log.error("Unable to identify any Capirca plaform class for %s", platform)
|
||||
|
||||
|
||||
def _get_services_mapping():
|
||||
|
@ -267,9 +261,8 @@ def _get_services_mapping():
|
|||
log.error("Did not read that properly:")
|
||||
log.error(line)
|
||||
log.error(
|
||||
"Please report the above error: {port} does not seem a valid port value!".format(
|
||||
port=port
|
||||
)
|
||||
"Please report the above error: %s does not seem a valid port value!",
|
||||
port,
|
||||
)
|
||||
_SERVICES[srv_name]["protocol"].append(protocol)
|
||||
return _SERVICES
|
||||
|
@ -501,11 +494,7 @@ def _get_term_object(
|
|||
"""
|
||||
Return an instance of the ``_Term`` class given the term options.
|
||||
"""
|
||||
log.debug(
|
||||
"Generating config for term {tname} under filter {fname}".format(
|
||||
tname=term_name, fname=filter_name
|
||||
)
|
||||
)
|
||||
log.debug("Generating config for term %s under filter %s", term_name, filter_name)
|
||||
term = _Term()
|
||||
term.name = term_name
|
||||
term_opts = {}
|
||||
|
@ -588,7 +577,7 @@ def _get_policy_object(
|
|||
log.debug(six.text_type(policy))
|
||||
platform_generator = _import_platform_generator(platform)
|
||||
policy_config = platform_generator(policy, 2)
|
||||
log.debug("Generating policy config for {platform}:".format(platform=platform))
|
||||
log.debug("Generating policy config for %s:", platform)
|
||||
log.debug(six.text_type(policy_config))
|
||||
return policy_config
|
||||
|
||||
|
|
|
@@ -215,6 +215,6 @@ def _exec_cmd(*args, **kwargs):
        ]
    )
    cmd_exec = "{0}{1}".format(cmd_args, cmd_kwargs)
    log.debug("Chef command: {0}".format(cmd_exec))
    log.debug("Chef command: %s", cmd_exec)

    return __salt__["cmd.run_all"](cmd_exec, python_shell=False)

@ -16,12 +16,12 @@ import tempfile
|
|||
# Import salt libs
|
||||
import salt.utils.data
|
||||
import salt.utils.platform
|
||||
from requests.structures import CaseInsensitiveDict
|
||||
from salt.exceptions import (
|
||||
CommandExecutionError,
|
||||
CommandNotFoundError,
|
||||
SaltInvocationError,
|
||||
)
|
||||
from salt.utils.data import CaseInsensitiveDict
|
||||
from salt.utils.versions import LooseVersion as _LooseVersion
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
|
|
@ -852,11 +852,11 @@ def _run(
|
|||
)
|
||||
log.error(log_callback(msg))
|
||||
if ret["stdout"]:
|
||||
log.log(output_loglevel, "stdout: {0}".format(log_callback(ret["stdout"])))
|
||||
log.log(output_loglevel, "stdout: %s", log_callback(ret["stdout"]))
|
||||
if ret["stderr"]:
|
||||
log.log(output_loglevel, "stderr: {0}".format(log_callback(ret["stderr"])))
|
||||
log.log(output_loglevel, "stderr: %s", log_callback(ret["stderr"]))
|
||||
if ret["retcode"]:
|
||||
log.log(output_loglevel, "retcode: {0}".format(ret["retcode"]))
|
||||
log.log(output_loglevel, "retcode: %s", ret["retcode"])
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -3339,7 +3339,7 @@ def shell_info(shell, list_modules=False):
|
|||
hive="HKEY_LOCAL_MACHINE", key="Software\\Microsoft\\PowerShell"
|
||||
)
|
||||
pw_keys.sort(key=int)
|
||||
if len(pw_keys) == 0:
|
||||
if not pw_keys:
|
||||
return {
|
||||
"error": "Unable to locate 'powershell' Reason: Cannot be "
|
||||
"found in registry.",
|
||||
|
|
|
@ -485,8 +485,7 @@ def get(
|
|||
else:
|
||||
if merge not in ("recurse", "overwrite"):
|
||||
log.warning(
|
||||
"Unsupported merge strategy '{0}'. Falling back "
|
||||
"to 'recurse'.".format(merge)
|
||||
"Unsupported merge strategy '%s'. Falling back " "to 'recurse'.", merge
|
||||
)
|
||||
merge = "recurse"
|
||||
|
||||
|
@ -553,7 +552,7 @@ def gather_bootstrap_script(bootstrap=None):
|
|||
if not HAS_CLOUD:
|
||||
return False, "config.gather_bootstrap_script is unavailable"
|
||||
ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap)
|
||||
if "Success" in ret and len(ret["Success"]["Files updated"]) > 0:
|
||||
if "Success" in ret and ret["Success"]["Files updated"]:
|
||||
return ret["Success"]["Files updated"][0]
|
||||
|
||||
|
||||
|
|
|
@ -394,17 +394,11 @@ def copy_to(
|
|||
# Before we try to replace the file, compare checksums.
|
||||
source_md5 = __salt__["file.get_sum"](local_file, "md5")
|
||||
if source_md5 == _get_md5(name, dest, run_all):
|
||||
log.debug(
|
||||
"{0} and {1}:{2} are the same file, skipping copy".format(
|
||||
source, name, dest
|
||||
)
|
||||
)
|
||||
log.debug("%s and %s:%s are the same file, skipping copy", source, name, dest)
|
||||
return True
|
||||
|
||||
log.debug(
|
||||
"Copying {0} to {1} container '{2}' as {3}".format(
|
||||
source, container_type, name, dest
|
||||
)
|
||||
"Copying %s to %s container '%s' as %s", source, container_type, name, dest
|
||||
)
|
||||
|
||||
# Using cat here instead of opening the file, reading it into memory,
|
||||
|
|
|
@ -113,7 +113,7 @@ def remove(module, details=False):
|
|||
for file_ in files:
|
||||
if file_ in rm_details:
|
||||
continue
|
||||
log.trace("Removing {0}".format(file_))
|
||||
log.trace("Removing %s", file_)
|
||||
if __salt__["file.remove"](file_):
|
||||
rm_details[file_] = "removed"
|
||||
else:
|
||||
|
|
|
@ -355,7 +355,7 @@ def raw_cron(user):
|
|||
)
|
||||
).splitlines(True)
|
||||
|
||||
if len(lines) != 0 and lines[0].startswith(
|
||||
if lines and lines[0].startswith(
|
||||
"# DO NOT EDIT THIS FILE - edit the master and reinstall."
|
||||
):
|
||||
del lines[0:3]
|
||||
|
|
|
@ -233,8 +233,7 @@ def _csf_to_list(option):
|
|||
|
||||
|
||||
def split_option(option):
|
||||
l = re.split("(?: +)?\=(?: +)?", option) # pylint: disable=W1401
|
||||
return l
|
||||
return re.split(r"(?: +)?\=(?: +)?", option)
|
||||
|
||||
|
||||
def get_option(option):
|
||||
|
|
|
@ -68,7 +68,7 @@ def _check_cygwin_installed(cyg_arch="x86_64"):
|
|||
path_to_cygcheck = os.sep.join(
|
||||
["C:", _get_cyg_dir(cyg_arch), "bin", "cygcheck.exe"]
|
||||
)
|
||||
LOG.debug("Path to cygcheck.exe: {0}".format(path_to_cygcheck))
|
||||
LOG.debug("Path to cygcheck.exe: %s", path_to_cygcheck)
|
||||
if not os.path.exists(path_to_cygcheck):
|
||||
LOG.debug("Could not find cygcheck.exe")
|
||||
return False
|
||||
|
@ -122,7 +122,7 @@ def check_valid_package(package, cyg_arch="x86_64", mirrors=None):
|
|||
if mirrors is None:
|
||||
mirrors = [{DEFAULT_MIRROR: DEFAULT_MIRROR_KEY}]
|
||||
|
||||
LOG.debug("Checking Valid Mirrors: {0}".format(mirrors))
|
||||
LOG.debug("Checking Valid Mirrors: %s", mirrors)
|
||||
|
||||
for mirror in mirrors:
|
||||
for mirror_url, key in mirror.items():
|
||||
|
@ -251,7 +251,7 @@ def uninstall(packages, cyg_arch="x86_64", mirrors=None):
|
|||
args = []
|
||||
if packages is not None:
|
||||
args.append("--remove-packages {pkgs}".format(pkgs=packages))
|
||||
LOG.debug("args: {0}".format(args))
|
||||
LOG.debug("args: %s", args)
|
||||
if not _check_cygwin_installed(cyg_arch):
|
||||
LOG.debug("We're convinced cygwin isn't installed")
|
||||
return True
|
||||
|
@ -279,12 +279,7 @@ def update(cyg_arch="x86_64", mirrors=None):
|
|||
|
||||
# Can't update something that isn't installed
|
||||
if not _check_cygwin_installed(cyg_arch):
|
||||
LOG.debug(
|
||||
"Cygwin ({0}) not installed,\
|
||||
could not update".format(
|
||||
cyg_arch
|
||||
)
|
||||
)
|
||||
LOG.debug("Cygwin (%s) not installed, could not update", cyg_arch)
|
||||
return False
|
||||
|
||||
return _run_silent_cygwin(cyg_arch=cyg_arch, args=args, mirrors=mirrors)
|
||||
|
|
|
@@ -235,7 +235,7 @@ def enabled(name, **kwargs):
        salt '*' daemontools.enabled <service name>
    """
    if not available(name):
        log.error("Service {0} not found".format(name))
        log.error("Service %s not found", name)
        return False

    run_file = os.path.join(SERVICE_DIR, name, "run")

@ -34,7 +34,15 @@ def __virtual__():
|
|||
|
||||
|
||||
def cluster_create(
|
||||
version, name="main", port=None, locale=None, encoding=None, datadir=None
|
||||
version,
|
||||
name="main",
|
||||
port=None,
|
||||
locale=None,
|
||||
encoding=None,
|
||||
datadir=None,
|
||||
allow_group_access=None,
|
||||
data_checksums=None,
|
||||
wal_segsize=None,
|
||||
):
|
||||
"""
|
||||
Adds a cluster to the Postgres server.
|
||||
|
@ -53,7 +61,9 @@ def cluster_create(
|
|||
|
||||
salt '*' postgres.cluster_create '9.3' locale='fr_FR'
|
||||
|
||||
salt '*' postgres.cluster_create '11' data_checksums=True wal_segsize='32'
|
||||
"""
|
||||
|
||||
cmd = [salt.utils.path.which("pg_createcluster")]
|
||||
if port:
|
||||
cmd += ["--port", six.text_type(port)]
|
||||
|
@ -64,12 +74,19 @@ def cluster_create(
|
|||
if datadir:
|
||||
cmd += ["--datadir", datadir]
|
||||
cmd += [version, name]
|
||||
# initdb-specific options are passed after '--'
|
||||
if allow_group_access or data_checksums or wal_segsize:
|
||||
cmd += ["--"]
|
||||
if allow_group_access is True:
|
||||
cmd += ["--allow-group-access"]
|
||||
if data_checksums is True:
|
||||
cmd += ["--data-checksums"]
|
||||
if wal_segsize:
|
||||
cmd += ["--wal-segsize", wal_segsize]
|
||||
cmdstr = " ".join([pipes.quote(c) for c in cmd])
|
||||
ret = __salt__["cmd.run_all"](cmdstr, python_shell=False)
|
||||
if ret.get("retcode", 0) != 0:
|
||||
log.error(
|
||||
"Error creating a Postgresql" " cluster {0}/{1}".format(version, name)
|
||||
)
|
||||
log.error("Error creating a Postgresql cluster %s/%s", version, name)
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
@ -135,9 +152,7 @@ def cluster_remove(version, name="main", stop=False):
|
|||
ret = __salt__["cmd.run_all"](cmdstr, python_shell=False)
|
||||
# FIXME - return Boolean ?
|
||||
if ret.get("retcode", 0) != 0:
|
||||
log.error(
|
||||
"Error removing a Postgresql" " cluster {0}/{1}".format(version, name)
|
||||
)
|
||||
log.error("Error removing a Postgresql cluster %s/%s", version, name)
|
||||
else:
|
||||
ret["changes"] = ("Successfully removed" " cluster {0}/{1}").format(
|
||||
version, name
|
||||
|
|
|
@ -180,8 +180,9 @@ def _error_msg_routes(iface, option, expected):
|
|||
|
||||
|
||||
def _log_default_iface(iface, opt, value):
|
||||
msg = "Using default option -- Interface: {0} Option: {1} Value: {2}"
|
||||
log.info(msg.format(iface, opt, value))
|
||||
log.info(
|
||||
"Using default option -- Interface: %s Option: %s Value: %s", iface, opt, value
|
||||
)
|
||||
|
||||
|
||||
def _error_msg_network(option, expected):
|
||||
|
@ -194,8 +195,7 @@ def _error_msg_network(option, expected):
|
|||
|
||||
|
||||
def _log_default_network(opt, value):
|
||||
msg = "Using existing setting -- Setting: {0} Value: {1}"
|
||||
log.info(msg.format(opt, value))
|
||||
log.info("Using existing setting -- Setting: %s Value: %s", opt, value)
|
||||
|
||||
|
||||
def _raise_error_iface(iface, option, expected):
|
||||
|
@ -829,30 +829,27 @@ def _parse_settings_bond(opts, iface):
|
|||
}
|
||||
|
||||
if opts["mode"] in ["balance-rr", "0"]:
|
||||
log.info("Device: {0} Bonding Mode: load balancing (round-robin)".format(iface))
|
||||
log.info("Device: %s Bonding Mode: load balancing (round-robin)", iface)
|
||||
return _parse_settings_bond_0(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["active-backup", "1"]:
|
||||
log.info(
|
||||
"Device: {0} Bonding Mode: fault-tolerance (active-backup)".format(iface)
|
||||
)
|
||||
log.info("Device: %s Bonding Mode: fault-tolerance (active-backup)", iface)
|
||||
return _parse_settings_bond_1(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["balance-xor", "2"]:
|
||||
log.info("Device: {0} Bonding Mode: load balancing (xor)".format(iface))
|
||||
log.info("Device: %s Bonding Mode: load balancing (xor)", iface)
|
||||
return _parse_settings_bond_2(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["broadcast", "3"]:
|
||||
log.info("Device: {0} Bonding Mode: fault-tolerance (broadcast)".format(iface))
|
||||
log.info("Device: %s Bonding Mode: fault-tolerance (broadcast)", iface)
|
||||
return _parse_settings_bond_3(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["802.3ad", "4"]:
|
||||
log.info(
|
||||
"Device: {0} Bonding Mode: IEEE 802.3ad Dynamic link "
|
||||
"aggregation".format(iface)
|
||||
"Device: %s Bonding Mode: IEEE 802.3ad Dynamic link " "aggregation", iface
|
||||
)
|
||||
return _parse_settings_bond_4(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["balance-tlb", "5"]:
|
||||
log.info("Device: {0} Bonding Mode: transmit load balancing".format(iface))
|
||||
log.info("Device: %s Bonding Mode: transmit load balancing", iface)
|
||||
return _parse_settings_bond_5(opts, iface, bond_def)
|
||||
elif opts["mode"] in ["balance-alb", "6"]:
|
||||
log.info("Device: {0} Bonding Mode: adaptive load balancing".format(iface))
|
||||
log.info("Device: %s Bonding Mode: adaptive load balancing", iface)
|
||||
return _parse_settings_bond_6(opts, iface, bond_def)
|
||||
else:
|
||||
valid = [
|
||||
|
|
|
@ -513,7 +513,7 @@ def build(
|
|||
dscs = make_src_pkg(dsc_dir, spec, sources, env, saltenv, runas)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
shutil.rmtree(dsc_dir)
|
||||
log.error("Failed to make src package, exception '{0}'".format(exc))
|
||||
log.error("Failed to make src package, exception '%s'", exc)
|
||||
return ret
|
||||
|
||||
root_user = "root"
|
||||
|
@ -586,7 +586,7 @@ def build(
|
|||
ret.setdefault("Packages", []).append(bdist)
|
||||
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
log.error("Error building from '{0}', execption '{1}'".format(dsc, exc))
|
||||
log.error("Error building from '%s', execption '%s'", dsc, exc)
|
||||
|
||||
# remove any Packages file created for local dependency processing
|
||||
for pkgzfile in os.listdir(dest_dir):
|
||||
|
|
|
@ -96,8 +96,8 @@ def A(host, nameserver=None):
|
|||
# In this case, 0 is not the same as False
|
||||
if cmd["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as "
|
||||
"fallback.".format(cmd["retcode"])
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
cmd["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
@ -126,8 +126,8 @@ def AAAA(host, nameserver=None):
|
|||
# In this case, 0 is not the same as False
|
||||
if cmd["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as "
|
||||
"fallback.".format(cmd["retcode"])
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
cmd["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
@ -156,8 +156,8 @@ def NS(domain, resolve=True, nameserver=None):
|
|||
# In this case, 0 is not the same as False
|
||||
if cmd["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as "
|
||||
"fallback.".format(cmd["retcode"])
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
cmd["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
@ -195,9 +195,8 @@ def SPF(domain, record="SPF", nameserver=None):
|
|||
# In this case, 0 is not the same as False
|
||||
if result["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as fallback.".format(
|
||||
result["retcode"]
|
||||
)
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
result["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
@ -207,7 +206,7 @@ def SPF(domain, record="SPF", nameserver=None):
|
|||
return SPF(domain, "TXT", nameserver)
|
||||
|
||||
sections = re.sub('"', "", result["stdout"]).split()
|
||||
if len(sections) == 0 or sections[0] != "v=spf1":
|
||||
if not sections or sections[0] != "v=spf1":
|
||||
return []
|
||||
|
||||
if sections[1].startswith("redirect="):
|
||||
|
@ -253,8 +252,8 @@ def MX(domain, resolve=False, nameserver=None):
|
|||
# In this case, 0 is not the same as False
|
||||
if cmd["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as "
|
||||
"fallback.".format(cmd["retcode"])
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
cmd["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
@ -287,8 +286,8 @@ def TXT(host, nameserver=None):
|
|||
|
||||
if cmd["retcode"] != 0:
|
||||
log.warning(
|
||||
"dig returned exit code '{0}'. Returning empty list as "
|
||||
"fallback.".format(cmd["retcode"])
|
||||
"dig returned exit code '%s'. Returning empty list as fallback.",
|
||||
cmd["retcode"],
|
||||
)
|
||||
return []
|
||||
|
||||
|
|
|
@ -611,7 +611,7 @@ def hdparms(disks, args=None):
|
|||
disk_data = {}
|
||||
for line in _hdparm("-{0} {1}".format(args, disk), False).splitlines():
|
||||
line = line.strip()
|
||||
if len(line) == 0 or line == disk + ":":
|
||||
if not line or line == disk + ":":
|
||||
continue
|
||||
|
||||
if ":" in line:
|
||||
|
@ -647,7 +647,7 @@ def hdparms(disks, args=None):
|
|||
rvals.append(val)
|
||||
if valdict:
|
||||
rvals.append(valdict)
|
||||
if len(rvals) == 0:
|
||||
if not rvals:
|
||||
continue
|
||||
elif len(rvals) == 1:
|
||||
rvals = rvals[0]
|
||||
|
|
|
@ -1506,6 +1506,86 @@ def login(*registries):
|
|||
return ret
|
||||
|
||||
|
||||
def logout(*registries):
|
||||
"""
|
||||
.. versionadded:: 3001
|
||||
|
||||
Performs a ``docker logout`` to remove the saved authentication details for
|
||||
one or more configured repositories.
|
||||
|
||||
Multiple registry URLs (matching those configured in Pillar) can be passed,
|
||||
and Salt will attempt to logout of *just* those registries. If no registry
|
||||
URLs are provided, Salt will attempt to logout of *all* configured
|
||||
registries.
|
||||
|
||||
**RETURN DATA**
|
||||
|
||||
A dictionary containing the following keys:
|
||||
|
||||
- ``Results`` - A dictionary mapping registry URLs to the authentication
|
||||
result. ``True`` means a successful logout, ``False`` means a failed
|
||||
logout.
|
||||
- ``Errors`` - A list of errors encountered during the course of this
|
||||
function.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt myminion docker.logout
|
||||
salt myminion docker.logout hub
|
||||
salt myminion docker.logout hub https://mydomain.tld/registry/
|
||||
"""
|
||||
# NOTE: This function uses the "docker logout" CLI command to remove
|
||||
# authentication information from config.json. docker-py does not support
|
||||
# this usecase (see https://github.com/docker/docker-py/issues/1091)
|
||||
|
||||
# To logout of all known (to Salt) docker registries, they have to be collected first
|
||||
registry_auth = __salt__["config.get"]("docker-registries", {})
|
||||
ret = {"retcode": 0}
|
||||
errors = ret.setdefault("Errors", [])
|
||||
if not isinstance(registry_auth, dict):
|
||||
errors.append("'docker-registries' Pillar value must be a dictionary")
|
||||
registry_auth = {}
|
||||
for reg_name, reg_conf in six.iteritems(
|
||||
__salt__["config.option"]("*-docker-registries", wildcard=True)
|
||||
):
|
||||
try:
|
||||
registry_auth.update(reg_conf)
|
||||
except TypeError:
|
||||
errors.append(
|
||||
"Docker registry '{0}' was not specified as a "
|
||||
"dictionary".format(reg_name)
|
||||
)
|
||||
|
||||
# If no registries passed, we will logout of all known registries
|
||||
if not registries:
|
||||
registries = list(registry_auth)
|
||||
|
||||
results = ret.setdefault("Results", {})
|
||||
for registry in registries:
|
||||
if registry not in registry_auth:
|
||||
errors.append("No match found for registry '{0}'".format(registry))
|
||||
continue
|
||||
else:
|
||||
cmd = ["docker", "logout"]
|
||||
if registry.lower() != "hub":
|
||||
cmd.append(registry)
|
||||
log.debug("Attempting to logout of docker registry '%s'", registry)
|
||||
logout_cmd = __salt__["cmd.run_all"](
|
||||
cmd, python_shell=False, output_loglevel="quiet",
|
||||
)
|
||||
results[registry] = logout_cmd["retcode"] == 0
|
||||
if not results[registry]:
|
||||
if logout_cmd["stderr"]:
|
||||
errors.append(logout_cmd["stderr"])
|
||||
elif logout_cmd["stdout"]:
|
||||
errors.append(logout_cmd["stdout"])
|
||||
if errors:
|
||||
ret["retcode"] = 1
|
||||
return ret
|
||||
|
||||
|
||||
# Functions for information gathering
|
||||
def depends(name):
|
||||
"""
|
||||
|
|
|
@@ -36,7 +36,7 @@ def __parse_drac(output):
    section = ""

    for i in output.splitlines():
        if len(i.rstrip()) > 0 and "=" in i:
        if i.rstrip() and "=" in i:
            if section in drac:
                drac[section].update(dict([[prop.strip() for prop in i.split("=")]]))
            else:
@@ -54,7 +54,7 @@ def __execute_cmd(command):
     cmd = __salt__["cmd.run_all"]("racadm {0}".format(command))
 
     if cmd["retcode"] != 0:
-        log.warning("racadm return an exit code '{0}'.".format(cmd["retcode"]))
+        log.warning("racadm return an exit code '%s'.", cmd["retcode"])
         return False
 
     return True
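The recurring change in these racadm hunks swaps an eager ``str.format`` call for the logging module's lazy ``%`` interpolation, so the message is only rendered when the WARNING record is actually emitted. A minimal sketch of the difference (the ``retcode`` value is made up):

```python
import logging

log = logging.getLogger(__name__)
retcode = 1

# Eager: the string is built even if WARNING records are filtered out.
log.warning("racadm return an exit code '{0}'.".format(retcode))

# Lazy: logging interpolates the argument only when the record is emitted.
log.warning("racadm return an exit code '%s'.", retcode)
```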
@@ -73,7 +73,7 @@ def system_info():
     cmd = __salt__["cmd.run_all"]("racadm getsysinfo")
 
     if cmd["retcode"] != 0:
-        log.warning("racadm return an exit code '{0}'.".format(cmd["retcode"]))
+        log.warning("racadm return an exit code '%s'.", cmd["retcode"])
 
     return __parse_drac(cmd["stdout"])
 
@@ -92,7 +92,7 @@ def network_info():
     cmd = __salt__["cmd.run_all"]("racadm getniccfg")
 
     if cmd["retcode"] != 0:
-        log.warning("racadm return an exit code '{0}'.".format(cmd["retcode"]))
+        log.warning("racadm return an exit code '%s'.", cmd["retcode"])
 
     return __parse_drac(cmd["stdout"])
 
@@ -197,7 +197,7 @@ def list_users():
         )
 
         if cmd["retcode"] != 0:
-            log.warning("racadm return an exit code '{0}'.".format(cmd["retcode"]))
+            log.warning("racadm return an exit code '%s'.", cmd["retcode"])
 
         for user in cmd["stdout"].splitlines():
             if not user.startswith("cfg"):
@@ -242,7 +242,7 @@ def delete_user(username, uid=None):
         )
 
     else:
-        log.warning("'{0}' does not exist".format(username))
+        log.warning("'%s' does not exist", username)
         return False
 
     return True
@@ -271,7 +271,7 @@ def change_password(username, password, uid=None):
             )
         )
     else:
-        log.warning("'{0}' does not exist".format(username))
+        log.warning("'%s' does not exist", username)
         return False
 
     return True
@@ -305,7 +305,7 @@ def create_user(username, password, permissions, users=None):
         users = list_users()
 
     if username in users:
-        log.warning("'{0}' already exists".format(username))
+        log.warning("'%s' already exists", username)
         return False
 
     for idx in six.iterkeys(users):
@@ -54,7 +54,7 @@ def __parse_drac(output):
         if i.strip().endswith(":") and "=" not in i:
             section = i[0:-1]
             drac[section] = {}
-        if len(i.rstrip()) > 0 and "=" in i:
+        if i.rstrip() and "=" in i:
             if section in drac:
                 drac[section].update(dict([[prop.strip() for prop in i.split("=")]]))
             else:
@@ -137,7 +137,7 @@ def __execute_ret(
             if l.startswith("Continuing execution"):
                 continue
 
-            if len(l.strip()) == 0:
+            if not l.strip():
                 continue
             fmtlines.append(l)
             if "=" in l:
@@ -418,7 +418,7 @@ def list_users(host=None, admin_username=None, admin_password=None, module=None)
             else:
                 break
         else:
-            if len(_username) > 0:
+            if _username:
                 users[_username].update({key: val})
 
     return users
@@ -1223,7 +1223,7 @@ def inventory(host=None, admin_username=None, admin_password=None):
             in_chassis = True
             continue
 
-        if len(l) < 1:
+        if not l:
             continue
 
         line = re.split(" +", l.strip())
@@ -6,10 +6,15 @@ from __future__ import absolute_import, print_function, unicode_literals
 
 import logging
 
+from salt.ext import six
+
+log = logging.getLogger(__name__)
+
+
 def _analyse_overview_field(content):
     """
     Split the field in drbd-overview
     """
     if "(" in content:
         # Output like "Connected(2*)" or "UpToDate(2*)"
         return content.split("(")[0], content.split("(")[0]
@@ -20,9 +25,140 @@ def _analyse_overview_field(content):
     return content, ""
 
 
+def _count_spaces_startswith(line):
+    """
+    Count the number of spaces before the first character
+    """
+    if line.split("#")[0].strip() == "":
+        return None
+
+    spaces = 0
+    for i in line:
+        if i.isspace():
+            spaces += 1
+        else:
+            return spaces
+
+
+def _analyse_status_type(line):
+    """
+    Figure out the sections in drbdadm status
+    """
+    spaces = _count_spaces_startswith(line)
+
+    if spaces is None:
+        return ""
+
+    switch = {
+        0: "RESOURCE",
+        2: {" disk:": "LOCALDISK", " role:": "PEERNODE", " connection:": "PEERNODE"},
+        4: {" peer-disk:": "PEERDISK"},
+    }
+
+    ret = switch.get(spaces, "UNKNOWN")
+
+    # isinstance(ret, str) only works when run directly, calling need unicode(six)
+    if isinstance(ret, six.text_type):
+        return ret
+
+    for x in ret:
+        if x in line:
+            return ret[x]
+
+    return "UNKNOWN"
+
+
+def _add_res(line):
+    """
+    Analyse the line of local resource of ``drbdadm status``
+    """
+    global resource
+    fields = line.strip().split()
+
+    if resource:
+        ret.append(resource)
+        resource = {}
+
+    resource["resource name"] = fields[0]
+    resource["local role"] = fields[1].split(":")[1]
+    resource["local volumes"] = []
+    resource["peer nodes"] = []
+
+
+def _add_volume(line):
+    """
+    Analyse the line of volumes of ``drbdadm status``
+    """
+    section = _analyse_status_type(line)
+    fields = line.strip().split()
+
+    volume = {}
+    for field in fields:
+        volume[field.split(":")[0]] = field.split(":")[1]
+
+    if section == "LOCALDISK":
+        resource["local volumes"].append(volume)
+    else:
+        # 'PEERDISK'
+        lastpnodevolumes.append(volume)
+
+
+def _add_peernode(line):
+    """
+    Analyse the line of peer nodes of ``drbdadm status``
+    """
+    global lastpnodevolumes
+
+    fields = line.strip().split()
+
+    peernode = {}
+    peernode["peernode name"] = fields[0]
+    # Could be role or connection:
+    peernode[fields[1].split(":")[0]] = fields[1].split(":")[1]
+    peernode["peer volumes"] = []
+    resource["peer nodes"].append(peernode)
+    lastpnodevolumes = peernode["peer volumes"]
+
+
+def _empty(dummy):
+    """
+    Action of empty line of ``drbdadm status``
+    """
+
+
+def _unknown_parser(line):
+    """
+    Action of unsupported line of ``drbdadm status``
+    """
+    global ret
+    ret = {"Unknown parser": line}
+
+
+def _line_parser(line):
+    """
+    Call action for different lines
+    """
+    section = _analyse_status_type(line)
+    fields = line.strip().split()
+
+    switch = {
+        "": _empty,
+        "RESOURCE": _add_res,
+        "PEERNODE": _add_peernode,
+        "LOCALDISK": _add_volume,
+        "PEERDISK": _add_volume,
+    }
+
+    func = switch.get(section, _unknown_parser)
+
+    func(line)
+
+
 def overview():
     """
     Show status of the DRBD devices, support two nodes only.
     drbd-overview is removed since drbd-utils-9.6.0,
     use status instead.
 
     CLI Example:
 
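To make the indentation-based dispatch above concrete, here is a hedged set of expected classifications for ``_analyse_status_type``; the sample lines are assumptions modelled on the ``drbdadm status`` output quoted later in this diff, and the checks assume they run inside the module (or its tests) where the private helper is importable.

```python
# Illustrative expectations only; sample lines are not taken from a real cluster.
assert _analyse_status_type("resource role:Secondary") == "RESOURCE"
assert _analyse_status_type("  volume:0 disk:Inconsistent") == "LOCALDISK"
assert _analyse_status_type("  drbd-node1 role:Primary") == "PEERNODE"
assert _analyse_status_type("    volume:0 peer-disk:UpToDate") == "PEERDISK"
assert _analyse_status_type("") == ""            # blank lines map to the empty section
assert _analyse_status_type("      junk") == "UNKNOWN"
```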
@@ -90,3 +226,58 @@ def overview():
         "synched": sync,
     }
     return ret
+
+
+# Global para for func status
+ret = []
+resource = {}
+lastpnodevolumes = None
+
+
+def status(name="all"):
+    """
+    Using drbdadm to show status of the DRBD devices,
+    available in the latest drbd9.
+    Support multiple nodes, multiple volumes.
+
+    :type name: str
+    :param name:
+        Resource name.
+
+    :return: drbd status of resource.
+    :rtype: list(dict(res))
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' drbd.status
+        salt '*' drbd.status name=<resource name>
+    """
+
+    # Initialize for multiple times test cases
+    global ret
+    global resource
+    ret = []
+    resource = {}
+
+    cmd = ["drbdadm", "status"]
+    cmd.append(name)
+
+    # One possible output: (number of resource/node/vol are flexible)
+    # resource role:Secondary
+    #   volume:0 disk:Inconsistent
+    #   volume:1 disk:Inconsistent
+    # drbd-node1 role:Primary
+    #   volume:0 replication:SyncTarget peer-disk:UpToDate done:10.17
+    #   volume:1 replication:SyncTarget peer-disk:UpToDate done:74.08
+    # drbd-node2 role:Secondary
+    #   volume:0 peer-disk:Inconsistent resync-suspended:peer
+    #   volume:1 peer-disk:Inconsistent resync-suspended:peer
+    for line in __salt__["cmd.run"](cmd).splitlines():
+        _line_parser(line)
+
+    if resource:
+        ret.append(resource)
+
+    return ret
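Putting the parser functions together, a hedged sketch of the list that ``drbd.status`` would build for the sample ``drbdadm status`` output quoted in the comments above; the concrete values are illustrative, derived by tracing ``_add_res``, ``_add_volume`` and ``_add_peernode`` over those lines.

```python
# Assumed shape of the return value for the commented sample output.
[
    {
        "resource name": "resource",
        "local role": "Secondary",
        "local volumes": [
            {"volume": "0", "disk": "Inconsistent"},
            {"volume": "1", "disk": "Inconsistent"},
        ],
        "peer nodes": [
            {
                "peernode name": "drbd-node1",
                "role": "Primary",
                "peer volumes": [
                    {"volume": "0", "replication": "SyncTarget", "peer-disk": "UpToDate", "done": "10.17"},
                    {"volume": "1", "replication": "SyncTarget", "peer-disk": "UpToDate", "done": "74.08"},
                ],
            },
            {
                "peernode name": "drbd-node2",
                "role": "Secondary",
                "peer volumes": [
                    {"volume": "0", "peer-disk": "Inconsistent", "resync-suspended": "peer"},
                    {"volume": "1", "peer-disk": "Inconsistent", "resync-suspended": "peer"},
                ],
            },
        ],
    }
]
```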
Some files were not shown because too many files have changed in this diff.