diff --git a/conf/minion b/conf/minion index ffa6b7273fb..d6ccf7f72e9 100644 --- a/conf/minion +++ b/conf/minion @@ -692,6 +692,12 @@ # for a full explanation. #multiprocessing: True +# Limit the maximum number of processes or threads created by salt-minion. +# This is useful to avoid resource exhaustion in case the minion receives +# more publications than it is able to handle. +# -1 is the default and disables the limit. +#process_count_max: -1 + ##### Logging settings ##### ########################################## diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 4a440526ad2..5cfa57dd763 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2423,6 +2423,23 @@ executed in a thread. multiprocessing: True +.. conf_minion:: process_count_max + +``process_count_max`` +--------------------- + +.. versionadded:: Oxygen + +Default: ``-1`` + +Limit the maximum number of processes or threads created by ``salt-minion``. +This is useful to avoid resource exhaustion in case the minion receives +more publications than it is able to handle. ``-1`` is the default +and disables the limit. + +.. code-block:: yaml + + process_count_max: -1 .. _minion-logging-settings:
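A minimal usage sketch for the new option (the value ``20`` below is illustrative, not taken from the patch): the cap applies to the job processes the minion spawns per publication, or to threads when ``multiprocessing`` is set to ``False``.

.. code-block:: yaml

    # Illustrative minion config: cap concurrent job workers at 20.
    # The default of -1 disables the limit entirely.
    process_count_max: 20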
diff --git a/doc/ref/publisheracl.rst b/doc/ref/publisheracl.rst index eda868b5d28..5549c3c92a8 100644 --- a/doc/ref/publisheracl.rst +++ b/doc/ref/publisheracl.rst @@ -25,6 +25,9 @@ configuration: - web*: - test.* - pkg.* + # Allow managers to use saltutil module functions + manager_.*: + - saltutil.* Permission Issues ----------------- diff --git a/doc/ref/runners/all/salt.runners.auth.rst b/doc/ref/runners/all/salt.runners.auth.rst index b82907d4d11..a3d933f2e43 100644 --- a/doc/ref/runners/all/salt.runners.auth.rst +++ b/doc/ref/runners/all/salt.runners.auth.rst @@ -1,5 +1,5 @@ -salt.runners.auth module -======================== +salt.runners.auth +================= .. automodule:: salt.runners.auth :members: diff --git a/doc/ref/runners/all/salt.runners.event.rst b/doc/ref/runners/all/salt.runners.event.rst index 9b07aa9988b..c2d505a1f2b 100644 --- a/doc/ref/runners/all/salt.runners.event.rst +++ b/doc/ref/runners/all/salt.runners.event.rst @@ -1,5 +1,5 @@ -salt.runners.event module -========================= +salt.runners.event +================== .. automodule:: salt.runners.event :members: diff --git a/doc/ref/runners/all/salt.runners.smartos_vmadm.rst b/doc/ref/runners/all/salt.runners.smartos_vmadm.rst index 5ee3d03eb1d..7b5a7c4834e 100644 --- a/doc/ref/runners/all/salt.runners.smartos_vmadm.rst +++ b/doc/ref/runners/all/salt.runners.smartos_vmadm.rst @@ -1,5 +1,5 @@ -salt.runners.smartos_vmadm module -================================= +salt.runners.smartos_vmadm +========================== .. automodule:: salt.runners.smartos_vmadm :members: diff --git a/doc/ref/runners/all/salt.runners.vistara.rst b/doc/ref/runners/all/salt.runners.vistara.rst index a66b06f6d2e..0f1400f4c7b 100644 --- a/doc/ref/runners/all/salt.runners.vistara.rst +++ b/doc/ref/runners/all/salt.runners.vistara.rst @@ -1,5 +1,5 @@ -salt.runners.vistara module -=========================== +salt.runners.vistara +==================== .. automodule:: salt.runners.vistara :members: diff --git a/doc/topics/releases/2016.11.8.rst b/doc/topics/releases/2016.11.8.rst new file mode 100644 index 00000000000..9f4eb68dab4 --- /dev/null +++ b/doc/topics/releases/2016.11.8.rst @@ -0,0 +1,1719 @@ +============================ +Salt 2016.11.8 Release Notes +============================ + +Version 2016.11.8 is a bugfix release for :ref:`2016.11.0 <release-2016-11-0>`. + +Changes for v2016.11.7..v2016.11.8 +---------------------------------- + +Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): + +*Generated at: 2017-09-11T14:52:27Z* + +Statistics: + +- Total Merges: **169** +- Total Issue references: **70** +- Total PR references: **206** + +Changes: + + +- **PR** `#43271`_: (*twangboy*) Fix minor formatting issue + @ *2017-08-30T18:35:12Z* + + * cf21f91 Merge pull request `#43271`_ from twangboy/win_fix_pkg.install + * 91b062f Fix formatting issue, spaces surrounding + + +- **PR** `#43228`_: (*twangboy*) Win fix pkg.install + @ *2017-08-30T14:26:21Z* + + * 3a0b02f Merge pull request `#43228`_ from twangboy/win_fix_pkg.install + * 13dfabb Fix regex statement, add `.` + + * 31ff69f Add underscore to regex search + + * 3cf2b65 Fix spelling + + * ed030a3 Use regex to detect salt-minion install + + * e5daff4 Fix pkg.install + +- **PR** `#43191`_: (*viktorkrivak*) Fix apache.config with multiple statement + @ *2017-08-28T18:13:44Z* + + * b4c689d Merge pull request `#43191`_ from viktorkrivak/fix-apache-config-multi-entity + * c15bcbe Merge remote-tracking branch 'upstream/2016.11' into fix-apache-config-multi-entity + + * 4164047 Fix apache.config with multiple statement At this moment when you post more than one statement in config only last is used. Also file is rewrited multiple times until last statement is written. Example: salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '8080'}, {'Proxy': "Something"}]" Ends only with Proxy Something and ignore Listen 8080, This patch fix this issue. + +- **PR** `#43154`_: (*lomeroe*) Backport `#43116`_ to 2016.11 + @ *2017-08-28T16:40:41Z* + + - **ISSUE** `#42279`_: (*dafyddj*) win_lgpo matches multiple policies due to startswith() + | refs: `#43116`_ `#43116`_ `#43154`_ + - **PR** `#43116`_: (*lomeroe*) Fix 42279 in develop + | refs: `#43154`_ + * b90e59e Merge pull request `#43154`_ from lomeroe/`bp-43116`_-2016.11 + * 8f593b0 verify that files exist before trying to remove them, win_file.remove raises an exception if the file does not exist + + * 33a30ba correcting bad format statement in search for policy to be disabled + + * acc3d7a correct fopen calls from salt.utils for 2016.11's utils function + + * 2da1cdd lint fix + + * 61bd12c track xml namespace to ensure policies w/duplicate IDs or Names do not conflict + + * f232bed add additional checks for ADM policies that have the same ADMX policy ID (`#42279`_) + +- **PR** `#43202`_: (*garethgreenaway*) Reverting previous augeas module changes + @ *2017-08-28T13:14:27Z* + + - **ISSUE** `#42642`_: (*githubcdr*) state.augeas + | refs: `#42669`_ `#43202`_ + * 5308c27 Merge pull request `#43202`_ from garethgreenaway/42642_2016_11_augeas_module_revert_fix + * ef7e93e Reverting this change due to it breaking other uses.
+ +- **PR** `#43103`_: (*aogier*) genesis.bootstrap deboostrap fix + @ *2017-08-25T20:48:23Z* + + - **ISSUE** `#43101`_: (*aogier*) genesis.bootstrap fails if no pkg AND exclude_pkgs (which can't be a string) + | refs: `#43103`_ + * f16b724 Merge pull request `#43103`_ from aogier/43101-genesis-bootstrap + * db94f3b better formatting + + * e5cc667 tests: fix a leftover and simplify some parts + + * 13e5997 lint + + * 216ced6 allow comma-separated pkgs lists, quote args, test deb behaviour + + * d8612ae fix debootstrap and enhance packages selection/deletion via cmdline + +- **PR** `#42663`_: (*jagguli*) Check remote tags before deciding to do a fetch `#42329`_ + @ *2017-08-25T20:14:32Z* + + - **ISSUE** `#42329`_: (*jagguli*) State git.latest does not pull latest tags + | refs: `#42663`_ + * 4863771 Merge pull request `#42663`_ from StreetHawkInc/fix_git_tag_check + * 2b5af5b Remove refs/tags prefix from remote tags + + * 3f2e96e Convert set to list for serializer + + * 2728e5d Only include new tags in changes + + * 4b1df2f Exclude annotated tags from checks + + * 389c037 Check remote tags before deciding to do a fetch `#42329`_ + +- **PR** `#43199`_: (*corywright*) Add `disk.format` alias for `disk.format_` + @ *2017-08-25T19:21:07Z* + + - **ISSUE** `#43198`_: (*corywright*) disk.format_ needs to be aliased to disk.format + | refs: `#43199`_ + * 4193e7f Merge pull request `#43199`_ from corywright/disk-format-alias + * f00d3a9 Add `disk.format` alias for `disk.format_` + +- **PR** `#43196`_: (*gtmanfred*) Pin request install to version for npm tests + @ *2017-08-25T18:43:06Z* + + - **ISSUE** `#495`_: (*syphernl*) mysql.* without having MySQL installed/configured gives traceback + | refs: `#43196`_ + * 5471f9f Merge pull request `#43196`_ from gtmanfred/2016.11 + * ccd2241 Pin request install to version + +- **PR** `#43178`_: (*terminalmage*) git.detached: Fix traceback when rev is a SHA and is not present locally + @ *2017-08-25T13:58:37Z* + + - **ISSUE** `#43143`_: (*abulford*) git.detached does not fetch if rev is missing from local + | refs: `#43178`_ + * ace2715 Merge pull request `#43178`_ from terminalmage/issue43143 + * 2640833 git.detached: Fix traceback when rev is a SHA and is not present locally + +- **PR** `#43179`_: (*terminalmage*) Fix missed deprecation + @ *2017-08-24T22:52:34Z* + + * 12e9507 Merge pull request `#43179`_ from terminalmage/old-deprecation + * 3adf8ad Fix missed deprecation + +- **PR** `#43171`_: (*terminalmage*) Add warning about adding new functions to salt/utils/__init__.py + @ *2017-08-24T19:10:23Z* + + * b595440 Merge pull request `#43171`_ from terminalmage/salt-utils-warning + * 7b5943a Add warning about adding new functions to salt/utils/__init__.py + +- **PR** `#43173`_: (*Ch3LL*) Add New Release Branch Strategy to Contribution Docs + @ *2017-08-24T19:04:56Z* + + * 4f273ca Merge pull request `#43173`_ from Ch3LL/add_branch_docs + * 1b24244 Add New Release Branch Strategy to Contribution Docs + +- **PR** `#43151`_: (*ushmodin*) state.sls hangs on file.recurse with clean: True on windows + @ *2017-08-23T17:25:33Z* + + - **PR** `#42969`_: (*ushmodin*) state.sls hangs on file.recurse with clean: True on windows + | refs: `#43151`_ + * 669b376 Merge pull request `#43151`_ from ushmodin/2016.11 + * c5841e2 state.sls hangs on file.recurse with clean: True on windows + +- **PR** `#42986`_: (*renner*) Notify systemd synchronously (via NOTIFY_SOCKET) + @ *2017-08-22T16:52:56Z* + + * ae9d2b7 Merge pull request `#42986`_ from renner/systemd-notify + * 79c53f3 
Fallback to systemd_notify_call() in case of socket.error + + * f176547 Notify systemd synchronously (via NOTIFY_SOCKET) + +- **PR** `#43037`_: (*mcarlton00*) Issue `#43036`_ Bhyve virtual grain in Linux VMs + @ *2017-08-22T16:43:40Z* + + - **ISSUE** `#43036`_: (*mcarlton00*) Linux VMs in Bhyve aren't displayed properly in grains + | refs: `#43037`_ + * b420fbe Merge pull request `#43037`_ from mcarlton00/fix-bhyve-grains + * 73315f0 Issue `#43036`_ Bhyve virtual grain in Linux VMs + +- **PR** `#43100`_: (*vutny*) [DOCS] Add missing `utils` sub-dir listed for `extension_modules` + @ *2017-08-22T15:40:09Z* + + * 0a86f2d Merge pull request `#43100`_ from vutny/doc-add-missing-utils-ext + * af743ff [DOCS] Add missing `utils` sub-dir listed for `extension_modules` + +- **PR** `#42985`_: (*DmitryKuzmenko*) Properly handle `prereq` having lost requisites. + @ *2017-08-21T22:49:39Z* + + - **ISSUE** `#15171`_: (*JensRantil*) Maximum recursion limit hit related to requisites + | refs: `#42985`_ + * e2bf2f4 Merge pull request `#42985`_ from DSRCorporation/bugs/15171_recursion_limit + * 651b1ba Properly handle `prereq` having lost requisites. + +- **PR** `#43092`_: (*blarghmatey*) Fixed issue with silently passing all tests in Testinfra module + @ *2017-08-21T20:22:08Z* + + * e513333 Merge pull request `#43092`_ from mitodl/2016.11 + * d4b113a Fixed issue with silently passing all tests in Testinfra module + +- **PR** `#43060`_: (*twangboy*) Osx update pkg scripts + @ *2017-08-21T20:06:12Z* + + * 77a443c Merge pull request `#43060`_ from twangboy/osx_update_pkg_scripts + * ef8a14c Remove /opt/salt instead of /opt/salt/bin + + * 2dd62aa Add more information to the description + + * f44f5b7 Only stop services if they are running + + * 3b62bf9 Remove salt from the path + + * ebdca3a Update pkg-scripts + +- **PR** `#43064`_: (*terminalmage*) Fix race condition in git.latest + @ *2017-08-21T14:29:52Z* + + - **ISSUE** `#42869`_: (*abednarik*) Git Module : Failed to update repository + | refs: `#43064`_ + * 1b1b6da Merge pull request `#43064`_ from terminalmage/issue42869 + * 093c0c2 Fix race condition in git.latest + +- **PR** `#43054`_: (*lorengordon*) Uses ConfigParser to read yum config files + @ *2017-08-18T20:49:44Z* + + - **ISSUE** `#42041`_: (*lorengordon*) pkg.list_repo_pkgs fails to find pkgs with spaces around yum repo enabled value + | refs: `#43054`_ + - **PR** `#42045`_: (*arount*) Fix: salt.modules.yumpkg: ConfigParser to read ini like files. 
+ | refs: `#43054`_ + * 96e8e83 Merge pull request `#43054`_ from lorengordon/fix/yumpkg/config-parser + * 3b2cb81 fix typo in salt.modules.yumpkg + + * 38add0e break if leading comments are all fetched + + * d7f65dc fix configparser import & log if error was raised + + * ca1b1bb use configparser to parse yum repo file + +- **PR** `#43048`_: (*rallytime*) Back-port `#43031`_ to 2016.11 + @ *2017-08-18T12:56:04Z* + + - **PR** `#43031`_: (*gtmanfred*) use a ruby gem that doesn't have dependencies + | refs: `#43048`_ + * 43aa46f Merge pull request `#43048`_ from rallytime/`bp-43031`_ + * 35e4504 use a ruby gem that doesn't have dependencies + +- **PR** `#43023`_: (*terminalmage*) Fixes/improvements to Jenkins state/module + @ *2017-08-18T01:33:10Z* + + * ad89ff3 Merge pull request `#43023`_ from terminalmage/fix-jenkins-xml-caching + * 33fd8ff Update jenkins.py + + * fc306fc Add missing colon in `if` statement + + * 822eabc Catch exceptions raised when making changes to jenkins + + * 91b583b Improve and correct execption raising + + * f096917 Raise an exception if we fail to cache the config xml + +- **PR** `#43026`_: (*rallytime*) Back-port `#43020`_ to 2016.11 + @ *2017-08-17T23:19:46Z* + + - **PR** `#43020`_: (*gtmanfred*) test with gem that appears to be abandoned + | refs: `#43026`_ + * 2957467 Merge pull request `#43026`_ from rallytime/`bp-43020`_ + * 0eb15a1 test with gem that appears to be abandoned + +- **PR** `#43033`_: (*rallytime*) Back-port `#42760`_ to 2016.11 + @ *2017-08-17T22:24:43Z* + + - **ISSUE** `#40490`_: (*alxwr*) saltstack x509 incompatible to m2crypto 0.26.0 + | refs: `#42760`_ + - **PR** `#42760`_: (*AFriemann*) Catch TypeError thrown by m2crypto when parsing missing subjects in c… + | refs: `#43033`_ + * 4150b09 Merge pull request `#43033`_ from rallytime/`bp-42760`_ + * 3e3f7f5 Catch TypeError thrown by m2crypto when parsing missing subjects in certificate files. + +- **PR** `#43032`_: (*rallytime*) Back-port `#42547`_ to 2016.11 + @ *2017-08-17T21:53:50Z* + + - **PR** `#42547`_: (*blarghmatey*) Updated testinfra modules to work with more recent versions + | refs: `#43032`_ + * b124d36 Merge pull request `#43032`_ from rallytime/`bp-42547`_ + * ea4d7f4 Updated testinfra modules to work with more recent versions + +- **PR** `#43027`_: (*pabloh007*) Fixes ignore push flag for docker.push module issue `#42992`_ + @ *2017-08-17T19:55:37Z* + + - **ISSUE** `#42992`_: (*pabloh007*) docker.save flag push does is ignored + * a88386a Merge pull request `#43027`_ from pabloh007/fix-docker-save-push-2016-11 + * d0fd949 Fixes ignore push flag for docker.push module issue `#42992`_ + +- **PR** `#42890`_: (*DmitryKuzmenko*) Make chunked mode in salt-cp optional + @ *2017-08-17T18:37:44Z* + + - **ISSUE** `#42627`_: (*taigrrr8*) salt-cp no longer works. Was working a few months back. + | refs: `#42890`_ + * 51d1684 Merge pull request `#42890`_ from DSRCorporation/bugs/42627_salt-cp + * cfddbf1 Apply code review: update the doc + + * afedd3b Typos and version fixes in the doc. + + * 9fedf60 Fixed 'test_valid_docs' test. + + * 9993886 Make chunked mode in salt-cp optional (disabled by default). 
+ +- **PR** `#43009`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-08-17T18:00:09Z* + + - **PR** `#42954`_: (*Ch3LL*) [2016.3] Bump latest and previous versions + - **PR** `#42949`_: (*Ch3LL*) Add Security Notice to 2016.3.7 Release Notes + - **PR** `#42942`_: (*Ch3LL*) [2016.3] Add clean_id function to salt.utils.verify.py + * b3c253c Merge pull request `#43009`_ from rallytime/merge-2016.11 + * 566ba4f Merge branch '2016.3' into '2016.11' + + * 13b8637 Merge pull request `#42942`_ from Ch3LL/2016.3.6_follow_up + + * f281e17 move additional minion config options to 2016.3.8 release notes + + * 168604b remove merge conflict + + * 8a07d95 update release notes with cve number + + * 149633f Add release notes for 2016.3.7 release + + * 7a4cddc Add clean_id function to salt.utils.verify.py + + * bbb1b29 Merge pull request `#42954`_ from Ch3LL/latest_2016.3 + + * b551e66 [2016.3] Bump latest and previous versions + + * 5d5edc5 Merge pull request `#42949`_ from Ch3LL/2016.3.7_docs + + * d75d374 Add Security Notice to 2016.3.7 Release Notes + +- **PR** `#43021`_: (*terminalmage*) Use socket.AF_INET6 to get the correct value instead of doing an OS check + @ *2017-08-17T17:57:09Z* + + - **PR** `#43014`_: (*Ch3LL*) Change AF_INET6 family for mac in test_host_to_ips + | refs: `#43021`_ + * 37c63e7 Merge pull request `#43021`_ from terminalmage/fix-network-test + * 4089b7b Use socket.AF_INET6 to get the correct value instead of doing an OS check + +- **PR** `#43019`_: (*rallytime*) Update bootstrap script to latest stable: v2017.08.17 + @ *2017-08-17T17:56:41Z* + + * 8f64232 Merge pull request `#43019`_ from rallytime/bootstrap_2017.08.17 + * 2f762b3 Update bootstrap script to latest stable: v2017.08.17 + +- **PR** `#43014`_: (*Ch3LL*) Change AF_INET6 family for mac in test_host_to_ips + | refs: `#43021`_ + @ *2017-08-17T16:17:51Z* + + * ff1caeee Merge pull request `#43014`_ from Ch3LL/fix_network_mac + * b8eee44 Change AF_INET6 family for mac in test_host_to_ips + +- **PR** `#42968`_: (*vutny*) [DOCS] Fix link to Salt Cloud Feature Matrix + @ *2017-08-16T13:16:16Z* + + * 1ee9499 Merge pull request `#42968`_ from vutny/doc-salt-cloud-ref + * 44ed53b [DOCS] Fix link to Salt Cloud Feature Matrix + +- **PR** `#42291`_: (*vutny*) Fix `#38839`_: remove `state` from Reactor runner kwags + @ *2017-08-15T23:01:08Z* + + - **ISSUE** `#38839`_: (*DaveOHenry*) Invoking runner.cloud.action via reactor sls fails + | refs: `#42291`_ + * 923f974 Merge pull request `#42291`_ from vutny/`fix-38839`_ + * 5f8f98a Fix `#38839`_: remove `state` from Reactor runner kwags + +- **PR** `#42940`_: (*gtmanfred*) create new ip address before checking list of allocated ips + @ *2017-08-15T21:47:18Z* + + - **ISSUE** `#42644`_: (*stamak*) nova salt-cloud -P Private IPs returned, but not public. 
Checking for misidentified IPs + | refs: `#42940`_ + * c20bc7d Merge pull request `#42940`_ from gtmanfred/2016.11 + * 253e216 fix IP address spelling + + * bd63074 create new ip address before checking list of allocated ips + +- **PR** `#42959`_: (*rallytime*) Back-port `#42883`_ to 2016.11 + @ *2017-08-15T21:25:48Z* + + - **PR** `#42883`_: (*rallytime*) Fix failing boto tests + | refs: `#42959`_ + * d6496ec Merge pull request `#42959`_ from rallytime/`bp-42883`_ + * c6b9ca4 Lint fix: add missing space + + * 5597b1a Skip 2 failing tests in Python 3 due to upstream bugs + + * a0b19bd Update account id value in boto_secgroup module unit test + + * 60b406e @mock_elb needs to be changed to @mock_elb_deprecated as well + + * 6ae1111 Replace @mock_ec2 calls with @mock_ec2_deprecated calls + +- **PR** `#42944`_: (*Ch3LL*) [2016.11] Add clean_id function to salt.utils.verify.py + @ *2017-08-15T18:06:12Z* + + * 6366e05 Merge pull request `#42944`_ from Ch3LL/2016.11.6_follow_up + * 7e0a20a Add release notes for 2016.11.7 release + + * 63823f8 Add clean_id function to salt.utils.verify.py + +- **PR** `#42952`_: (*Ch3LL*) [2016.11] Bump latest and previous versions + @ *2017-08-15T17:23:02Z* + + * 49d339c Merge pull request `#42952`_ from Ch3LL/latest_2016.11 + * 74e7055 [2016.11] Bump latest and previous versions + +- **PR** `#42950`_: (*Ch3LL*) Add Security Notice to 2016.11.7 Release Notes + @ *2017-08-15T16:50:23Z* + + * b0d2e05 Merge pull request `#42950`_ from Ch3LL/2016.11.7_docs + * a6f902d Add Security Notice to 2016.11.77 Release Notes + +- **PR** `#42836`_: (*aneeshusa*) Backport salt.utils.versions from develop to 2016.11 + @ *2017-08-14T20:56:54Z* + + - **PR** `#42835`_: (*aneeshusa*) Fix typo in utils/versions.py module + | refs: `#42836`_ + * c0ff69f Merge pull request `#42836`_ from lyft/backport-utils.versions-to-2016.11 + * 86ce700 Backport salt.utils.versions from develop to 2016.11 + +- **PR** `#42919`_: (*rallytime*) Back-port `#42871`_ to 2016.11 + @ *2017-08-14T20:44:00Z* + + - **PR** `#42871`_: (*amalleo25*) Update joyent.rst + | refs: `#42919`_ + * 64a79dd Merge pull request `#42919`_ from rallytime/`bp-42871`_ + * 4e46c96 Update joyent.rst + +- **PR** `#42918`_: (*rallytime*) Back-port `#42848`_ to 2016.11 + @ *2017-08-14T20:43:43Z* + + - **ISSUE** `#42803`_: (*gmcwhistler*) master_type: str, not working as expected, parent salt-minion process dies. + | refs: `#42848`_ + - **ISSUE** `#42753`_: (*grichmond-salt*) SaltReqTimeout Error on Some Minions when One Master in a Multi-Master Configuration is Unavailable + | refs: `#42848`_ + - **PR** `#42848`_: (*DmitryKuzmenko*) Execute fire_master asynchronously in the main minion thread. + | refs: `#42918`_ + * bea8ec1 Merge pull request `#42918`_ from rallytime/`bp-42848`_ + * cdb4812 Make lint happier. + + * 62eca9b Execute fire_master asynchronously in the main minion thread. 
+ +- **PR** `#42861`_: (*twangboy*) Fix pkg.install salt-minion using salt-call + @ *2017-08-14T19:07:22Z* + + * 52bce32 Merge pull request `#42861`_ from twangboy/win_pkg_install_salt + * 0d3789f Fix pkg.install salt-minion using salt-call + +- **PR** `#42798`_: (*s-sebastian*) Update return data before calling returners + @ *2017-08-14T15:51:30Z* + + * b9f4f87 Merge pull request `#42798`_ from s-sebastian/2016.11 + * 1cc8659 Update return data before calling returners + +- **PR** `#41977`_: (*abulford*) Fix dockerng.network_* ignoring of tests=True + @ *2017-08-11T18:37:20Z* + + - **ISSUE** `#41976`_: (*abulford*) dockerng network states do not respect test=True + | refs: `#41977`_ `#41977`_ + * c15d003 Merge pull request `#41977`_ from redmatter/fix-dockerng-network-ignores-test + * 1cc2aa5 Fix dockerng.network_* ignoring of tests=True + +- **PR** `#42886`_: (*sarcasticadmin*) Adding missing output flags to salt cli docs + @ *2017-08-11T18:35:19Z* + + * 3b9c3c5 Merge pull request `#42886`_ from sarcasticadmin/adding_docs_salt_outputs + * 744bf95 Adding missing output flags to salt cli + +- **PR** `#42882`_: (*gtmanfred*) make sure cmd is not run when npm isn't installed + @ *2017-08-11T17:53:14Z* + + * e5b98c8 Merge pull request `#42882`_ from gtmanfred/2016.11 + * da3402a make sure cmd is not run when npm isn't installed + +- **PR** `#42788`_: (*amendlik*) Remove waits and retries from Saltify deployment + @ *2017-08-11T15:38:05Z* + + * 5962c95 Merge pull request `#42788`_ from amendlik/saltify-timeout + * 928b523 Remove waits and retries from Saltify deployment + +- **PR** `#42877`_: (*terminalmage*) Add virtual func for cron state module + @ *2017-08-11T15:33:09Z* + + * 227ecdd Merge pull request `#42877`_ from terminalmage/add-cron-state-virtual + * f1de196 Add virtual func for cron state module + +- **PR** `#42859`_: (*terminalmage*) Add note about git CLI requirement for GitPython to GitFS tutorial + @ *2017-08-11T14:53:03Z* + + * ab9f6ce Merge pull request `#42859`_ from terminalmage/gitpython-git-cli-note + * 35e05c9 Add note about git CLI requirement for GitPython to GitFS tutorial + +- **PR** `#42856`_: (*gtmanfred*) skip cache_clean test if npm version is >= 5.0.0 + @ *2017-08-11T13:39:20Z* + + - **ISSUE** `#41770`_: (*Ch3LL*) NPM v5 incompatible with salt.modules.cache_list + | refs: `#42856`_ + - **ISSUE** `#475`_: (*thatch45*) Change yaml to use C bindings + | refs: `#42856`_ + * 682b4a8 Merge pull request `#42856`_ from gtmanfred/2016.11 + * b458b89 skip cache_clean test if npm version is >= 5.0.0 + +- **PR** `#42864`_: (*whiteinge*) Make syndic_log_file respect root_dir setting + @ *2017-08-11T13:28:21Z* + + * 01ea854 Merge pull request `#42864`_ from whiteinge/syndic-log-root_dir + * 4b1f55d Make syndic_log_file respect root_dir setting + +- **PR** `#42851`_: (*terminalmage*) Backport `#42651`_ to 2016.11 + @ *2017-08-10T18:02:39Z* + + - **PR** `#42651`_: (*gtmanfred*) python2- prefix for fedora 26 packages + * 2dde1f7 Merge pull request `#42851`_ from terminalmage/`bp-42651`_ + * a3da86e fix syntax + + * 6ecdbce make sure names are correct + + * f83b553 add py3 for versionlock + + * 21934f6 python2- prefix for fedora 26 packages + +- **PR** `#42806`_: (*rallytime*) Update doc references in glusterfs.volume_present + @ *2017-08-10T14:10:16Z* + + - **ISSUE** `#42683`_: (*rgcosma*) Gluster module broken in 2017.7 + | refs: `#42806`_ + * c746f79 Merge pull request `#42806`_ from rallytime/`fix-42683`_ + * 8c8640d Update doc references in glusterfs.volume_present + +- 
**PR** `#42829`_: (*twangboy*) Fix passing version in pkgs as shown in docs + @ *2017-08-10T14:07:24Z* + + * 27a8a26 Merge pull request `#42829`_ from twangboy/win_pkg_fix_install + * 83b9b23 Add winrepo to docs about supporting versions in pkgs + + * 81fefa6 Add ability to pass version in pkgs list + +- **PR** `#42838`_: (*twangboy*) Document requirements for win_pki + @ *2017-08-10T13:59:46Z* + + * 3c3ac6a Merge pull request `#42838`_ from twangboy/win_doc_pki + * f0a1d06 Standardize PKI Client + + * 7de687a Document requirements for win_pki + +- **PR** `#42805`_: (*rallytime*) Back-port `#42552`_ to 2016.11 + @ *2017-08-09T22:37:56Z* + + - **PR** `#42552`_: (*remijouannet*) update consul module following this documentation https://www.consul.… + | refs: `#42805`_ + * b3e2ae3 Merge pull request `#42805`_ from rallytime/`bp-42552`_ + * 5a91c1f update consul module following this documentation https://www.consul.io/api/acl.html + +- **PR** `#42804`_: (*rallytime*) Back-port `#42784`_ to 2016.11 + @ *2017-08-09T22:37:40Z* + + - **ISSUE** `#42731`_: (*infoveinx*) http.query template_data render exception + | refs: `#42804`_ + - **PR** `#42784`_: (*gtmanfred*) only read file if ret is not a string in http.query + | refs: `#42804`_ + * d2ee793 Merge pull request `#42804`_ from rallytime/`bp-42784`_ + * dbd29e4 only read file if it is not a string + +- **PR** `#42826`_: (*terminalmage*) Fix misspelling of "versions" + @ *2017-08-09T19:39:43Z* + + * 4cbf805 Merge pull request `#42826`_ from terminalmage/fix-spelling + * 00f9314 Fix misspelling of "versions" + +- **PR** `#42786`_: (*Ch3LL*) Fix typo for template_dict in http docs + @ *2017-08-08T18:14:50Z* + + * de997ed Merge pull request `#42786`_ from Ch3LL/fix_typo + * 90a2fb6 Fix typo for template_dict in http docs + +- **PR** `#42795`_: (*lomeroe*) backport `#42744`_ to 2016.11 + @ *2017-08-08T17:17:15Z* + + - **ISSUE** `#42600`_: (*twangboy*) Unable to set 'Not Configured' using win_lgpo execution module + | refs: `#42744`_ `#42795`_ + - **PR** `#42744`_: (*lomeroe*) fix `#42600`_ in develop + | refs: `#42795`_ + * bf6153e Merge pull request `#42795`_ from lomeroe/`bp-42744`__201611 + * 695f8c1 fix `#42600`_ in develop + +- **PR** `#42748`_: (*whiteinge*) Workaround Orchestrate problem that highstate outputter mutates data + @ *2017-08-07T21:11:33Z* + + - **ISSUE** `#42747`_: (*whiteinge*) Outputters mutate data which can be a problem for Runners and perhaps other things + | refs: `#42748`_ + * 61fad97 Merge pull request `#42748`_ from whiteinge/save-before-output + * de60b77 Workaround Orchestrate problem that highstate outputter mutates data + +- **PR** `#42764`_: (*amendlik*) Fix infinite loop with salt-cloud and Windows nodes + @ *2017-08-07T20:47:07Z* + + * a4e3e7e Merge pull request `#42764`_ from amendlik/cloud-win-loop + * f3dcfca Fix infinite loops on failed Windows deployments + +- **PR** `#42694`_: (*gtmanfred*) allow adding extra remotes to a repository + @ *2017-08-07T18:08:11Z* + + - **ISSUE** `#42690`_: (*ChristianBeer*) git.latest state with remote set fails on first try + | refs: `#42694`_ + * da85326 Merge pull request `#42694`_ from gtmanfred/2016.11 + * 1a0457a allow adding extra remotes to a repository + +- **PR** `#42669`_: (*garethgreenaway*) [2016.11] Fixes to augeas module + @ *2017-08-06T17:58:03Z* + + - **ISSUE** `#42642`_: (*githubcdr*) state.augeas + | refs: `#42669`_ `#43202`_ + * 7b2119f Merge pull request `#42669`_ from garethgreenaway/42642_2016_11_augeas_module_fix + * 2441308 Updating the call to 
shlex_split to pass the posix=False argument so that quotes are preserved. + +- **PR** `#42629`_: (*xiaoanyunfei*) tornado api + @ *2017-08-03T22:21:20Z* + + * 3072576 Merge pull request `#42629`_ from xiaoanyunfei/tornadoapi + * 1e13383 tornado api + +- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy + @ *2017-08-03T20:44:10Z* + + - **PR** `#33806`_: (*cachedout*) Work around upstream cherrypy bug + | refs: `#42655`_ + * f0f00fc Merge pull request `#42655`_ from whiteinge/rest_cherrypy-reenable-stats + * deb6316 Fix lint errors + + * 6bd91c8 Reenable cpstats for rest_cherrypy + +- **PR** `#42693`_: (*gilbsgilbs*) Fix RabbitMQ tags not properly set. + @ *2017-08-03T20:23:08Z* + + - **ISSUE** `#42686`_: (*gilbsgilbs*) Unable to set multiple RabbitMQ tags + | refs: `#42693`_ `#42693`_ + * 21cf15f Merge pull request `#42693`_ from gilbsgilbs/fix-rabbitmq-tags + * 78fccdc Cast to list in case tags is a tuple. + + * 287b57b Fix RabbitMQ tags not properly set. + +- **PR** `#42574`_: (*sbojarski*) Fixed error reporting in "boto_cfn.present" function. + @ *2017-08-01T17:55:29Z* + + - **ISSUE** `#41433`_: (*sbojarski*) boto_cfn.present fails when reporting error for failed state + | refs: `#42574`_ + * f2b0c9b Merge pull request `#42574`_ from sbojarski/boto-cfn-error-reporting + * 5c945f1 Fix debug message in "boto_cfn._validate" function. + + * 181a1be Fixed error reporting in "boto_cfn.present" function. + +- **PR** `#42623`_: (*terminalmage*) Fix unicode constructor in custom YAML loader + @ *2017-07-31T19:25:18Z* + + * bc1effc Merge pull request `#42623`_ from terminalmage/fix-unicode-constructor + * fcf4588 Fix unicode constructor in custom YAML loader + +- **PR** `#42515`_: (*gtmanfred*) Allow not interpreting backslashes in the repl + @ *2017-07-28T16:00:09Z* + + * cbf752c Merge pull request `#42515`_ from gtmanfred/backslash + * cc4e456 Allow not interpreting backslashes in the repl + +- **PR** `#42586`_: (*gdubroeucq*) [Fix] yumpkg.py: add option to the command "check-update" + @ *2017-07-27T23:52:00Z* + + - **ISSUE** `#42456`_: (*gdubroeucq*) Use yum lib + | refs: `#42586`_ + * 5494958 Merge pull request `#42586`_ from gdubroeucq/2016.11 + * 9c0b5cc Remove extra newline + + * d2ef448 yumpkg.py: clean + + * a96f7c0 yumpkg.py: add option to the command "check-update" + +- **PR** `#41988`_: (*abulford*) Fix dockerng.network_* name matching + @ *2017-07-27T21:25:06Z* + + - **ISSUE** `#41982`_: (*abulford*) dockerng.network_* matches too easily + | refs: `#41988`_ `#41988`_ + * 6b45deb Merge pull request `#41988`_ from redmatter/fix-dockerng-network-matching + * 9eea796 Add regression tests for `#41982`_ + + * 3369f00 Fix broken unit test test_network_absent + + * 0ef6cf6 Add trace logging of dockerng.networks result + + * 515c612 Fix dockerng.network_* name matching + +- **PR** `#42339`_: (*isbm*) Bugfix: Jobs scheduled to run at a future time stay pending for Salt minions (bsc`#1036125`_) + @ *2017-07-27T19:05:51Z* + + - **ISSUE** `#1036125`_: (**) + * 4b16109 Merge pull request `#42339`_ from isbm/isbm-jobs-scheduled-in-a-future-bsc1036125 + * bbba84c Bugfix: Jobs scheduled to run at a future time stay pending for Salt minions (bsc`#1036125`_) + +- **PR** `#42077`_: (*vutny*) Fix scheduled job run on Master if `when` parameter is a list + @ *2017-07-27T19:04:23Z* + + - **ISSUE** `#23516`_: (*dkiser*) BUG: cron job scheduler sporadically works + | refs: `#42077`_ + - **PR** `#41973`_: (*vutny*) Fix Master/Minion scheduled jobs based on Cron expressions + | refs: `#42077`_ 
+ * 6c5a7c6 Merge pull request `#42077`_ from vutny/fix-jobs-scheduled-with-whens + * b1960ce Fix scheduled job run on Master if `when` parameter is a list + +- **PR** `#42414`_: (*vutny*) DOCS: unify hash sum with hash type format + @ *2017-07-27T18:48:40Z* + + * f9cb536 Merge pull request `#42414`_ from vutny/unify-hash-params-format + * d1f2a93 DOCS: unify hash sum with hash type format + +- **PR** `#42523`_: (*rallytime*) Add a mention of the True/False returns with __virtual__() + @ *2017-07-27T18:13:07Z* + + - **ISSUE** `#42375`_: (*dragonpaw*) salt.modules.*.__virtualname__ doens't work as documented. + | refs: `#42523`_ + * 535c922 Merge pull request `#42523`_ from rallytime/`fix-42375`_ + * 685c2cc Add information about returning a tuple with an error message + + * fa46651 Add a mention of the True/False returns with __virtual__() + +- **PR** `#42527`_: (*twangboy*) Document changes to Windows Update in Windows 10/Server 2016 + @ *2017-07-27T17:45:38Z* + + * 0df0e7e Merge pull request `#42527`_ from twangboy/win_wua + * 0373791 Correct capatlization + + * af3bcc9 Document changes to Windows Update in 10/2016 + +- **PR** `#42551`_: (*binocvlar*) Remove '-s' (--script) argument to parted within align_check function + @ *2017-07-27T17:35:31Z* + + * 69b0658 Merge pull request `#42551`_ from binocvlar/fix-lack-of-align-check-output + * c4fabaa Remove '-s' (--script) argument to parted within align_check function + +- **PR** `#42573`_: (*rallytime*) Back-port `#42433`_ to 2016.11 + @ *2017-07-27T13:51:21Z* + + - **ISSUE** `#42403`_: (*astronouth7303*) [2017.7] Pillar empty when state is applied from orchestrate + | refs: `#42433`_ + - **PR** `#42433`_: (*terminalmage*) Only force saltenv/pillarenv to be a string when not None + | refs: `#42573`_ + * 9e0b4e9 Merge pull request `#42573`_ from rallytime/`bp-42433`_ + * 0293429 Only force saltenv/pillarenv to be a string when not None + +- **PR** `#42571`_: (*twangboy*) Avoid loading system PYTHON* environment vars + @ *2017-07-26T22:48:55Z* + + * e931ed2 Merge pull request `#42571`_ from twangboy/win_add_pythonpath + * d55a44d Avoid loading user site packages + + * 9af1eb2 Ignore any PYTHON* environment vars already on the system + + * 4e2fb03 Add pythonpath to batch files and service + +- **PR** `#42387`_: (*DmitryKuzmenko*) Fix race condition in usage of weakvaluedict + @ *2017-07-25T20:57:42Z* + + - **ISSUE** `#42371`_: (*tsaridas*) Minion unresponsive after trying to failover + | refs: `#42387`_ + * de2f397 Merge pull request `#42387`_ from DSRCorporation/bugs/42371_KeyError_WeakValueDict + * e721c7e Don't use `key in weakvaluedict` because it could lie. 
+ +- **PR** `#41968`_: (*root360-AndreasUlm*) Fix rabbitmqctl output sanitizer for version 3.6.10 + @ *2017-07-25T19:12:36Z* + + - **ISSUE** `#41955`_: (*root360-AndreasUlm*) rabbitmq 3.6.10 changed output => rabbitmq-module broken + | refs: `#41968`_ + * 641a9d7 Merge pull request `#41968`_ from root360-AndreasUlm/fix-rabbitmqctl-output-handler + * 76fd941 added tests for rabbitmq 3.6.10 output handler + + * 3602af1 Fix rabbitmqctl output handler for 3.6.10 + +- **PR** `#42479`_: (*gtmanfred*) validate ssh_interface for ec2 + @ *2017-07-25T18:37:18Z* + + - **ISSUE** `#42477`_: (*aikar*) Invalid ssh_interface value prevents salt-cloud provisioning without reason of why + | refs: `#42479`_ + * 66fede3 Merge pull request `#42479`_ from gtmanfred/interface + * c32c1b2 fix pylint + + * 99ec634 validate ssh_interface for ec2 + +- **PR** `#42516`_: (*rallytime*) Add info about top file to pillar walk-through example to include edit.vim + @ *2017-07-25T17:01:12Z* + + - **ISSUE** `#42405`_: (*felrivero*) The documentation is incorrectly compiled (PILLAR section) + | refs: `#42516`_ + * a925c70 Merge pull request `#42516`_ from rallytime/`fix-42405`_ + * e3a6717 Add info about top file to pillar walk-through example to include edit.vim + +- **PR** `#42509`_: (*clem-compilatio*) Fix _assign_floating_ips in openstack.py + @ *2017-07-24T17:14:13Z* + + - **ISSUE** `#42417`_: (*clem-compilatio*) salt-cloud - openstack - "no more floating IP addresses" error - but public_ip in node + | refs: `#42509`_ + * 1bd5bbc Merge pull request `#42509`_ from clem-compilatio/`fix-42417`_ + * 72924b0 Fix _assign_floating_ips in openstack.py + +- **PR** `#42464`_: (*garethgreenaway*) [2016.11] Small fix to modules/git.py + @ *2017-07-21T21:28:57Z* + + * 4bf35a7 Merge pull request `#42464`_ from garethgreenaway/2016_11_remove_tmp_identity_file + * ff24102 Uncomment the line that removes the temporary identity file. + +- **PR** `#42443`_: (*garethgreenaway*) [2016.11] Fix to slack engine + @ *2017-07-21T15:48:57Z* + + - **ISSUE** `#42357`_: (*Giandom*) Salt pillarenv problem with slack engine + | refs: `#42443`_ + * e2120db Merge pull request `#42443`_ from garethgreenaway/42357_pass_args_kwargs_correctly + * 635810b Updating the slack engine in 2016.11 to pass the args and kwrags correctly to LocalClient + +- **PR** `#42200`_: (*shengis*) Fix `#42198`_ + @ *2017-07-21T14:47:29Z* + + - **ISSUE** `#42198`_: (*shengis*) state sqlite3.row_absent fail with "parameters are of unsupported type" + | refs: `#42200`_ + * 8262cc9 Merge pull request `#42200`_ from shengis/sqlite3_fix_row_absent_2016.11 + * 407b8f4 Fix `#42198`_ If where_args is not set, not using it in the delete request. 
+ +- **PR** `#42424`_: (*goten4*) Fix error message when tornado or pycurl is not installed + @ *2017-07-20T21:53:40Z* + + - **ISSUE** `#42413`_: (*goten4*) Invalid error message when proxy_host is set and tornado not installed + | refs: `#42424`_ + * d9df97e Merge pull request `#42424`_ from goten4/2016.11 + * 1c0574d Fix error message when tornado or pycurl is not installed + +- **PR** `#42350`_: (*twangboy*) Fixes problem with Version and OS Release related grains on certain versions of Python (2016.11) + @ *2017-07-19T17:07:26Z* + + * 42bb1a6 Merge pull request `#42350`_ from twangboy/win_fix_ver_grains_2016.11 + * 8c04840 Detect Server OS with a desktop release name + +- **PR** `#42356`_: (*meaksh*) Allow to check whether a function is available on the AliasesLoader wrapper + @ *2017-07-19T16:56:41Z* + + * 0a72e56 Merge pull request `#42356`_ from meaksh/2016.11-AliasesLoader-wrapper-fix + * 915d942 Allow to check whether a function is available on the AliasesLoader wrapper + +- **PR** `#42368`_: (*twangboy*) Remove build and dist directories before install (2016.11) + @ *2017-07-19T16:47:28Z* + + * 10eb7b7 Merge pull request `#42368`_ from twangboy/win_fix_build_2016.11 + * a7c910c Remove build and dist directories before install + +- **PR** `#42370`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-07-18T22:39:41Z* + + - **PR** `#42359`_: (*Ch3LL*) [2016.3] Update version numbers in doc config for 2017.7.0 release + * 016189f Merge pull request `#42370`_ from rallytime/merge-2016.11 + * 0aa5dde Merge branch '2016.3' into '2016.11' + + * e9b0f20 Merge pull request `#42359`_ from Ch3LL/doc-update-2016.3 + + * dc85b5e [2016.3] Update version numbers in doc config for 2017.7.0 release + +- **PR** `#42360`_: (*Ch3LL*) [2016.11] Update version numbers in doc config for 2017.7.0 release + @ *2017-07-18T19:23:30Z* + + * f06a6f1 Merge pull request `#42360`_ from Ch3LL/doc-update-2016.11 + * b90b7a7 [2016.11] Update version numbers in doc config for 2017.7.0 release + +- **PR** `#42319`_: (*rallytime*) Add more documentation for config options that are missing from master/minion docs + @ *2017-07-18T18:02:32Z* + + - **ISSUE** `#32400`_: (*rallytime*) Document Default Config Values + | refs: `#42319`_ + * e0595b0 Merge pull request `#42319`_ from rallytime/config-docs + * b40f980 Add more documentation for config options that are missing from master/minion docs + +- **PR** `#42352`_: (*CorvinM*) Multiple documentation fixes + @ *2017-07-18T15:10:37Z* + + - **ISSUE** `#42333`_: (*b3hni4*) Getting "invalid type of dict, a list is required" when trying to configure engines in master config file + | refs: `#42352`_ + * 7894040 Merge pull request `#42352`_ from CorvinM/issue42333 + * 526b6ee Multiple documentation fixes + +- **PR** `#42353`_: (*terminalmage*) is_windows is a function, not a propery/attribute + @ *2017-07-18T14:38:51Z* + + * b256001 Merge pull request `#42353`_ from terminalmage/fix-git-test + * 14cf6ce is_windows is a function, not a propery/attribute + +- **PR** `#42264`_: (*rallytime*) Update minion restart section in FAQ doc for windows + @ *2017-07-17T17:40:40Z* + + - **ISSUE** `#41116`_: (*hrumph*) FAQ has wrong instructions for upgrading Windows minion. 
+ | refs: `#42264`_ + * 866a1fe Merge pull request `#42264`_ from rallytime/`fix-41116`_ + * bd63888 Add mono-spacing to salt-minion reference for consistency + + * 30d62f4 Update minion restart section in FAQ doc for windows + +- **PR** `#42275`_: (*terminalmage*) pkg.installed: pack name/version into pkgs argument + @ *2017-07-17T17:38:39Z* + + - **ISSUE** `#42194`_: (*jryberg*) pkg version: latest are now broken, appending -latest to filename + | refs: `#42275`_ + * 9a70708 Merge pull request `#42275`_ from terminalmage/issue42194 + * 6638749 pkg.installed: pack name/version into pkgs argument + +- **PR** `#42269`_: (*rallytime*) Add some clarity to "multiple quotes" section of yaml docs + @ *2017-07-17T17:38:18Z* + + - **ISSUE** `#41721`_: (*sazaro*) state.sysrc broken when setting the value to YES or NO + | refs: `#42269`_ + * e588f23 Merge pull request `#42269`_ from rallytime/`fix-41721`_ + * f2250d4 Add a note about using different styles of quotes. + + * 38d9b3d Add some clarity to "multiple quotes" section of yaml docs + +- **PR** `#42282`_: (*rallytime*) Handle libcloud objects that throw RepresenterErrors with --out=yaml + @ *2017-07-17T17:36:35Z* + + - **ISSUE** `#42152`_: (*dubb-b*) salt-cloud errors on Rackspace driver using -out=yaml + | refs: `#42282`_ + * 5aaa214 Merge pull request `#42282`_ from rallytime/`fix-42152`_ + * f032223 Handle libcloud objects that throw RepresenterErrors with --out=yaml + +- **PR** `#42308`_: (*lubyou*) Force file removal on Windows. Fixes `#42295`_ + @ *2017-07-17T17:12:13Z* + + - **ISSUE** `#42295`_: (*lubyou*) file.absent fails on windows if the file to be removed has the "readonly" attribute set + | refs: `#42308`_ + * fb5697a Merge pull request `#42308`_ from lubyou/42295-fix-file-absent-windows + * 026ccf4 Force file removal on Windows. Fixes `#42295`_ + +- **PR** `#42314`_: (*rallytime*) Add clarification to salt ssh docs about key auto-generation. + @ *2017-07-17T14:07:49Z* + + - **ISSUE** `#42267`_: (*gzcwnk*) salt-ssh not creating ssh keys automatically as per documentation + | refs: `#42314`_ + * da2a8a5 Merge pull request `#42314`_ from rallytime/`fix-42267`_ + * c406046 Add clarification to salt ssh docs about key auto-generation. + +- **PR** `#41945`_: (*garethgreenaway*) Fixes to modules/git.py + @ *2017-07-14T17:46:10Z* + + - **ISSUE** `#41936`_: (*michaelkarrer81*) git.latest identity does not set the correct user for the private key file on the minion + | refs: `#41945`_ + - **ISSUE** `#1`_: (*thatch45*) Enable regex on the salt cli + * acadd54 Merge pull request `#41945`_ from garethgreenaway/41936_allow_identity_files_with_user + * 44841e5 Moving the call to cp.get_file inside the with block to ensure the umask is preserved when we grab the file. + + * f9ba60e Merge pull request `#1`_ from terminalmage/pr-41945 + + * 1b60261 Restrict set_umask to mkstemp call only + + * 68549f3 Fixing umask to we can set files as executable. + + * 4949bf3 Updating to swap on the new salt.utils.files.set_umask context_manager + + * 8faa9f6 Updating PR with requested changes. 
+ + * 494765e Updating the git module to allow an identity file to be used when passing the user parameter + +- **PR** `#42289`_: (*CorvinM*) Multiple empty_password fixes for state.user + @ *2017-07-14T16:14:02Z* + + - **ISSUE** `#42240`_: (*casselt*) empty_password in user.present always changes password, even with test=True + | refs: `#42289`_ + - **PR** `#41543`_: (*cri-epita*) Fix user creation with empty password + | refs: `#42289`_ `#42289`_ + * f90e04a Merge pull request `#42289`_ from CorvinM/`bp-41543`_ + * 357dc22 Fix user creation with empty password + +- **PR** `#42123`_: (*vutny*) DOCS: describe importing custom util classes + @ *2017-07-12T15:53:24Z* + + * a91a3f8 Merge pull request `#42123`_ from vutny/fix-master-utils-import + * 6bb8b8f Add missing doc for ``utils_dirs`` Minion config option + + * f1bc58f Utils: add example of module import + +- **PR** `#42261`_: (*rallytime*) Some minor doc fixes for dnsutil module so they'll render correctly + @ *2017-07-11T23:14:53Z* + + * e2aa511 Merge pull request `#42261`_ from rallytime/minor-doc-fix + * 8c76bbb Some minor doc fixes for dnsutil module so they'll render correctly + +- **PR** `#42262`_: (*rallytime*) Back-port `#42224`_ to 2016.11 + @ *2017-07-11T23:14:25Z* + + - **PR** `#42224`_: (*tdutrion*) Remove duplicate instruction in Openstack Rackspace config example + | refs: `#42262`_ + * 3e9dfbc Merge pull request `#42262`_ from rallytime/`bp-42224`_ + * c31ded3 Remove duplicate instruction in Openstack Rackspace config example + +- **PR** `#42181`_: (*garethgreenaway*) fixes to state.py for names parameter + @ *2017-07-11T21:21:32Z* + + - **ISSUE** `#42137`_: (*kiemlicz*) cmd.run with multiple commands - random order of execution + | refs: `#42181`_ + * 7780579 Merge pull request `#42181`_ from garethgreenaway/42137_backport_fix_from_2017_7 + * a34970b Back porting the fix for 2017.7 that ensures the order of the names parameter. + +- **PR** `#42253`_: (*gtmanfred*) Only use unassociated ips when unable to allocate + @ *2017-07-11T20:53:51Z* + + - **PR** `#38965`_: (*toanju*) salt-cloud will use list_floating_ips for OpenStack + | refs: `#42253`_ + - **PR** `#34280`_: (*kevinanderson1*) salt-cloud will use list_floating_ips for Openstack + | refs: `#38965`_ + * 7253786 Merge pull request `#42253`_ from gtmanfred/2016.11 + * 53e2576 Only use unassociated ips when unable to allocate + +- **PR** `#42252`_: (*UtahDave*) simple docstring updates + @ *2017-07-11T20:48:33Z* + + * b2a4698 Merge pull request `#42252`_ from UtahDave/2016.11local + * e6a9563 simple doc updates + +- **PR** `#42235`_: (*astronouth7303*) Abolish references to `dig` in examples. + @ *2017-07-10T20:06:11Z* + + - **ISSUE** `#42232`_: (*astronouth7303*) Half of dnsutil refers to dig + | refs: `#42235`_ + * 781fe13 Merge pull request `#42235`_ from astronouth7303/patch-1-2016.3 + * 4cb51bd Make note of dig partial requirement. + + * 08e7d83 Abolish references to `dig` in examples. 
+ +- **PR** `#42215`_: (*twangboy*) Add missing config to example + @ *2017-07-07T20:18:44Z* + + * 83cbd76 Merge pull request `#42215`_ from twangboy/win_iis_docs + * c07e220 Add missing config to example + +- **PR** `#42211`_: (*terminalmage*) Only pass a saltenv in orchestration if one was explicitly passed (2016.11) + @ *2017-07-07T20:16:35Z* + + * 274946a Merge pull request `#42211`_ from terminalmage/issue40928 + * 22a18fa Only pass a saltenv in orchestration if one was explicitly passed (2016.11) + +- **PR** `#42173`_: (*rallytime*) Back-port `#37424`_ to 2016.11 + @ *2017-07-07T16:39:59Z* + + - **PR** `#37424`_: (*kojiromike*) Avoid Early Convert ret['comment'] to String + | refs: `#42173`_ + * 89261cf Merge pull request `#42173`_ from rallytime/`bp-37424`_ + * 01addb6 Avoid Early Convert ret['comment'] to String + +- **PR** `#42175`_: (*rallytime*) Back-port `#39366`_ to 2016.11 + @ *2017-07-06T19:51:47Z* + + - **ISSUE** `#39365`_: (*dglloyd*) service.running fails if sysv script has no status command and enable: True + | refs: `#39366`_ + - **PR** `#39366`_: (*dglloyd*) Pass sig to service.status in after_toggle + | refs: `#42175`_ + * 3b17fb7 Merge pull request `#42175`_ from rallytime/`bp-39366`_ + * 53f7b98 Pass sig to service.status in after_toggle + +- **PR** `#42172`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-07-06T18:16:29Z* + + - **PR** `#42155`_: (*phsteve*) Fix docs for puppet.plugin_sync + * ea16f47 Merge pull request `#42172`_ from rallytime/merge-2016.11 + * b1fa332 Merge branch '2016.3' into '2016.11' + + * 8fa1fa5 Merge pull request `#42155`_ from phsteve/doc-fix-puppet + + * fb2cb78 Fix docs for puppet.plugin_sync so code-block renders properly and sync is spelled consistently + +- **PR** `#42176`_: (*rallytime*) Back-port `#42109`_ to 2016.11 + @ *2017-07-06T18:15:35Z* + + - **PR** `#42109`_: (*arthurlogilab*) [doc] Update aws.rst - add Debian default username + | refs: `#42176`_ + * 6307b98 Merge pull request `#42176`_ from rallytime/`bp-42109`_ + * 686926d Update aws.rst - add Debian default username + +- **PR** `#42095`_: (*terminalmage*) Add debug logging to dockerng.login + @ *2017-07-06T17:13:05Z* + + * 28c4e4c Merge pull request `#42095`_ from terminalmage/docker-login-debugging + * bd27870 Add debug logging to dockerng.login + +- **PR** `#42119`_: (*terminalmage*) Fix regression in CLI pillar override for salt-call + @ *2017-07-06T17:02:52Z* + + - **ISSUE** `#42116`_: (*terminalmage*) CLI pillar override regression in 2017.7.0rc1 + | refs: `#42119`_ + * 2b754bc Merge pull request `#42119`_ from terminalmage/issue42116 + * 9a26894 Add integration test for 42116 + + * 1bb42bb Fix regression when CLI pillar override is used with salt-call + +- **PR** `#42121`_: (*terminalmage*) Fix pillar.get when saltenv is passed + @ *2017-07-06T16:52:34Z* + + - **ISSUE** `#42114`_: (*clallen*) saltenv bug in pillar.get execution module function + | refs: `#42121`_ + * 8c0a83c Merge pull request `#42121`_ from terminalmage/issue42114 + * d142912 Fix pillar.get when saltenv is passed + +- **PR** `#42094`_: (*terminalmage*) Prevent command from showing in exception when output_loglevel=quiet + @ *2017-07-06T16:18:09Z* + + * 687992c Merge pull request `#42094`_ from terminalmage/quiet-exception + * 47d61f4 Prevent command from showing in exception when output_loglevel=quiet + +- **PR** `#42163`_: (*vutny*) Fix `#42115`_: parse libcloud "rc" version correctly + @ *2017-07-06T16:15:07Z* + + - **ISSUE** `#42115`_: (*nomeelnoj*) Installing EPEL repo 
breaks salt-cloud + | refs: `#42163`_ + * dad2551 Merge pull request `#42163`_ from vutny/`fix-42115`_ + * b27b1e3 Fix `#42115`_: parse libcloud "rc" version correctly + +- **PR** `#42164`_: (*Ch3LL*) Fix kerberos create_keytab doc + @ *2017-07-06T15:55:33Z* + + * 2a8ae2b Merge pull request `#42164`_ from Ch3LL/fix_kerb_doc + * 7c0fb24 Fix kerberos create_keytab doc + +- **PR** `#42141`_: (*rallytime*) Back-port `#42098`_ to 2016.11 + @ *2017-07-06T15:11:49Z* + + - **PR** `#42098`_: (*twangboy*) Change repo_ng to repo-ng + | refs: `#42141`_ + * 678d4d4 Merge pull request `#42141`_ from rallytime/`bp-42098`_ + * bd80243 Change repo_ng to repo-ng + +- **PR** `#42140`_: (*rallytime*) Back-port `#42097`_ to 2016.11 + @ *2017-07-06T15:11:29Z* + + - **PR** `#42097`_: (*gtmanfred*) require large timediff for ipv6 warning + | refs: `#42140`_ + * c8afd7a Merge pull request `#42140`_ from rallytime/`bp-42097`_ + * 9c4e132 Import datetime + + * 1435bf1 require large timediff for ipv6 warning + +- **PR** `#42142`_: (*Ch3LL*) Update builds available for rc1 + @ *2017-07-05T21:11:56Z* + + * c239664 Merge pull request `#42142`_ from Ch3LL/change_builds + * e1694af Update builds available for rc1 + +- **PR** `#42078`_: (*damon-atkins*) pkg.install and pkg.remove fix version number input. + @ *2017-07-05T06:04:57Z* + + * 4780d78 Merge pull request `#42078`_ from damon-atkins/fix_convert_flt_str_version_on_cmd_line + * 09d37dd Fix comment typo + + * 7167549 Handle version=None when converted to a string it becomes 'None' parm should default to empty string rather than None, it would fix better with existing code. + + * 4fb2bb1 Fix typo + + * cf55c33 pkg.install and pkg.remove on the command line take number version numbers, store them within a float. However version is a string, to support versions numbers like 1.3.4 + +- **PR** `#42105`_: (*Ch3LL*) Update releasecanddiate doc with new 2017.7.0rc1 Release + @ *2017-07-04T03:14:42Z* + + * 46d575a Merge pull request `#42105`_ from Ch3LL/update_rc + * d4e7b91 Update releasecanddiate doc with new 2017.7.0rc1 Release + +- **PR** `#42099`_: (*rallytime*) Remove references in docs to pip install salt-cloud + @ *2017-07-03T22:13:44Z* + + - **ISSUE** `#41885`_: (*astronouth7303*) Recommended pip installation outdated? 
+ | refs: `#42099`_ + * d38548b Merge pull request `#42099`_ from rallytime/`fix-41885`_ + * c2822e0 Remove references in docs to pip install salt-cloud + +- **PR** `#42086`_: (*abulford*) Make result=true if Docker volume already exists + @ *2017-07-03T15:48:33Z* + + - **ISSUE** `#42076`_: (*abulford*) dockerng.volume_present test looks as though it would cause a change + | refs: `#42086`_ `#42086`_ + * 81d606a Merge pull request `#42086`_ from redmatter/fix-dockerng-volume-present-result + * 8d54968 Make result=true if Docker volume already exists + +- **PR** `#42021`_: (*gtmanfred*) Set concurrent to True when running states with sudo + @ *2017-06-30T21:02:15Z* + + - **ISSUE** `#25842`_: (*shikhartanwar*) Running salt-minion as non-root user to execute sudo commands always returns an error + | refs: `#42021`_ + * 7160697 Merge pull request `#42021`_ from gtmanfred/2016.11 + * 26beb18 Set concurrent to True when running states with sudo + +- **PR** `#42029`_: (*terminalmage*) Mock socket.getaddrinfo in unit.utils.network_test.NetworkTestCase.test_host_to_ips + @ *2017-06-30T20:58:56Z* + + * b784fbb Merge pull request `#42029`_ from terminalmage/host_to_ips + * 26f848e Mock socket.getaddrinfo in unit.utils.network_test.NetworkTestCase.test_host_to_ips + +- **PR** `#42055`_: (*dmurphy18*) Upgrade support for gnupg v2.1 and higher + @ *2017-06-30T20:54:02Z* + + * e067020 Merge pull request `#42055`_ from dmurphy18/handle_gnupgv21 + * e20cea6 Upgrade support for gnupg v2.1 and higher + +- **PR** `#42048`_: (*Ch3LL*) Add initial 2016.11.7 Release Notes + @ *2017-06-30T16:00:05Z* + + * 74ba2ab Merge pull request `#42048`_ from Ch3LL/add_11.7 + * 1de5e00 Add initial 2016.11.7 Release Notes + +- **PR** `#42024`_: (*leeclemens*) doc: Specify versionadded for SELinux policy install/uninstall + @ *2017-06-29T23:29:50Z* + + * ca4e619 Merge pull request `#42024`_ from leeclemens/doc/selinux + * b63a3c0 doc: Specify versionadded for SELinux policy install/uninstall + +- **PR** `#42030`_: (*whiteinge*) Re-add msgpack to mocked imports + @ *2017-06-29T20:47:59Z* + + - **PR** `#42028`_: (*whiteinge*) Revert "Allow docs to be built under Python 3" + | refs: `#42030`_ + - **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + * 50856d0 Merge pull request `#42030`_ from whiteinge/revert-py3-doc-chagnes-pt-2 + * 18dfa98 Re-add msgpack to mocked imports + +- **PR** `#42028`_: (*whiteinge*) Revert "Allow docs to be built under Python 3" + | refs: `#42030`_ + @ *2017-06-29T19:47:46Z* + + - **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + * 53031d2 Merge pull request `#42028`_ from saltstack/revert-41961-py3_doc + * 5592e6e Revert "Allow docs to be built under Python 3" + +- **PR** `#42017`_: (*lorengordon*) Fixes typo "nozerconf" -> "nozeroconf" + @ *2017-06-29T17:30:48Z* + + - **ISSUE** `#42013`_: (*dusto*) Misspelled nozeroconf in salt/modules/rh_ip.py + | refs: `#42017`_ + * 1416bf7 Merge pull request `#42017`_ from lorengordon/issue-42013 + * b6cf5f2 Fixes typo nozerconf -> nozeroconf + +- **PR** `#41906`_: (*terminalmage*) Better support for numeric saltenvs + @ *2017-06-29T17:19:33Z* + + * 0ebb50b Merge pull request `#41906`_ from terminalmage/numeric-saltenv + * 2d798de Better support for numeric saltenvs + +- **PR** `#41995`_: (*terminalmage*) Temporarily set the umask before writing an auth token + @ *2017-06-29T01:09:48Z* + + * 6a3c03c Merge pull request `#41995`_ from terminalmage/token-umask + * 4f54b00 Temporarily set 
the umask before writing an auth token + +- **PR** `#41999`_: (*terminalmage*) Update IP address for unit.utils.network_test.NetworkTestCase.test_host_to_ips + @ *2017-06-29T01:01:31Z* + + * e3801b0 Merge pull request `#41999`_ from terminalmage/fix-network-test + * fb6a933 Update IP address for unit.utils.network_test.NetworkTestCase.test_host_to_ips + +- **PR** `#41991`_: (*Da-Juan*) Accept a list for state_aggregate global setting + @ *2017-06-29T00:58:59Z* + + - **ISSUE** `#18659`_: (*whiteinge*) mod_aggregate not working for list-form configuration + | refs: `#41991`_ + * a7f3892 Merge pull request `#41991`_ from Da-Juan/fix-state_aggregate-list + * c9075b8 Accept a list for state_aggregate setting + +- **PR** `#41993`_: (*UtahDave*) change out salt support link to SaltConf link + @ *2017-06-29T00:55:20Z* + + * 7424f87 Merge pull request `#41993`_ from UtahDave/2016.11local + * bff050a change out salt support link to SaltConf link + +- **PR** `#41987`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-06-28T20:19:11Z* + + - **PR** `#41981`_: (*Ch3LL*) [2016.3] Bump latest release version to 2016.11.6 + * 3b9ccf0 Merge pull request `#41987`_ from rallytime/merge-2016.11 + * 48867c4 Merge branch '2016.3' into '2016.11' + + * c589eae Merge pull request `#41981`_ from Ch3LL/11.6_3 + + * 2516ae1 [2016.3] Bump latest release version to 2016.11.6 + +- **PR** `#41985`_: (*rallytime*) Back-port `#41780`_ to 2016.11 + @ *2017-06-28T20:18:57Z* + + - **PR** `#41780`_: (*ferringb*) Fix salt.util.render_jinja_tmpl usage for when not used in an environmnet + | refs: `#41985`_ + * 768339d Merge pull request `#41985`_ from rallytime/`bp-41780`_ + * 8f8d3a4 Fix salt.util.render_jinja_tmpl usage for when not used in an environment. + +- **PR** `#41986`_: (*rallytime*) Back-port `#41820`_ to 2016.11 + @ *2017-06-28T20:18:43Z* + + - **ISSUE** `#34963`_: (*craigafinch*) Incorrect behavior or documentation for comments in salt.states.pkgrepo.managed + | refs: `#41820`_ + - **PR** `#41820`_: (*nhavens*) Fix yum repo file comments to work as documented in pkgrepo.managed + | refs: `#41986`_ + * bd9090c Merge pull request `#41986`_ from rallytime/`bp-41820`_ + * 72320e3 Fix yum repo file comments to work as documented in pkgrepo.managed + +- **PR** `#41973`_: (*vutny*) Fix Master/Minion scheduled jobs based on Cron expressions + | refs: `#42077`_ + @ *2017-06-28T16:39:02Z* + + * a31da52 Merge pull request `#41973`_ from vutny/fix-croniter-scheduled-jobs + * 148788e Fix Master/Minion scheduled jobs based on Cron expressions + +- **PR** `#41980`_: (*Ch3LL*) [2016.11] Bump latest release version to 2016.11.6 + @ *2017-06-28T15:35:11Z* + + * 689ff93 Merge pull request `#41980`_ from Ch3LL/11.6_11 + * fe4f571 [2016.11] Bump latest release version to 2016.11.6 + +- **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + @ *2017-06-27T21:11:54Z* + + * 82b1eb2 Merge pull request `#41961`_ from cachedout/py3_doc + * 7aacddf Allow docs to be built under Python 3 + +- **PR** `#41948`_: (*davidjb*) Fix Composer state's `name` docs; formatting + @ *2017-06-27T17:51:29Z* + + - **PR** `#41933`_: (*davidjb*) Fix Composer state's `name` docs and improve formatting + | refs: `#41948`_ + * f0eb51d Merge pull request `#41948`_ from davidjb/patch-9 + * 0e4b3d9 Fix Composer state's `name` docs; formatting + +- **PR** `#41914`_: (*vutny*) archive.extracted: fix hash sum verification for local archives + @ *2017-06-26T17:59:27Z* + + * e28e10d Merge pull request `#41914`_ from 
vutny/fix-archive-extracted-local-file-hash + * 54910fe archive.extracted: fix hash sum verification for local archives + +- **PR** `#41912`_: (*Ch3LL*) Allow pacman module to run on Manjaro + @ *2017-06-26T15:35:20Z* + + * 76ad6ff Merge pull request `#41912`_ from Ch3LL/fix_manjaro + * e4dd72a Update os_name_map in core grains for new manjaro systems + + * aa7c839 Allow pacman module to run on Manjaro + +- **PR** `#41516`_: (*kstreee*) Implements MessageClientPool to avoid blocking waiting for zeromq and tcp communications. + @ *2017-06-26T14:41:38Z* + + - **ISSUE** `#38093`_: (*DmitryKuzmenko*) Make threads avoid blocking waiting while communicating using TCP transport. + | refs: `#41516`_ `#41516`_ + - **PR** `#37878`_: (*kstreee*) Makes threads avoid blocking waiting while communicating using Zeromq. + | refs: `#41516`_ `#41516`_ + * ff67d47 Merge pull request `#41516`_ from kstreee/fix-blocking-waiting-tcp-connection + * df96969 Removes redundant closing statements. + + * 94b9ea5 Implements MessageClientPool to avoid blocking waiting for zeromq and tcp communications. + +- **PR** `#41888`_: (*Ch3LL*) Add additional commits to 2016.11.6 release notes + @ *2017-06-22T16:19:00Z* + + * c90cb67 Merge pull request `#41888`_ from Ch3LL/change_release + * 4e1239d Add additional commits to 2016.11.6 release notes + +- **PR** `#41882`_: (*Ch3LL*) Add pycryptodome to crypt_test + @ *2017-06-21T19:51:10Z* + + * 4a32644 Merge pull request `#41882`_ from Ch3LL/fix_crypt_test + * 6f70dbd Add pycryptodome to crypt_test + +- **PR** `#41877`_: (*Ch3LL*) Fix netstat and routes test + @ *2017-06-21T16:16:58Z* + + * 13df29e Merge pull request `#41877`_ from Ch3LL/fix_netstat_test + * d2076a6 Patch salt.utils.which for test_route test + + * 51f7e10 Patch salt.utils.which for test_netstat test + +- **PR** `#41566`_: (*morganwillcock*) win_certutil: workaround for reading serial numbers with non-English languages + @ *2017-06-21T15:40:29Z* + + - **ISSUE** `#41367`_: (*lubyou*) certutil.add_store does not work on non english windows versions or on Windows 10 (localised or English) + | refs: `#41566`_ + * 66f8c83 Merge pull request `#41566`_ from morganwillcock/certutil + * c337d52 Fix test data for test_get_serial, and a typo + + * 7f69613 test and lint fixes + + * 8ee4843 Suppress output of crypt context and be more specifc with whitespace vs. 
serial + + * 61f817d Match serials based on output position (fix for non-English languages) + +- **PR** `#41679`_: (*terminalmage*) Prevent unnecessary duplicate pillar compilation + @ *2017-06-21T15:32:42Z* + + * 4d0f5c4 Merge pull request `#41679`_ from terminalmage/get-top-file-envs + * a916e8d Improve normalization of saltenv/pillarenv usage for states + + * 02f293a Update state unit tests to reflect recent changes + + * b7e5c11 Don't compile pillar data when getting top file envs + + * 8d6fdb7 Don't compile pillar twice for salt-call + + * d2abfbf Add initial_pillar argument to salt.state + + * 70186de salt.pillar: rename the "pillar" argument to "pillar_override" + +- **PR** `#41853`_: (*vutny*) Fix master side scheduled jobs to return events + @ *2017-06-20T22:06:29Z* + + - **ISSUE** `#39668`_: (*mirceaulinic*) Master scheduled job not recorded on the event bus + | refs: `#41658`_ + - **ISSUE** `#12653`_: (*pengyao*) salt schedule doesn't return jobs result info to master + | refs: `#41853`_ + - **PR** `#41695`_: (*xiaoanyunfei*) fix max RecursionError, Ellipsis + | refs: `#41853`_ + - **PR** `#41658`_: (*garethgreenaway*) Fixes to the salt scheduler + | refs: `#41853`_ + * 29b0acc Merge pull request `#41853`_ from vutny/fix-master-schedule-event + * e206c38 Fix master side scheduled jobs to return events + + +.. _`#1`: https://github.com/saltstack/salt/issues/1 +.. _`#1036125`: https://github.com/saltstack/salt/issues/1036125 +.. _`#12653`: https://github.com/saltstack/salt/issues/12653 +.. _`#15171`: https://github.com/saltstack/salt/issues/15171 +.. _`#18659`: https://github.com/saltstack/salt/issues/18659 +.. _`#23516`: https://github.com/saltstack/salt/issues/23516 +.. _`#25842`: https://github.com/saltstack/salt/issues/25842 +.. _`#32400`: https://github.com/saltstack/salt/issues/32400 +.. _`#33806`: https://github.com/saltstack/salt/pull/33806 +.. _`#34280`: https://github.com/saltstack/salt/pull/34280 +.. _`#34963`: https://github.com/saltstack/salt/issues/34963 +.. _`#37424`: https://github.com/saltstack/salt/pull/37424 +.. _`#37878`: https://github.com/saltstack/salt/pull/37878 +.. _`#38093`: https://github.com/saltstack/salt/issues/38093 +.. _`#38839`: https://github.com/saltstack/salt/issues/38839 +.. _`#38965`: https://github.com/saltstack/salt/pull/38965 +.. _`#39365`: https://github.com/saltstack/salt/issues/39365 +.. _`#39366`: https://github.com/saltstack/salt/pull/39366 +.. _`#39668`: https://github.com/saltstack/salt/issues/39668 +.. _`#40490`: https://github.com/saltstack/salt/issues/40490 +.. _`#41116`: https://github.com/saltstack/salt/issues/41116 +.. _`#41367`: https://github.com/saltstack/salt/issues/41367 +.. _`#41433`: https://github.com/saltstack/salt/issues/41433 +.. _`#41516`: https://github.com/saltstack/salt/pull/41516 +.. _`#41543`: https://github.com/saltstack/salt/pull/41543 +.. _`#41566`: https://github.com/saltstack/salt/pull/41566 +.. _`#41658`: https://github.com/saltstack/salt/pull/41658 +.. _`#41679`: https://github.com/saltstack/salt/pull/41679 +.. _`#41695`: https://github.com/saltstack/salt/pull/41695 +.. _`#41721`: https://github.com/saltstack/salt/issues/41721 +.. _`#41770`: https://github.com/saltstack/salt/issues/41770 +.. _`#41780`: https://github.com/saltstack/salt/pull/41780 +.. _`#41820`: https://github.com/saltstack/salt/pull/41820 +.. _`#41853`: https://github.com/saltstack/salt/pull/41853 +.. _`#41877`: https://github.com/saltstack/salt/pull/41877 +.. _`#41882`: https://github.com/saltstack/salt/pull/41882 +.. 
_`#41885`: https://github.com/saltstack/salt/issues/41885 +.. _`#41888`: https://github.com/saltstack/salt/pull/41888 +.. _`#41906`: https://github.com/saltstack/salt/pull/41906 +.. _`#41912`: https://github.com/saltstack/salt/pull/41912 +.. _`#41914`: https://github.com/saltstack/salt/pull/41914 +.. _`#41933`: https://github.com/saltstack/salt/pull/41933 +.. _`#41936`: https://github.com/saltstack/salt/issues/41936 +.. _`#41945`: https://github.com/saltstack/salt/pull/41945 +.. _`#41948`: https://github.com/saltstack/salt/pull/41948 +.. _`#41955`: https://github.com/saltstack/salt/issues/41955 +.. _`#41961`: https://github.com/saltstack/salt/pull/41961 +.. _`#41968`: https://github.com/saltstack/salt/pull/41968 +.. _`#41973`: https://github.com/saltstack/salt/pull/41973 +.. _`#41976`: https://github.com/saltstack/salt/issues/41976 +.. _`#41977`: https://github.com/saltstack/salt/pull/41977 +.. _`#41980`: https://github.com/saltstack/salt/pull/41980 +.. _`#41981`: https://github.com/saltstack/salt/pull/41981 +.. _`#41982`: https://github.com/saltstack/salt/issues/41982 +.. _`#41985`: https://github.com/saltstack/salt/pull/41985 +.. _`#41986`: https://github.com/saltstack/salt/pull/41986 +.. _`#41987`: https://github.com/saltstack/salt/pull/41987 +.. _`#41988`: https://github.com/saltstack/salt/pull/41988 +.. _`#41991`: https://github.com/saltstack/salt/pull/41991 +.. _`#41993`: https://github.com/saltstack/salt/pull/41993 +.. _`#41995`: https://github.com/saltstack/salt/pull/41995 +.. _`#41999`: https://github.com/saltstack/salt/pull/41999 +.. _`#42013`: https://github.com/saltstack/salt/issues/42013 +.. _`#42017`: https://github.com/saltstack/salt/pull/42017 +.. _`#42021`: https://github.com/saltstack/salt/pull/42021 +.. _`#42024`: https://github.com/saltstack/salt/pull/42024 +.. _`#42028`: https://github.com/saltstack/salt/pull/42028 +.. _`#42029`: https://github.com/saltstack/salt/pull/42029 +.. _`#42030`: https://github.com/saltstack/salt/pull/42030 +.. _`#42041`: https://github.com/saltstack/salt/issues/42041 +.. _`#42045`: https://github.com/saltstack/salt/pull/42045 +.. _`#42048`: https://github.com/saltstack/salt/pull/42048 +.. _`#42055`: https://github.com/saltstack/salt/pull/42055 +.. _`#42076`: https://github.com/saltstack/salt/issues/42076 +.. _`#42077`: https://github.com/saltstack/salt/pull/42077 +.. _`#42078`: https://github.com/saltstack/salt/pull/42078 +.. _`#42086`: https://github.com/saltstack/salt/pull/42086 +.. _`#42094`: https://github.com/saltstack/salt/pull/42094 +.. _`#42095`: https://github.com/saltstack/salt/pull/42095 +.. _`#42097`: https://github.com/saltstack/salt/pull/42097 +.. _`#42098`: https://github.com/saltstack/salt/pull/42098 +.. _`#42099`: https://github.com/saltstack/salt/pull/42099 +.. _`#42105`: https://github.com/saltstack/salt/pull/42105 +.. _`#42109`: https://github.com/saltstack/salt/pull/42109 +.. _`#42114`: https://github.com/saltstack/salt/issues/42114 +.. _`#42115`: https://github.com/saltstack/salt/issues/42115 +.. _`#42116`: https://github.com/saltstack/salt/issues/42116 +.. _`#42119`: https://github.com/saltstack/salt/pull/42119 +.. _`#42121`: https://github.com/saltstack/salt/pull/42121 +.. _`#42123`: https://github.com/saltstack/salt/pull/42123 +.. _`#42137`: https://github.com/saltstack/salt/issues/42137 +.. _`#42140`: https://github.com/saltstack/salt/pull/42140 +.. _`#42141`: https://github.com/saltstack/salt/pull/42141 +.. _`#42142`: https://github.com/saltstack/salt/pull/42142 +.. 
_`#42152`: https://github.com/saltstack/salt/issues/42152 +.. _`#42155`: https://github.com/saltstack/salt/pull/42155 +.. _`#42163`: https://github.com/saltstack/salt/pull/42163 +.. _`#42164`: https://github.com/saltstack/salt/pull/42164 +.. _`#42172`: https://github.com/saltstack/salt/pull/42172 +.. _`#42173`: https://github.com/saltstack/salt/pull/42173 +.. _`#42175`: https://github.com/saltstack/salt/pull/42175 +.. _`#42176`: https://github.com/saltstack/salt/pull/42176 +.. _`#42181`: https://github.com/saltstack/salt/pull/42181 +.. _`#42194`: https://github.com/saltstack/salt/issues/42194 +.. _`#42198`: https://github.com/saltstack/salt/issues/42198 +.. _`#42200`: https://github.com/saltstack/salt/pull/42200 +.. _`#42211`: https://github.com/saltstack/salt/pull/42211 +.. _`#42215`: https://github.com/saltstack/salt/pull/42215 +.. _`#42224`: https://github.com/saltstack/salt/pull/42224 +.. _`#42232`: https://github.com/saltstack/salt/issues/42232 +.. _`#42235`: https://github.com/saltstack/salt/pull/42235 +.. _`#42240`: https://github.com/saltstack/salt/issues/42240 +.. _`#42252`: https://github.com/saltstack/salt/pull/42252 +.. _`#42253`: https://github.com/saltstack/salt/pull/42253 +.. _`#42261`: https://github.com/saltstack/salt/pull/42261 +.. _`#42262`: https://github.com/saltstack/salt/pull/42262 +.. _`#42264`: https://github.com/saltstack/salt/pull/42264 +.. _`#42267`: https://github.com/saltstack/salt/issues/42267 +.. _`#42269`: https://github.com/saltstack/salt/pull/42269 +.. _`#42275`: https://github.com/saltstack/salt/pull/42275 +.. _`#42279`: https://github.com/saltstack/salt/issues/42279 +.. _`#42282`: https://github.com/saltstack/salt/pull/42282 +.. _`#42289`: https://github.com/saltstack/salt/pull/42289 +.. _`#42291`: https://github.com/saltstack/salt/pull/42291 +.. _`#42295`: https://github.com/saltstack/salt/issues/42295 +.. _`#42308`: https://github.com/saltstack/salt/pull/42308 +.. _`#42314`: https://github.com/saltstack/salt/pull/42314 +.. _`#42319`: https://github.com/saltstack/salt/pull/42319 +.. _`#42329`: https://github.com/saltstack/salt/issues/42329 +.. _`#42333`: https://github.com/saltstack/salt/issues/42333 +.. _`#42339`: https://github.com/saltstack/salt/pull/42339 +.. _`#42350`: https://github.com/saltstack/salt/pull/42350 +.. _`#42352`: https://github.com/saltstack/salt/pull/42352 +.. _`#42353`: https://github.com/saltstack/salt/pull/42353 +.. _`#42356`: https://github.com/saltstack/salt/pull/42356 +.. _`#42357`: https://github.com/saltstack/salt/issues/42357 +.. _`#42359`: https://github.com/saltstack/salt/pull/42359 +.. _`#42360`: https://github.com/saltstack/salt/pull/42360 +.. _`#42368`: https://github.com/saltstack/salt/pull/42368 +.. _`#42370`: https://github.com/saltstack/salt/pull/42370 +.. _`#42371`: https://github.com/saltstack/salt/issues/42371 +.. _`#42375`: https://github.com/saltstack/salt/issues/42375 +.. _`#42387`: https://github.com/saltstack/salt/pull/42387 +.. _`#42403`: https://github.com/saltstack/salt/issues/42403 +.. _`#42405`: https://github.com/saltstack/salt/issues/42405 +.. _`#42413`: https://github.com/saltstack/salt/issues/42413 +.. _`#42414`: https://github.com/saltstack/salt/pull/42414 +.. _`#42417`: https://github.com/saltstack/salt/issues/42417 +.. _`#42424`: https://github.com/saltstack/salt/pull/42424 +.. _`#42433`: https://github.com/saltstack/salt/pull/42433 +.. _`#42443`: https://github.com/saltstack/salt/pull/42443 +.. _`#42456`: https://github.com/saltstack/salt/issues/42456 +.. 
_`#42464`: https://github.com/saltstack/salt/pull/42464 +.. _`#42477`: https://github.com/saltstack/salt/issues/42477 +.. _`#42479`: https://github.com/saltstack/salt/pull/42479 +.. _`#42509`: https://github.com/saltstack/salt/pull/42509 +.. _`#42515`: https://github.com/saltstack/salt/pull/42515 +.. _`#42516`: https://github.com/saltstack/salt/pull/42516 +.. _`#42523`: https://github.com/saltstack/salt/pull/42523 +.. _`#42527`: https://github.com/saltstack/salt/pull/42527 +.. _`#42547`: https://github.com/saltstack/salt/pull/42547 +.. _`#42551`: https://github.com/saltstack/salt/pull/42551 +.. _`#42552`: https://github.com/saltstack/salt/pull/42552 +.. _`#42571`: https://github.com/saltstack/salt/pull/42571 +.. _`#42573`: https://github.com/saltstack/salt/pull/42573 +.. _`#42574`: https://github.com/saltstack/salt/pull/42574 +.. _`#42586`: https://github.com/saltstack/salt/pull/42586 +.. _`#42600`: https://github.com/saltstack/salt/issues/42600 +.. _`#42623`: https://github.com/saltstack/salt/pull/42623 +.. _`#42627`: https://github.com/saltstack/salt/issues/42627 +.. _`#42629`: https://github.com/saltstack/salt/pull/42629 +.. _`#42642`: https://github.com/saltstack/salt/issues/42642 +.. _`#42644`: https://github.com/saltstack/salt/issues/42644 +.. _`#42651`: https://github.com/saltstack/salt/pull/42651 +.. _`#42655`: https://github.com/saltstack/salt/pull/42655 +.. _`#42663`: https://github.com/saltstack/salt/pull/42663 +.. _`#42669`: https://github.com/saltstack/salt/pull/42669 +.. _`#42683`: https://github.com/saltstack/salt/issues/42683 +.. _`#42686`: https://github.com/saltstack/salt/issues/42686 +.. _`#42690`: https://github.com/saltstack/salt/issues/42690 +.. _`#42693`: https://github.com/saltstack/salt/pull/42693 +.. _`#42694`: https://github.com/saltstack/salt/pull/42694 +.. _`#42731`: https://github.com/saltstack/salt/issues/42731 +.. _`#42744`: https://github.com/saltstack/salt/pull/42744 +.. _`#42747`: https://github.com/saltstack/salt/issues/42747 +.. _`#42748`: https://github.com/saltstack/salt/pull/42748 +.. _`#42753`: https://github.com/saltstack/salt/issues/42753 +.. _`#42760`: https://github.com/saltstack/salt/pull/42760 +.. _`#42764`: https://github.com/saltstack/salt/pull/42764 +.. _`#42784`: https://github.com/saltstack/salt/pull/42784 +.. _`#42786`: https://github.com/saltstack/salt/pull/42786 +.. _`#42788`: https://github.com/saltstack/salt/pull/42788 +.. _`#42795`: https://github.com/saltstack/salt/pull/42795 +.. _`#42798`: https://github.com/saltstack/salt/pull/42798 +.. _`#42803`: https://github.com/saltstack/salt/issues/42803 +.. _`#42804`: https://github.com/saltstack/salt/pull/42804 +.. _`#42805`: https://github.com/saltstack/salt/pull/42805 +.. _`#42806`: https://github.com/saltstack/salt/pull/42806 +.. _`#42826`: https://github.com/saltstack/salt/pull/42826 +.. _`#42829`: https://github.com/saltstack/salt/pull/42829 +.. _`#42835`: https://github.com/saltstack/salt/pull/42835 +.. _`#42836`: https://github.com/saltstack/salt/pull/42836 +.. _`#42838`: https://github.com/saltstack/salt/pull/42838 +.. _`#42848`: https://github.com/saltstack/salt/pull/42848 +.. _`#42851`: https://github.com/saltstack/salt/pull/42851 +.. _`#42856`: https://github.com/saltstack/salt/pull/42856 +.. _`#42859`: https://github.com/saltstack/salt/pull/42859 +.. _`#42861`: https://github.com/saltstack/salt/pull/42861 +.. _`#42864`: https://github.com/saltstack/salt/pull/42864 +.. _`#42869`: https://github.com/saltstack/salt/issues/42869 +.. 
_`#42871`: https://github.com/saltstack/salt/pull/42871 +.. _`#42877`: https://github.com/saltstack/salt/pull/42877 +.. _`#42882`: https://github.com/saltstack/salt/pull/42882 +.. _`#42883`: https://github.com/saltstack/salt/pull/42883 +.. _`#42886`: https://github.com/saltstack/salt/pull/42886 +.. _`#42890`: https://github.com/saltstack/salt/pull/42890 +.. _`#42918`: https://github.com/saltstack/salt/pull/42918 +.. _`#42919`: https://github.com/saltstack/salt/pull/42919 +.. _`#42940`: https://github.com/saltstack/salt/pull/42940 +.. _`#42942`: https://github.com/saltstack/salt/pull/42942 +.. _`#42944`: https://github.com/saltstack/salt/pull/42944 +.. _`#42949`: https://github.com/saltstack/salt/pull/42949 +.. _`#42950`: https://github.com/saltstack/salt/pull/42950 +.. _`#42952`: https://github.com/saltstack/salt/pull/42952 +.. _`#42954`: https://github.com/saltstack/salt/pull/42954 +.. _`#42959`: https://github.com/saltstack/salt/pull/42959 +.. _`#42968`: https://github.com/saltstack/salt/pull/42968 +.. _`#42969`: https://github.com/saltstack/salt/pull/42969 +.. _`#42985`: https://github.com/saltstack/salt/pull/42985 +.. _`#42986`: https://github.com/saltstack/salt/pull/42986 +.. _`#42992`: https://github.com/saltstack/salt/issues/42992 +.. _`#43009`: https://github.com/saltstack/salt/pull/43009 +.. _`#43014`: https://github.com/saltstack/salt/pull/43014 +.. _`#43019`: https://github.com/saltstack/salt/pull/43019 +.. _`#43020`: https://github.com/saltstack/salt/pull/43020 +.. _`#43021`: https://github.com/saltstack/salt/pull/43021 +.. _`#43023`: https://github.com/saltstack/salt/pull/43023 +.. _`#43026`: https://github.com/saltstack/salt/pull/43026 +.. _`#43027`: https://github.com/saltstack/salt/pull/43027 +.. _`#43031`: https://github.com/saltstack/salt/pull/43031 +.. _`#43032`: https://github.com/saltstack/salt/pull/43032 +.. _`#43033`: https://github.com/saltstack/salt/pull/43033 +.. _`#43036`: https://github.com/saltstack/salt/issues/43036 +.. _`#43037`: https://github.com/saltstack/salt/pull/43037 +.. _`#43048`: https://github.com/saltstack/salt/pull/43048 +.. _`#43054`: https://github.com/saltstack/salt/pull/43054 +.. _`#43060`: https://github.com/saltstack/salt/pull/43060 +.. _`#43064`: https://github.com/saltstack/salt/pull/43064 +.. _`#43092`: https://github.com/saltstack/salt/pull/43092 +.. _`#43100`: https://github.com/saltstack/salt/pull/43100 +.. _`#43101`: https://github.com/saltstack/salt/issues/43101 +.. _`#43103`: https://github.com/saltstack/salt/pull/43103 +.. _`#43116`: https://github.com/saltstack/salt/pull/43116 +.. _`#43143`: https://github.com/saltstack/salt/issues/43143 +.. _`#43151`: https://github.com/saltstack/salt/pull/43151 +.. _`#43154`: https://github.com/saltstack/salt/pull/43154 +.. _`#43171`: https://github.com/saltstack/salt/pull/43171 +.. _`#43173`: https://github.com/saltstack/salt/pull/43173 +.. _`#43178`: https://github.com/saltstack/salt/pull/43178 +.. _`#43179`: https://github.com/saltstack/salt/pull/43179 +.. _`#43191`: https://github.com/saltstack/salt/pull/43191 +.. _`#43196`: https://github.com/saltstack/salt/pull/43196 +.. _`#43198`: https://github.com/saltstack/salt/issues/43198 +.. _`#43199`: https://github.com/saltstack/salt/pull/43199 +.. _`#43202`: https://github.com/saltstack/salt/pull/43202 +.. _`#43228`: https://github.com/saltstack/salt/pull/43228 +.. _`#43271`: https://github.com/saltstack/salt/pull/43271 +.. _`#475`: https://github.com/saltstack/salt/issues/475 +.. _`#495`: https://github.com/saltstack/salt/issues/495 +.. 
_`bp-37424`: https://github.com/saltstack/salt/pull/37424
+.. _`bp-39366`: https://github.com/saltstack/salt/pull/39366
+.. _`bp-41543`: https://github.com/saltstack/salt/pull/41543
+.. _`bp-41780`: https://github.com/saltstack/salt/pull/41780
+.. _`bp-41820`: https://github.com/saltstack/salt/pull/41820
+.. _`bp-42097`: https://github.com/saltstack/salt/pull/42097
+.. _`bp-42098`: https://github.com/saltstack/salt/pull/42098
+.. _`bp-42109`: https://github.com/saltstack/salt/pull/42109
+.. _`bp-42224`: https://github.com/saltstack/salt/pull/42224
+.. _`bp-42433`: https://github.com/saltstack/salt/pull/42433
+.. _`bp-42547`: https://github.com/saltstack/salt/pull/42547
+.. _`bp-42552`: https://github.com/saltstack/salt/pull/42552
+.. _`bp-42651`: https://github.com/saltstack/salt/pull/42651
+.. _`bp-42744`: https://github.com/saltstack/salt/pull/42744
+.. _`bp-42760`: https://github.com/saltstack/salt/pull/42760
+.. _`bp-42784`: https://github.com/saltstack/salt/pull/42784
+.. _`bp-42848`: https://github.com/saltstack/salt/pull/42848
+.. _`bp-42871`: https://github.com/saltstack/salt/pull/42871
+.. _`bp-42883`: https://github.com/saltstack/salt/pull/42883
+.. _`bp-43020`: https://github.com/saltstack/salt/pull/43020
+.. _`bp-43031`: https://github.com/saltstack/salt/pull/43031
+.. _`bp-43116`: https://github.com/saltstack/salt/pull/43116
+.. _`fix-38839`: https://github.com/saltstack/salt/issues/38839
+.. _`fix-41116`: https://github.com/saltstack/salt/issues/41116
+.. _`fix-41721`: https://github.com/saltstack/salt/issues/41721
+.. _`fix-41885`: https://github.com/saltstack/salt/issues/41885
+.. _`fix-42115`: https://github.com/saltstack/salt/issues/42115
+.. _`fix-42152`: https://github.com/saltstack/salt/issues/42152
+.. _`fix-42267`: https://github.com/saltstack/salt/issues/42267
+.. _`fix-42375`: https://github.com/saltstack/salt/issues/42375
+.. _`fix-42405`: https://github.com/saltstack/salt/issues/42405
+.. _`fix-42417`: https://github.com/saltstack/salt/issues/42417
+.. _`fix-42683`: https://github.com/saltstack/salt/issues/42683
diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst
index 5bd9ec8a809..97121af0903 100644
--- a/doc/topics/releases/oxygen.rst
+++ b/doc/topics/releases/oxygen.rst
@@ -117,6 +117,194 @@ file. For example:
 These commands will run in sequence **before** the bootstrap script is executed.
+New pillar/master_tops module called saltclass
+----------------------------------------------
+
+This module clones the behaviour of reclass (http://reclass.pantsfullofunix.net/), without the need for an external app, and adds several features to improve flexibility.
+Saltclass lets you define your nodes in simple YAML files (``.yml``) through hierarchical class inheritance, with the ability to override pillars down the tree.
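To wire the module in, both the ``master_tops`` and ``ext_pillar`` subsystems are pointed at the saltclass tree in the master configuration. A minimal sketch, assuming the tree lives under ``/srv/saltclass``:

.. code-block:: yaml

    # assumes the saltclass tree (classes/ and nodes/) lives under /srv/saltclass
    master_tops:
      saltclass:
        path: /srv/saltclass

    ext_pillar:
      - saltclass:
        - path: /srv/saltclass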
+
+**Features**
+
+- Define your nodes through hierarchical class inheritance
+- Reuse your reclass data with minimal modifications
+  - applications => states
+  - parameters => pillars
+- Use Jinja templating in your YAML definitions
+- Access to the following Salt objects in Jinja
+  - ``__opts__``
+  - ``__salt__``
+  - ``__grains__``
+  - ``__pillars__``
+  - ``minion_id``
+- Choose how to merge or override your lists using the ``^`` character (see examples)
+- Expand variables ``${}`` with the possibility to escape them if needed: ``\${}`` (see examples)
+- A missing node/class is ignored and logged, and simply returns empty instead of breaking the pillar module completely
+
+An example subset of data is available here: http://git.mauras.ch/salt/saltclass/src/master/examples
+
+========================== ===========
+Terms usable in yaml files Description
+========================== ===========
+classes                    A list of classes that will be processed in order
+states                     A list of states that will be returned by the master_tops function
+pillars                    A YAML dictionary that will be returned by the ext_pillar function
+environment                Node saltenv that will be used by master_tops
+========================== ===========
+
+A class consists of:
+
+- zero or more parent classes
+- zero or more states
+- any number of pillars
+
+A child class can override pillars from a parent class.
+A node definition is a class in itself, with an added ``environment`` parameter for the ``saltenv`` definition.
+
+**Class names**
+
+Class names mimic Salt's way of defining state and pillar files.
+This means that the class name ``default.users`` will correspond to one of these:
+
+- ``/classes/default/users.yml``
+- ``/classes/default/users/init.yml``
+
+**Saltclass tree**
+
+A saltclass tree would look like this:
+
+.. code-block:: text
+
+    ├── classes
+    │   ├── app
+    │   │   ├── borgbackup.yml
+    │   │   └── ssh
+    │   │       └── server.yml
+    │   ├── default
+    │   │   ├── init.yml
+    │   │   ├── motd.yml
+    │   │   └── users.yml
+    │   ├── roles
+    │   │   ├── app.yml
+    │   │   └── nginx
+    │   │       ├── init.yml
+    │   │       └── server.yml
+    │   └── subsidiaries
+    │       ├── gnv.yml
+    │       ├── qls.yml
+    │       └── zrh.yml
+    └── nodes
+        ├── geneva
+        │   └── gnv.node1.yml
+        ├── lausanne
+        │   ├── qls.node1.yml
+        │   └── qls.node2.yml
+        ├── node127.yml
+        └── zurich
+            ├── zrh.node1.yml
+            ├── zrh.node2.yml
+            └── zrh.node3.yml
+
+**Examples**
+
+``/nodes/lausanne/qls.node1.yml``
+
+.. code-block:: yaml
+
+    environment: base
+
+    classes:
+    {% for class in ['default'] %}
+      - {{ class }}
+    {% endfor %}
+      - subsidiaries.{{ __grains__['id'].split('.')[0] }}
+
+``/classes/default/init.yml``
+
+.. code-block:: yaml
+
+    classes:
+      - default.users
+      - default.motd
+
+    states:
+      - openssh
+
+    pillars:
+      default:
+        network:
+          dns:
+            srv1: 192.168.0.1
+            srv2: 192.168.0.2
+          domain: example.com
+        ntp:
+          srv1: 192.168.10.10
+          srv2: 192.168.10.20
+
+``/classes/subsidiaries/gnv.yml``
+
+.. code-block:: yaml
+
+    pillars:
+      default:
+        network:
+          sub: Geneva
+          dns:
+            srv1: 10.20.0.1
+            srv2: 10.20.0.2
+            srv3: 192.168.1.1
+          domain: gnv.example.com
+        users:
+          adm1:
+            uid: 1210
+            gid: 1210
+            gecos: 'Super user admin1'
+            homedir: /srv/app/adm1
+          adm3:
+            uid: 1203
+            gid: 1203
+            gecos: 'Super user admin3'
+            homedir: /srv/app/adm3
+
+Variable expansions:
+
+Escaped variables are rendered as is - ``${test}``
+
+Missing variables are rendered as is - ``${net:dns:srv2}``
+
+.. 
code-block:: yaml + + pillars: + app: + config: + dns: + srv1: ${default:network:dns:srv1} + srv2: ${net:dns:srv2} + uri: https://application.domain/call?\${test} + prod_parameters: + - p1 + - p2 + - p3 + pkg: + - app-core + - app-backend + +List override: + +Not using ``^`` as the first entry will simply merge the lists + +.. code-block:: yaml + + pillars: + app: + pkg: + - ^ + - app-frontend + + +**Known limitation** + +Currently you can't have both a variable and an escaped variable in the same string as the escaped one will not be correctly rendered - '\${xx}' will stay as is instead of being rendered as '${xx}' + Newer PyWinRM Versions ---------------------- diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 38e3c3362ed..2ab9ad9967f 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -369,46 +369,13 @@ class LoadAuth(object): eauth_config = self.opts['external_auth'][eauth] if not groups: groups = [] - group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups - - # First we need to know if the user is allowed to proceed via any of their group memberships. - group_auth_match = False - for group_config in group_perm_keys: - if group_config.rstrip('%') in groups: - group_auth_match = True - break - # If a group_auth_match is set it means only that we have a - # user which matches at least one or more of the groups defined - # in the configuration file. - - external_auth_in_db = False - for entry in eauth_config: - if entry.startswith('^'): - external_auth_in_db = True - break - - # If neither a catchall, a named membership or a group - # membership is found, there is no need to continue. Simply - # deny the user access. - if not ((name in eauth_config) | - ('*' in eauth_config) | - group_auth_match | external_auth_in_db): - # Auth successful, but no matching user found in config - log.warning('Authorization failure occurred.') - return None # We now have an authenticated session and it is time to determine # what the user has access to. - auth_list = [] - if name in eauth_config: - auth_list = eauth_config[name] - elif '*' in eauth_config: - auth_list = eauth_config['*'] - if group_auth_match: - auth_list = self.ckminions.fill_auth_list_from_groups( - eauth_config, - groups, - auth_list) + auth_list = self.ckminions.fill_auth_list( + eauth_config, + name, + groups) auth_list = self.__process_acl(load, auth_list) diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py index 5276a8e43cf..9aca365fa7e 100644 --- a/salt/cache/redis_cache.py +++ b/salt/cache/redis_cache.py @@ -481,18 +481,17 @@ def list_(bank): Lists entries stored in the specified bank. ''' redis_server = _get_redis_server() - bank_keys_redis_key = _get_bank_keys_redis_key(bank) - bank_keys = None + bank_redis_key = _get_bank_redis_key(bank) try: - bank_keys = redis_server.smembers(bank_keys_redis_key) + banks = redis_server.smembers(bank_redis_key) except (RedisConnectionError, RedisResponseError) as rerr: - mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key, + mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key, rerr=rerr) log.error(mesg) raise SaltCacheError(mesg) - if not bank_keys: + if not banks: return [] - return list(bank_keys) + return list(banks) def contains(bank, key): @@ -500,15 +499,11 @@ def contains(bank, key): Checks if the specified bank contains the specified key. 
''' redis_server = _get_redis_server() - bank_keys_redis_key = _get_bank_keys_redis_key(bank) - bank_keys = None + bank_redis_key = _get_bank_redis_key(bank) try: - bank_keys = redis_server.smembers(bank_keys_redis_key) + return redis_server.sismember(bank_redis_key, key) except (RedisConnectionError, RedisResponseError) as rerr: - mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key, + mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key, rerr=rerr) log.error(mesg) raise SaltCacheError(mesg) - if not bank_keys: - return False - return key in bank_keys diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index 3dc463c4b12..ac45a7a62ba 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -3543,16 +3543,15 @@ def list_nodes_min(location=None, call=None): for instance in instances: if isinstance(instance['instancesSet']['item'], list): - for item in instance['instancesSet']['item']: - state = item['instanceState']['name'] - name = _extract_name_tag(item) - id = item['instanceId'] + items = instance['instancesSet']['item'] else: - item = instance['instancesSet']['item'] + items = [instance['instancesSet']['item']] + + for item in items: state = item['instanceState']['name'] name = _extract_name_tag(item) id = item['instanceId'] - ret[name] = {'state': state, 'id': id} + ret[name] = {'state': state, 'id': id} return ret diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index c3a1b56c0e6..8767c964254 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -101,7 +101,7 @@ __virtualname__ = 'libvirt' log = logging.getLogger(__name__) -def libvirt_error_handler(ctx, error): +def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument ''' Redirect stderr prints from libvirt to salt logging. ''' diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index d1caaab282b..959688ac847 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -7,6 +7,7 @@ XenServer Cloud Driver The XenServer driver is designed to work with a Citrix XenServer. Requires XenServer SDK +(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ ) Place a copy of the XenAPI.py in the Python site-packages folder. 
@@ -157,13 +158,27 @@ def _get_session(): default=False, search_global=False ) - session = XenAPI.Session(url, ignore_ssl=ignore_ssl) - log.debug('url: {} user: {} password: {}, originator: {}'.format( - url, - user, - 'XXX-pw-redacted-XXX', - originator)) - session.xenapi.login_with_password(user, password, api_version, originator) + try: + session = XenAPI.Session(url, ignore_ssl=ignore_ssl) + log.debug('url: {} user: {} password: {}, originator: {}'.format( + url, + user, + 'XXX-pw-redacted-XXX', + originator)) + session.xenapi.login_with_password( + user, password, api_version, originator) + except XenAPI.Failure as ex: + pool_master_addr = str(ex.__dict__['details'][1]) + slash_parts = url.split('/') + new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr + session = XenAPI.Session(new_url) + log.debug('session is -> url: {} user: {} password: {}, originator:{}'.format( + new_url, + user, + 'XXX-pw-redacted-XXX', + originator)) + session.xenapi.login_with_password( + user, password, api_version, originator) return session @@ -182,14 +197,19 @@ def list_nodes(): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: - ret[record['name_label']] = { - 'id': record['uuid'], - 'image': record['other_config']['base_template_name'], - 'name': record['name_label'], - 'size': record['memory_dynamic_max'], - 'state': record['power_state'], - 'private_ips': get_vm_ip(record['name_label'], session), - 'public_ips': None} + try: + base_template_name = record['other_config']['base_template_name'] + except Exception: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) + ret[record['name_label']] = {'id': record['uuid'], + 'image': base_template_name, + 'name': record['name_label'], + 'size': record['memory_dynamic_max'], + 'state': record['power_state'], + 'private_ips': get_vm_ip(record['name_label'], session), + 'public_ips': None} return ret @@ -296,10 +316,17 @@ def list_nodes_full(session=None): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + # deal with cases where the VM doesn't have 'base_template_name' attribute + try: + base_template_name = record['other_config']['base_template_name'] + except Exception: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) vm_cfg = session.xenapi.VM.get_record(vm) vm_cfg['id'] = record['uuid'] vm_cfg['name'] = record['name_label'] - vm_cfg['image'] = record['other_config']['base_template_name'] + vm_cfg['image'] = base_template_name vm_cfg['size'] = None vm_cfg['state'] = record['power_state'] vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session) @@ -455,8 +482,14 @@ def show_instance(name, session=None, call=None): vm = _get_vm(name, session=session) record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + try: + base_template_name = record['other_config']['base_template_name'] + except Exception: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) ret = {'id': record['uuid'], - 'image': record['other_config']['base_template_name'], + 'image': base_template_name, 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], @@ -716,7 +749,7 @@ def _copy_vm(template=None, name=None, 
session=None, sr=None): ''' Create VM by copy - This is faster and should be used if source and target are + This is slower and should be used if source and target are NOT in the same storage repository template = object reference diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 6a89e1f4857..f47b0ef373f 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -337,6 +337,9 @@ VALID_OPTS = { # Whether or not processes should be forked when needed. The alternative is to use threading. 'multiprocessing': bool, + # Maximum number of concurrently active processes at any given point in time + 'process_count_max': int, + # Whether or not the salt minion should run scheduled mine updates 'mine_enabled': bool, @@ -746,6 +749,10 @@ VALID_OPTS = { 'fileserver_limit_traversal': bool, 'fileserver_verify_config': bool, + # Optionally apply '*' permissioins to any user. By default '*' is a fallback case that is + # applied only if the user didn't matched by other matchers. + 'permissive_acl': bool, + # Optionally enables keeping the calculated user's auth list in the token file. 'keep_acl_in_token': bool, @@ -1258,6 +1265,7 @@ DEFAULT_MINION_OPTS = { 'auto_accept': True, 'autosign_timeout': 120, 'multiprocessing': True, + 'process_count_max': -1, 'mine_enabled': True, 'mine_return_job': False, 'mine_interval': 60, @@ -1526,6 +1534,7 @@ DEFAULT_MASTER_OPTS = { 'external_auth': {}, 'token_expire': 43200, 'token_expire_user_override': False, + 'permissive_acl': False, 'keep_acl_in_token': False, 'eauth_acl_module': '', 'eauth_tokens': 'localfs', diff --git a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py new file mode 100644 index 00000000000..4520321c36a --- /dev/null +++ b/salt/config/schemas/esxi.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + + + salt.config.schemas.esxi + ~~~~~~~~~~~~~~~~~~~~~~~~ + + ESXi host configuration schemas +''' + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +from salt.utils.schema import (DefinitionsSchema, + Schema, + ComplexSchemaItem, + ArrayItem, + IntegerItem, + BooleanItem, + StringItem, + OneOfItem) + + +class VMwareScsiAddressItem(StringItem): + pattern = r'vmhba\d+:C\d+:T\d+:L\d+' + + +class DiskGroupDiskScsiAddressItem(ComplexSchemaItem): + ''' + Schema item of a ESXi host disk group containing disk SCSI addresses + ''' + + title = 'Diskgroup Disk Scsi Address Item' + description = 'ESXi host diskgroup item containing disk SCSI addresses' + + cache_scsi_addr = VMwareScsiAddressItem( + title='Cache Disk Scsi Address', + description='Specifies the SCSI address of the cache disk', + required=True) + + capacity_scsi_addrs = ArrayItem( + title='Capacity Scsi Addresses', + description='Array with the SCSI addresses of the capacity disks', + items=VMwareScsiAddressItem(), + min_items=1) + + +class DiskGroupDiskIdItem(ComplexSchemaItem): + ''' + Schema item of a ESXi host disk group containg disk ids + ''' + + title = 'Diskgroup Disk Id Item' + description = 'ESXi host diskgroup item containing disk ids' + + cache_id = StringItem( + title='Cache Disk Id', + description='Specifies the id of the cache disk', + pattern=r'[^\s]+') + + capacity_ids = ArrayItem( + title='Capacity Disk Ids', + description='Array with the ids of the capacity disks', + items=StringItem(pattern=r'[^\s]+'), + min_items=1) + + +class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema): + ''' + Schema of ESXi host diskgroups containing disk 
SCSI addresses + ''' + + title = 'Diskgroups Disk Scsi Address Schema' + description = 'ESXi host diskgroup schema containing disk SCSI addresses' + diskgroups = ArrayItem( + title='Diskgroups', + description='List of diskgroups in an ESXi host', + min_items=1, + items=DiskGroupDiskScsiAddressItem(), + required=True) + erase_disks = BooleanItem( + title='Erase Diskgroup Disks', + required=True) + + +class DiskGroupsDiskIdSchema(DefinitionsSchema): + ''' + Schema of ESXi host diskgroups containing disk ids + ''' + + title = 'Diskgroups Disk Id Schema' + description = 'ESXi host diskgroup schema containing disk ids' + diskgroups = ArrayItem( + title='DiskGroups', + description='List of disk groups in an ESXi host', + min_items=1, + items=DiskGroupDiskIdItem(), + required=True) + + +class VmfsDatastoreDiskIdItem(ComplexSchemaItem): + ''' + Schema item of a VMFS datastore referencing a backing disk id + ''' + + title = 'VMFS Datastore Disk Id Item' + description = 'VMFS datastore item referencing a backing disk id' + name = StringItem( + title='Name', + description='Specifies the name of the VMFS datastore', + required=True) + backing_disk_id = StringItem( + title='Backing Disk Id', + description=('Specifies the id of the disk backing the VMFS ' + 'datastore'), + pattern=r'[^\s]+', + required=True) + vmfs_version = IntegerItem( + title='VMFS Version', + description='VMFS version', + enum=[1, 2, 3, 5]) + + +class VmfsDatastoreDiskScsiAddressItem(ComplexSchemaItem): + ''' + Schema item of a VMFS datastore referencing a backing disk SCSI address + ''' + + title = 'VMFS Datastore Disk Scsi Address Item' + description = 'VMFS datastore item referencing a backing disk SCSI address' + name = StringItem( + title='Name', + description='Specifies the name of the VMFS datastore', + required=True) + backing_disk_scsi_addr = VMwareScsiAddressItem( + title='Backing Disk Scsi Address', + description=('Specifies the SCSI address of the disk backing the VMFS ' + 'datastore'), + required=True) + vmfs_version = IntegerItem( + title='VMFS Version', + description='VMFS version', + enum=[1, 2, 3, 5]) + + +class VmfsDatastoreSchema(DefinitionsSchema): + ''' + Schema of a VMFS datastore + ''' + + title = 'VMFS Datastore Schema' + description = 'Schema of a VMFS datastore' + datastore = OneOfItem( + items=[VmfsDatastoreDiskScsiAddressItem(), + VmfsDatastoreDiskIdItem()], + required=True) + + +class HostCacheSchema(DefinitionsSchema): + ''' + Schema of ESXi host cache + ''' + + title = 'Host Cache Schema' + description = 'Schema of the ESXi host cache' + enabled = BooleanItem( + title='Enabled', + required=True) + datastore = VmfsDatastoreDiskScsiAddressItem(required=True) + swap_size = StringItem( + title='Host cache swap size (in GB or %)', + pattern=r'(\d+GiB)|(([0-9]|([1-9][0-9])|100)%)', + required=True) + erase_backing_disk = BooleanItem( + title='Erase Backup Disk', + required=True) + + +class SimpleHostCacheSchema(Schema): + ''' + Simplified Schema of ESXi host cache + ''' + + title = 'Simple Host Cache Schema' + description = 'Simplified schema of the ESXi host cache' + enabled = BooleanItem( + title='Enabled', + required=True) + datastore_name = StringItem(title='Datastore Name', + required=True) + swap_size_MiB = IntegerItem(title='Host cache swap size in MiB', + minimum=1) + + +class EsxiProxySchema(Schema): + ''' + Schema of the esxi proxy input + ''' + + title = 'Esxi Proxy Schema' + description = 'Esxi proxy schema' + additional_properties = False + proxytype = StringItem(required=True, + enum=['esxi']) 
+ host = StringItem(pattern=r'[^\s]+') # Used when connecting directly + vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter + esxi_host = StringItem() + username = StringItem() + passwords = ArrayItem(min_items=1, + items=StringItem(), + unique_items=True) + mechanism = StringItem(enum=['userpass', 'sspi']) + # TODO Should be changed when anyOf is supported for schemas + domain = StringItem() + principal = StringItem() + protocol = StringItem() + port = IntegerItem(minimum=1) diff --git a/salt/config/schemas/vcenter.py b/salt/config/schemas/vcenter.py index 4867923f27a..1d76fb43a51 100644 --- a/salt/config/schemas/vcenter.py +++ b/salt/config/schemas/vcenter.py @@ -14,6 +14,8 @@ from __future__ import absolute_import # Import Salt libs from salt.utils.schema import (Schema, + ArrayItem, + IntegerItem, StringItem) @@ -31,3 +33,25 @@ class VCenterEntitySchema(Schema): vcenter = StringItem(title='vCenter', description='Specifies the vcenter hostname', required=True) + + +class VCenterProxySchema(Schema): + ''' + Schema for the configuration for the proxy to connect to a VCenter. + ''' + title = 'VCenter Proxy Connection Schema' + description = 'Schema that describes the connection to a VCenter' + additional_properties = False + proxytype = StringItem(required=True, + enum=['vcenter']) + vcenter = StringItem(required=True, pattern=r'[^\s]+') + mechanism = StringItem(required=True, enum=['userpass', 'sspi']) + username = StringItem() + passwords = ArrayItem(min_items=1, + items=StringItem(), + unique_items=True) + + domain = StringItem() + principal = StringItem(default='host') + protocol = StringItem(default='https') + port = IntegerItem(minimum=1) diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 9c59194a054..07b2738b799 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -170,6 +170,14 @@ def clean_old_jobs(opts): def mk_key(opts, user): + if HAS_PWD: + uid = None + try: + uid = pwd.getpwnam(user).pw_uid + except KeyError: + # User doesn't exist in the system + if opts['client_acl_verify']: + return None if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. @@ -197,9 +205,9 @@ def mk_key(opts, user): # Write access is necessary since on subsequent runs, if the file # exists, it needs to be written to again. Windows enforces this. os.chmod(keyfile, 0o600) - if HAS_PWD: + if HAS_PWD and uid is not None: try: - os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1) + os.chown(keyfile, uid, -1) except OSError: # The master is not being run as root and can therefore not # chown the key file @@ -214,27 +222,26 @@ def access_keys(opts): ''' # TODO: Need a way to get all available users for systems not supported by pwd module. # For now users pattern matching will not work for publisher_acl. 
- users = [] keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) + for user in acl_users: + log.info('Preparing the %s key for local communication', user) + key = mk_key(opts, user) + if key is not None: + keys[user] = key + + # Check other users matching ACL patterns if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function') for user in pwd.getpwall(): - users.append(user.pw_name) - log.profile('End pwd.getpwall() call in masterarpi access_keys function') - for user in acl_users: - log.info('Preparing the %s key for local communication', user) - keys[user] = mk_key(opts, user) - - # Check other users matching ACL patterns - if HAS_PWD: - for user in users: + user = user.pw_name if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) + log.profile('End pwd.getpwall() call in masterarpi access_keys function') return keys diff --git a/salt/exceptions.py b/salt/exceptions.py index 1a253dff046..7215112ea33 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -442,6 +442,18 @@ class VMwareObjectRetrievalError(VMwareSaltError): ''' +class VMwareObjectExistsError(VMwareSaltError): + ''' + Used when a VMware object exists + ''' + + +class VMwareObjectNotFoundError(VMwareSaltError): + ''' + Used when a VMware object was not found + ''' + + class VMwareApiError(VMwareSaltError): ''' Used when representing a generic VMware API error diff --git a/salt/grains/core.py b/salt/grains/core.py index 57142ded3fd..c613c27d64e 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -16,6 +16,7 @@ import os import json import socket import sys +import glob import re import platform import logging @@ -65,6 +66,7 @@ __salt__ = { 'cmd.run_all': salt.modules.cmdmod._run_all_quiet, 'smbios.records': salt.modules.smbios.records, 'smbios.get': salt.modules.smbios.get, + 'cmd.run_ps': salt.modules.cmdmod.powershell, } log = logging.getLogger(__name__) @@ -2472,3 +2474,119 @@ def default_gateway(): except Exception as exc: pass return grains + + +def fc_wwn(): + ''' + Return list of fiber channel HBA WWNs + ''' + grains = {} + grains['fc_wwn'] = False + if salt.utils.platform.is_linux(): + grains['fc_wwn'] = _linux_wwns() + elif salt.utils.platform.is_windows(): + grains['fc_wwn'] = _windows_wwns() + return grains + + +def iscsi_iqn(): + ''' + Return iSCSI IQN + ''' + grains = {} + grains['iscsi_iqn'] = False + if salt.utils.platform.is_linux(): + grains['iscsi_iqn'] = _linux_iqn() + elif salt.utils.platform.is_windows(): + grains['iscsi_iqn'] = _windows_iqn() + elif salt.utils.platform.is_aix(): + grains['iscsi_iqn'] = _aix_iqn() + return grains + + +def _linux_iqn(): + ''' + Return iSCSI IQN from a Linux host. + ''' + ret = [] + + initiator = '/etc/iscsi/initiatorname.iscsi' + + if os.path.isfile(initiator): + with salt.utils.files.fopen(initiator, 'r') as _iscsi: + for line in _iscsi: + if line.find('InitiatorName') != -1: + iqn = line.split('=') + ret.extend([iqn[1]]) + return ret + + +def _aix_iqn(): + ''' + Return iSCSI IQN from an AIX host. + ''' + ret = [] + + aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name' + + aixret = __salt__['cmd.run'](aixcmd) + if aixret[0].isalpha(): + iqn = aixret.split() + ret.extend([iqn[1]]) + return ret + + +def _linux_wwns(): + ''' + Return Fibre Channel port WWNs from a Linux host. 
+ ''' + ret = [] + + for fcfile in glob.glob('/sys/class/fc_host/*/port_name'): + with salt.utils.files.fopen(fcfile, 'r') as _wwn: + for line in _wwn: + ret.extend([line[2:]]) + return ret + + +def _windows_iqn(): + ''' + Return iSCSI IQN from a Windows host. + ''' + ret = [] + + wmic = salt.utils.path.which('wmic') + + if not wmic: + return ret + + namespace = r'\\root\WMI' + mspath = 'MSiSCSIInitiator_MethodClass' + get = 'iSCSINodeName' + + cmdret = __salt__['cmd.run_all']( + '{0} /namespace:{1} path {2} get {3} /format:table'.format( + wmic, namespace, mspath, get)) + + for line in cmdret['stdout'].splitlines(): + if line[0].isalpha(): + continue + ret.extend([line]) + + return ret + + +def _windows_wwns(): + ''' + Return Fibre Channel port WWNs from a Windows host. + ''' + ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}' + + ret = [] + + cmdret = __salt__['cmd.run_ps'](ps_cmd) + + for line in cmdret: + ret.append(line) + + return ret diff --git a/salt/minion.py b/salt/minion.py index 6316f76eec4..25e7fe28d20 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1333,6 +1333,7 @@ class Minion(MinionBase): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True + @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data @@ -1365,6 +1366,15 @@ class Minion(MinionBase): self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners + + process_count_max = self.opts.get('process_count_max') + if process_count_max > 0: + process_count = len(salt.utils.minion.running(self.opts)) + while process_count >= process_count_max: + log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid'])) + yield tornado.gen.sleep(10) + process_count = len(salt.utils.minion.running(self.opts)) + # We stash an instance references to allow for the socket # communication in Windows. 
You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index a4d1ab9c1eb..5e1ba2d03db 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -42,6 +42,7 @@ from __future__ import absolute_import import logging import json import yaml +import time # Import salt libs from salt.ext import six @@ -2148,6 +2149,7 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, salt myminion boto_iam.list_entities_for_policy mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + retries = 30 params = {} for arg in ('path_prefix', 'entity_filter'): @@ -2155,21 +2157,26 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, params[arg] = locals()[arg] policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) - try: - allret = { - 'policy_groups': [], - 'policy_users': [], - 'policy_roles': [], - } - for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params): - for k, v in six.iteritems(allret): - v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k)) - return allret - except boto.exception.BotoServerError as e: - log.debug(e) - msg = 'Failed to list {0} policy entities.' - log.error(msg.format(policy_name)) - return {} + while retries: + try: + allret = { + 'policy_groups': [], + 'policy_users': [], + 'policy_roles': [], + } + for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params): + for k, v in six.iteritems(allret): + v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k)) + return allret + except boto.exception.BotoServerError as e: + if e.error_code == 'Throttling': + log.debug("Throttled by AWS API, will retry in 5 seconds...") + time.sleep(5) + retries -= 1 + continue + log.error('Failed to list {0} policy entities: {1}'.format(policy_name, e.message)) + return {} + return {} def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None, diff --git a/salt/modules/boto_rds.py b/salt/modules/boto_rds.py index f57b9633deb..cf778bd86e3 100644 --- a/salt/modules/boto_rds.py +++ b/salt/modules/boto_rds.py @@ -505,10 +505,17 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot", param_list = [] for key, value in six.iteritems(parameters): - item = (key, value, apply_method) + item = odict.OrderedDict() + item.update({'ParameterName': key}) + item.update({'ApplyMethod': apply_method}) + if type(value) is bool: + item.update({'ParameterValue': 'on' if value else 'off'}) + else: + item.update({'ParameterValue': str(value)}) param_list.append(item) - if not len(param_list): - return {'results': False} + + if not len(param_list): + return {'results': False} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) @@ -843,6 +850,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None, 'message': 'Could not establish a connection to RDS'} kwargs = {} + kwargs.update({'DBParameterGroupName': name}) for key in ('Marker', 'Source'): if locals()[key] is not None: kwargs[key] = str(locals()[key]) @@ -850,26 +858,23 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None, if locals()['MaxRecords'] is not None: kwargs['MaxRecords'] = int(locals()['MaxRecords']) - r = 
conn.describe_db_parameters(DBParameterGroupName=name, **kwargs) + pag = conn.get_paginator('describe_db_parameters') + pit = pag.paginate(**kwargs) - if not r: - return {'result': False, - 'message': 'Failed to get RDS parameters for group {0}.' - .format(name)} - - results = r['Parameters'] keys = ['ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifieable', 'MinimumEngineVersion', 'ApplyMethod'] parameters = odict.OrderedDict() ret = {'result': True} - for result in results: - data = odict.OrderedDict() - for k in keys: - data[k] = result.get(k) - parameters[result.get('ParameterName')] = data + for p in pit: + for result in p['Parameters']: + data = odict.OrderedDict() + for k in keys: + data[k] = result.get(k) + + parameters[result.get('ParameterName')] = data ret['parameters'] = parameters return ret diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py index fa9a93c59a6..8c907d9479c 100644 --- a/salt/modules/boto_vpc.py +++ b/salt/modules/boto_vpc.py @@ -599,9 +599,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None, try: vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags, region=region, key=key, keyid=keyid, profile=profile) - return {'exists': bool(vpc_ids)} - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound': + # VPC was not found: handle the error and return False. + return {'exists': False} + return {'error': boto_err} + + return {'exists': bool(vpc_ids)} def create(cidr_block, instance_tenancy=None, vpc_name=None, @@ -723,27 +728,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None, try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile) - if not vpc_id: + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound': + # VPC was not found: handle the error and return None. return {'vpc': None} + return {'error': boto_err} - filter_parameters = {'vpc_ids': vpc_id} + if not vpc_id: + return {'vpc': None} + filter_parameters = {'vpc_ids': vpc_id} + + try: vpcs = conn.get_all_vpcs(**filter_parameters) + except BotoServerError as err: + return {'error': salt.utils.boto.get_error(err)} - if vpcs: - vpc = vpcs[0] # Found! - log.debug('Found VPC: {0}'.format(vpc.id)) + if vpcs: + vpc = vpcs[0] # Found! 
+ log.debug('Found VPC: {0}'.format(vpc.id)) - keys = ('id', 'cidr_block', 'is_default', 'state', 'tags', - 'dhcp_options_id', 'instance_tenancy') - _r = dict([(k, getattr(vpc, k)) for k in keys]) - _r.update({'region': getattr(vpc, 'region').name}) - return {'vpc': _r} - else: - return {'vpc': None} - - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + keys = ('id', 'cidr_block', 'is_default', 'state', 'tags', + 'dhcp_options_id', 'instance_tenancy') + _r = dict([(k, getattr(vpc, k)) for k in keys]) + _r.update({'region': getattr(vpc, 'region').name}) + return {'vpc': _r} + else: + return {'vpc': None} def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None, @@ -809,7 +821,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None Given subnet properties, find and return matching subnet ids ''' - if not any(subnet_name, tags, cidr): + if not any([subnet_name, tags, cidr]): raise SaltInvocationError('At least one of the following must be ' 'specified: subnet_name, cidr or tags.') @@ -927,34 +939,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None, try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - filter_parameters = {'filters': {}} + except BotoServerError as err: + return {'error': salt.utils.boto.get_error(err)} - if subnet_id: - filter_parameters['subnet_ids'] = [subnet_id] - - if subnet_name: - filter_parameters['filters']['tag:Name'] = subnet_name - - if cidr: - filter_parameters['filters']['cidr'] = cidr - - if tags: - for tag_name, tag_value in six.iteritems(tags): - filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value - - if zones: - filter_parameters['filters']['availability_zone'] = zones + filter_parameters = {'filters': {}} + if subnet_id: + filter_parameters['subnet_ids'] = [subnet_id] + if subnet_name: + filter_parameters['filters']['tag:Name'] = subnet_name + if cidr: + filter_parameters['filters']['cidr'] = cidr + if tags: + for tag_name, tag_value in six.iteritems(tags): + filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value + if zones: + filter_parameters['filters']['availability_zone'] = zones + try: subnets = conn.get_all_subnets(**filter_parameters) - log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets)) - if subnets: - log.info('Subnet {0} exists.'.format(subnet_name or subnet_id)) - return {'exists': True} - else: - log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id)) + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound': + # Subnet was not found: handle the error and return False. 
return {'exists': False} - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + return {'error': boto_err} + + log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets)) + if subnets: + log.info('Subnet {0} exists.'.format(subnet_name or subnet_id)) + return {'exists': True} + else: + log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id)) + return {'exists': False} def get_subnet_association(subnets, region=None, key=None, keyid=None, diff --git a/salt/modules/esxi.py b/salt/modules/esxi.py index a4c1f8ddcc4..ee1f981022e 100644 --- a/salt/modules/esxi.py +++ b/salt/modules/esxi.py @@ -56,3 +56,7 @@ def cmd(command, *args, **kwargs): proxy_cmd = proxy_prefix + '.ch_config' return __proxy__[proxy_cmd](command, *args, **kwargs) + + +def get_details(): + return __proxy__['esxi.get_details']() diff --git a/salt/modules/file.py b/salt/modules/file.py index 7dfd5ced011..45e73b87406 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2318,14 +2318,14 @@ def replace(path, if not_found_content is None: not_found_content = repl if prepend_if_not_found: - new_file.insert(0, not_found_content + b'\n') + new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep)) else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith(b'\n'): - new_file[-1] += b'\n' - new_file.append(not_found_content + b'\n') + if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)): + new_file[-1] += salt.utils.to_bytes(os.linesep) + new_file.append(not_found_content + salt.utils.to_bytes(os.linesep)) has_changes = True if not dry_run: try: @@ -2336,9 +2336,9 @@ def replace(path, raise CommandExecutionError("Exception: {0}".format(exc)) # write new content in the file while avoiding partial reads try: - fh_ = salt.utils.atomicfile.atomic_open(path, 'w') + fh_ = salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: - fh_.write(salt.utils.stringutils.to_str(line)) + fh_.write(salt.utils.stringutils.to_bytes(line)) finally: fh_.close() @@ -2508,9 +2508,10 @@ def blockreplace(path, try: fi_file = fileinput.input(path, inplace=False, backup=False, - bufsize=1, mode='r') + bufsize=1, mode='rb') for line in fi_file: + line = salt.utils.to_str(line) result = line if marker_start in line: @@ -2523,14 +2524,24 @@ def blockreplace(path, # end of block detected in_block = False - # Check for multi-line '\n' terminated content as split will - # introduce an unwanted additional new line. - if content and content[-1] == '\n': - content = content[:-1] + # Handle situations where there may be multiple types + # of line endings in the same file. Separate the content + # into lines. 
Account for Windows-style line endings + # using os.linesep, then by linux-style line endings + # using '\n' + split_content = [] + for linesep_line in content.split(os.linesep): + for content_line in linesep_line.split('\n'): + split_content.append(content_line) + + # Trim any trailing new lines to avoid unwanted + # additional new lines + while not split_content[-1]: + split_content.pop() # push new block content in file - for cline in content.split('\n'): - new_file.append(cline + '\n') + for content_line in split_content: + new_file.append(content_line + os.linesep) done = True @@ -2558,25 +2569,25 @@ def blockreplace(path, if not done: if prepend_if_not_found: # add the markers and content at the beginning of file - new_file.insert(0, marker_end + '\n') + new_file.insert(0, marker_end + os.linesep) if append_newline is True: - new_file.insert(0, content + '\n') + new_file.insert(0, content + os.linesep) else: new_file.insert(0, content) - new_file.insert(0, marker_start + '\n') + new_file.insert(0, marker_start + os.linesep) done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith('\n'): - new_file[-1] += '\n' + if not new_file[-1].endswith(os.linesep): + new_file[-1] += os.linesep # add the markers and content at the end of file - new_file.append(marker_start + '\n') + new_file.append(marker_start + os.linesep) if append_newline is True: - new_file.append(content + '\n') + new_file.append(content + os.linesep) else: new_file.append(content) - new_file.append(marker_end + '\n') + new_file.append(marker_end + os.linesep) done = True else: raise CommandExecutionError( @@ -2607,9 +2618,9 @@ def blockreplace(path, # write new content in the file while avoiding partial reads try: - fh_ = salt.utils.atomicfile.atomic_open(path, 'w') + fh_ = salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: - fh_.write(line) + fh_.write(salt.utils.to_bytes(line)) finally: fh_.close() @@ -3749,6 +3760,14 @@ def source_list(source, source_hash, saltenv): single_src = next(iter(single)) single_hash = single[single_src] if single[single_src] else source_hash urlparsed_single_src = _urlparse(single_src) + # Fix this for Windows + if salt.utils.is_windows(): + # urlparse doesn't handle a local Windows path without the + # protocol indicator (file://). The scheme will be the + # drive letter instead of the protocol. 
So, we'll add the + # protocol and re-parse + if urlparsed_single_src.scheme.lower() in string.ascii_lowercase: + urlparsed_single_src = _urlparse('file://' + single_src) proto = urlparsed_single_src.scheme if proto == 'salt': path, senv = salt.utils.url.parse(single_src) @@ -3760,10 +3779,15 @@ def source_list(source, source_hash, saltenv): elif proto.startswith('http') or proto == 'ftp': ret = (single_src, single_hash) break - elif proto == 'file' and os.path.exists(urlparsed_single_src.path): + elif proto == 'file' and ( + os.path.exists(urlparsed_single_src.netloc) or + os.path.exists(urlparsed_single_src.path) or + os.path.exists(os.path.join( + urlparsed_single_src.netloc, + urlparsed_single_src.path))): ret = (single_src, single_hash) break - elif single_src.startswith('/') and os.path.exists(single_src): + elif single_src.startswith(os.sep) and os.path.exists(single_src): ret = (single_src, single_hash) break elif isinstance(single, six.string_types): @@ -3774,14 +3798,26 @@ def source_list(source, source_hash, saltenv): ret = (single, source_hash) break urlparsed_src = _urlparse(single) + if salt.utils.is_windows(): + # urlparse doesn't handle a local Windows path without the + # protocol indicator (file://). The scheme will be the + # drive letter instead of the protocol. So, we'll add the + # protocol and re-parse + if urlparsed_src.scheme.lower() in string.ascii_lowercase: + urlparsed_src = _urlparse('file://' + single) proto = urlparsed_src.scheme - if proto == 'file' and os.path.exists(urlparsed_src.path): + if proto == 'file' and ( + os.path.exists(urlparsed_src.netloc) or + os.path.exists(urlparsed_src.path) or + os.path.exists(os.path.join( + urlparsed_src.netloc, + urlparsed_src.path))): ret = (single, source_hash) break elif proto.startswith('http') or proto == 'ftp': ret = (single, source_hash) break - elif single.startswith('/') and os.path.exists(single): + elif single.startswith(os.sep) and os.path.exists(single): ret = (single, source_hash) break if ret is None: @@ -4281,7 +4317,8 @@ def extract_hash(hash_fn, def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False): ''' - Check the permissions on files, modify attributes and chown if needed + Check the permissions on files, modify attributes and chown if needed. File + attributes are only verified if lsattr(1) is installed. CLI Example: @@ -4293,6 +4330,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) ``follow_symlinks`` option added ''' name = os.path.expanduser(name) + lsattr_cmd = salt.utils.path.which('lsattr') if not ret: ret = {'name': name, @@ -4318,7 +4356,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) perms['lmode'] = salt.utils.normalize_mode(cur['mode']) is_dir = os.path.isdir(name) - if not salt.utils.platform.is_windows() and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd: # List attributes on file perms['lattrs'] = ''.join(lsattr(name)[name]) # Remove attributes on file so changes can be enforced. 
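The ``source_list`` hunks above hinge on a quirk of ``urlparse``: a bare Windows path such as ``c:\...`` parses with the drive letter as the URL scheme. A minimal standalone sketch of that behavior (not part of the patch; the path is hypothetical, and ``file.py`` reaches the same parser through ``six`` as ``_urlparse``):

.. code-block:: python

    # Standalone sketch, not part of the patch: why file.py re-parses
    # bare Windows paths with a file:// prefix before checking them.
    import string
    from urllib.parse import urlparse  # file.py aliases the six.moves equivalent as _urlparse

    single_src = 'c:\\salt\\files\\foo.conf'  # hypothetical local Windows path

    parsed = urlparse(single_src)
    assert parsed.scheme == 'c'  # the drive letter is mistaken for a URL scheme

    # The patch treats a scheme contained in string.ascii_lowercase as a
    # drive letter and re-parses with an explicit protocol indicator
    if parsed.scheme.lower() in string.ascii_lowercase:
        parsed = urlparse('file://' + single_src)

    assert parsed.scheme == 'file'
    # With backslashes the whole path lands in netloc and path stays empty,
    # which is why the patch checks netloc, path, and os.path.join(netloc, path).
    print(parsed.netloc, repr(parsed.path))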
@@ -4429,7 +4467,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if __opts__['test'] is True and ret['changes']: ret['result'] = None - if not salt.utils.platform.is_windows() and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd: # Replace attributes on file if it had been removed if perms['lattrs']: chattr(name, operator='add', attributes=perms['lattrs']) diff --git a/salt/modules/heat.py b/salt/modules/heat.py index d3e01f8d9ae..255d5643c42 100644 --- a/salt/modules/heat.py +++ b/salt/modules/heat.py @@ -101,8 +101,6 @@ def _construct_yaml_str(self, node): Construct for yaml ''' return self.construct_scalar(node) -YamlLoader.add_constructor(u'tag:yaml.org,2002:str', - _construct_yaml_str) YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp', _construct_yaml_str) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 22575802703..36d7cc4df14 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -83,7 +83,7 @@ def __virtual__(): return False, 'python kubernetes library not found' -if not salt.utils.is_windows(): +if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): @@ -713,7 +713,7 @@ def delete_deployment(name, namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: diff --git a/salt/modules/puppet.py b/salt/modules/puppet.py index 58b3963c8cd..0462152e037 100644 --- a/salt/modules/puppet.py +++ b/salt/modules/puppet.py @@ -68,9 +68,7 @@ class _Puppet(object): self.vardir = 'C:\\ProgramData\\PuppetLabs\\puppet\\var' self.rundir = 'C:\\ProgramData\\PuppetLabs\\puppet\\run' self.confdir = 'C:\\ProgramData\\PuppetLabs\\puppet\\etc' - self.useshell = True else: - self.useshell = False self.puppet_version = __salt__['cmd.run']('puppet --version') if 'Enterprise' in self.puppet_version: self.vardir = '/var/opt/lib/pe-puppet' @@ -106,7 +104,10 @@ class _Puppet(object): ' --{0} {1}'.format(k, v) for k, v in six.iteritems(self.kwargs)] ) - return '{0} {1}'.format(cmd, args) + # Ensure that the puppet call will return 0 in case of exit code 2 + if salt.utils.platform.is_windows(): + return 'cmd /V:ON /c {0} {1} ^& if !ERRORLEVEL! EQU 2 (EXIT 0) ELSE (EXIT /B)'.format(cmd, args) + return '({0} {1}) || test $? -eq 2'.format(cmd, args) def arguments(self, args=None): ''' @@ -169,12 +170,7 @@ def run(*args, **kwargs): puppet.kwargs.update(salt.utils.args.clean_kwargs(**kwargs)) - ret = __salt__['cmd.run_all'](repr(puppet), python_shell=puppet.useshell) - if ret['retcode'] in [0, 2]: - ret['retcode'] = 0 - else: - ret['retcode'] = 1 - + ret = __salt__['cmd.run_all'](repr(puppet), python_shell=True) return ret diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index 14beb37bef2..8bcf06fbe81 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -27,6 +27,20 @@ Installation Prerequisites pip install purestorage +- Configure Pure Storage FlashArray authentication. Use one of the following + three methods. + + 1) From the minion config + .. 
code-block:: yaml + + pure_tags: + fa: + san_ip: management vip or hostname for the FlashArray + api_token: A valid api token for the FlashArray being managed + + 2) From environment (PUREFA_IP and PUREFA_API) + 3) From the pillar (PUREFA_IP and PUREFA_API) + :maintainer: Simon Dodsley (simon@purestorage.com) :maturity: new :requires: purestorage @@ -195,7 +209,7 @@ def snap_create(name, suffix=None): Will return False if volume selected to snap does not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume to snapshot @@ -231,7 +245,7 @@ def snap_delete(name, suffix=None, eradicate=False): Will return False if selected snapshot does not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -273,7 +287,7 @@ def snap_eradicate(name, suffix=None): Will return False if snapshot is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -306,7 +320,7 @@ def volume_create(name, size=None): Will return False if volume already exists. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume (truncated to 63 characters) @@ -344,7 +358,7 @@ def volume_delete(name, eradicate=False): Will return False if volume doesn't exist or is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -383,7 +397,7 @@ def volume_eradicate(name): Will return False if volume is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -413,7 +427,7 @@ def volume_extend(name, size): Will return False if new size is less than or equal to existing size. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -451,7 +465,7 @@ def snap_volume_create(name, target, overwrite=False): Will return False if target volume already exists and overwrite is not specified, or selected snapshot doesn't exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume snapshot @@ -497,7 +511,7 @@ def volume_clone(name, target, overwrite=False): Will return False if source volume doesn't exist, or target volume already exists and overwrite not specified. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -541,7 +555,7 @@ def volume_attach(name, host): Host and volume must exist or else will return False. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -574,7 +588,7 @@ def volume_detach(name, host): Will return False if either host or volume do not exist, or if selected volume isn't already connected to the host. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -608,7 +622,7 @@ def host_create(name, iqn=None, wwn=None): Fibre Channel parameters are not in a valid format. See Pure Storage FlashArray documentation. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of host (truncated to 63 characters) @@ -659,7 +673,7 @@ def host_update(name, iqn=None, wwn=None): by another host, or are not in a valid format. See Pure Storage FlashArray documentation. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of host @@ -699,7 +713,7 @@ def host_delete(name): Will return False if the host doesn't exist. - .. versionadded:: 2017.7.3 + ..
versionadded:: Oxygen name : string name of host @@ -735,7 +749,7 @@ def hg_create(name, host=None, volume=None): Will return False if hostgroup already exists, or if named host or volume do not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup (truncated to 63 characters) @@ -791,7 +805,7 @@ def hg_update(name, host=None, volume=None): Will return False if hostgroup doesn't exist, or host or volume do not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -837,7 +851,7 @@ def hg_delete(name): Will return False if hostgroup is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -875,7 +889,7 @@ def hg_remove(name, volume=None, host=None): Will return False if hostgroup does not exist, or named host or volume are not in the hostgroup. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -936,7 +950,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True): hostgroups, hosts or volumes * Named type for protection group does not exist - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1029,7 +1043,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None): * Incorrect type selected for current protection group type * Specified type does not exist - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1119,7 +1133,7 @@ def pg_delete(name, eradicate=False): Will return False if protection group is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1156,7 +1170,7 @@ def pg_eradicate(name): Will return False if protection group is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1188,7 +1202,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None): * Protection group does not exist * Specified type is not currently associated with the protection group - .. versionadded:: 2017.7.3 + ..
versionadded:: Oxygen name : string name of hostgroup diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index c0f91c4ee7c..4b53c78f0bc 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -464,7 +464,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype) cmd = 'semanage fcontext -l | egrep ' + \ "'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs) - current_entry_text = __salt__['cmd.shell'](cmd) + current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True) if current_entry_text == '': return None ret = {} diff --git a/salt/modules/status.py b/salt/modules/status.py index edb268267ff..24d593d25f8 100644 --- a/salt/modules/status.py +++ b/salt/modules/status.py @@ -132,7 +132,7 @@ def procs(): uind = 0 pind = 0 cind = 0 - plines = __salt__['cmd.run'](__grains__['ps']).splitlines() + plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines() guide = plines.pop(0).split() if 'USER' in guide: uind = guide.index('USER') @@ -1417,7 +1417,7 @@ def pid(sig): ''' cmd = __grains__['ps'] - output = __salt__['cmd.run_stdout'](cmd) + output = __salt__['cmd.run_stdout'](cmd, python_shell=True) pids = '' for line in output.splitlines(): diff --git a/salt/modules/vcenter.py b/salt/modules/vcenter.py new file mode 100644 index 00000000000..bac3c674b49 --- /dev/null +++ b/salt/modules/vcenter.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +''' +Module used to access the vcenter proxy connection methods +''' +from __future__ import absolute_import + +# Import python libs +import logging +import salt.utils + + +log = logging.getLogger(__name__) + +__proxyenabled__ = ['vcenter'] +# Define the module's virtual name +__virtualname__ = 'vcenter' + + +def __virtual__(): + ''' + Only work on proxy + ''' + if salt.utils.is_proxy(): + return __virtualname__ + return False + + +def get_details(): + return __proxy__['vcenter.get_details']() diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index d6aabb74e4c..c88485d5551 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -177,13 +177,18 @@ import salt.utils.http import salt.utils.path import salt.utils.vmware import salt.utils.vsan +import salt.utils.pbm from salt.exceptions import CommandExecutionError, VMwareSaltError, \ ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \ - VMwareApiError, InvalidEntityError + VMwareApiError, InvalidEntityError, VMwareObjectExistsError from salt.utils.decorators import depends, ignores_kwargs from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ ESXClusterEntitySchema from salt.config.schemas.vcenter import VCenterEntitySchema +from salt.config.schemas.esxi import DiskGroupsDiskIdSchema, \ + VmfsDatastoreSchema, SimpleHostCacheSchema + +log = logging.getLogger(__name__) # Import Third Party Libs try: @@ -193,7 +198,15 @@ except ImportError: HAS_JSONSCHEMA = False try: - from pyVmomi import vim, vmodl, VmomiSupport + from pyVmomi import vim, vmodl, pbm, VmomiSupport + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + log.error('pyVmomi not loaded: Incompatible versions ' + 'of Python. 
See Issue #29537.') + raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -204,24 +217,11 @@ if esx_cli: else: HAS_ESX_CLI = False -log = logging.getLogger(__name__) - __virtualname__ = 'vsphere' -__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter'] +__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] def __virtual__(): - if not HAS_JSONSCHEMA: - return False, 'Execution module did not load: jsonschema not found' - if not HAS_PYVMOMI: - return False, 'Execution module did not load: pyVmomi not found' - - # We check the supported vim versions to infer the pyVmomi version - if 'vim25/6.0' in VmomiSupport.versionMap and \ - sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): - - return False, ('Execution module did not load: Incompatible versions ' - 'of Python and pyVmomi present. See Issue #29537.') return __virtualname__ @@ -254,6 +254,8 @@ def _get_proxy_connection_details(): details = __salt__['esxcluster.get_details']() elif proxytype == 'esxdatacenter': details = __salt__['esxdatacenter.get_details']() + elif proxytype == 'vcenter': + details = __salt__['vcenter.get_details']() else: raise CommandExecutionError('\'{0}\' proxy is not supported' ''.format(proxytype)) @@ -379,7 +381,7 @@ def gets_service_instance_via_proxy(fn): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def get_service_instance_via_proxy(service_instance=None): ''' Returns a service instance to the proxied endpoint (vCenter/ESXi host). @@ -399,7 +401,7 @@ def get_service_instance_via_proxy(service_instance=None): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def disconnect(service_instance): ''' Disconnects from a vCenter or ESXi host @@ -1934,7 +1936,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') @gets_service_instance_via_proxy def test_vcenter_connection(service_instance=None): ''' @@ -3622,9 +3624,1331 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N return ret +def _get_dvs_config_dict(dvs_name, dvs_config): + ''' + Returns the dict representation of the DVS config + + dvs_name + The name of the DVS + + dvs_config + The DVS config + ''' + log.trace('Building the dict of the DVS \'{0}\' config'.format(dvs_name)) + conf_dict = {'name': dvs_name, + 'contact_email': dvs_config.contact.contact, + 'contact_name': dvs_config.contact.name, + 'description': dvs_config.description, + 'lacp_api_version': dvs_config.lacpApiVersion, + 'network_resource_control_version': + dvs_config.networkResourceControlVersion, + 'network_resource_management_enabled': + dvs_config.networkResourceManagementEnabled, + 'max_mtu': dvs_config.maxMtu} + if isinstance(dvs_config.uplinkPortPolicy, + vim.DVSNameArrayUplinkPortPolicy): + conf_dict.update( + {'uplink_names': dvs_config.uplinkPortPolicy.uplinkPortName}) + return conf_dict + + +def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol): + ''' + Returns the dict representation of the DVS link discovery protocol + + dvs_name + The name of the DVS + + dvs_link_disc_protocol + The DVS link discovery protocol + ''' + log.trace('Building the dict of the DVS \'{0}\' link discovery ' +
'protocol'.format(dvs_name)) + return {'operation': dvs_link_disc_protocol.operation, + 'protocol': dvs_link_disc_protocol.protocol} + + +def _get_dvs_product_info(dvs_name, dvs_product_info): + ''' + Returns the dict representation of the DVS product_info + + dvs_name + The name of the DVS + + dvs_product_info + The DVS product info + ''' + log.trace('Building the dict of the DVS \'{0}\' product ' + 'info'.format(dvs_name)) + return {'name': dvs_product_info.name, + 'vendor': dvs_product_info.vendor, + 'version': dvs_product_info.version} + + +def _get_dvs_capability(dvs_name, dvs_capability): + ''' + Returns the dict representation of the DVS capability + + dvs_name + The name of the DVS + + dvs_capability + The DVS capability + ''' + log.trace('Building the dict of the DVS \'{0}\' capability' + ''.format(dvs_name)) + return {'operation_supported': dvs_capability.dvsOperationSupported, + 'portgroup_operation_supported': + dvs_capability.dvPortGroupOperationSupported, + 'port_operation_supported': dvs_capability.dvPortOperationSupported} + + +def _get_dvs_infrastructure_traffic_resources(dvs_name, + dvs_infra_traffic_ress): + ''' + Returns a list of dict representations of the DVS infrastructure traffic + resource + + dvs_name + The name of the DVS + + dvs_infra_traffic_ress + The DVS infrastructure traffic resources + ''' + log.trace('Building the dicts of the DVS \'{0}\' infrastructure traffic ' + 'resources'.format(dvs_name)) + res_dicts = [] + for res in dvs_infra_traffic_ress: + res_dict = {'key': res.key, + 'limit': res.allocationInfo.limit, + 'reservation': res.allocationInfo.reservation} + if res.allocationInfo.shares: + res_dict.update({'num_shares': res.allocationInfo.shares.shares, + 'share_level': res.allocationInfo.shares.level}) + res_dicts.append(res_dict) + return res_dicts + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy
code-block:: bash + + salt '*' vsphere.list_dvss + + salt '*' vsphere.list_dvss dvs_names=[dvs1,dvs2] + ''' + ret_list = [] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + + for dvs in salt.utils.vmware.get_dvss(dc_ref, dvs_names, (not dvs_names)): + dvs_dict = {} + # XXX: Because of how VMware did DVS object inheritance we can\'t + # be more restrictive when retrieving the dvs config, we have to + # retrieve the entire object + props = salt.utils.vmware.get_properties_of_managed_object( + dvs, ['name', 'config', 'capability', 'networkResourcePool']) + dvs_dict = _get_dvs_config_dict(props['name'], props['config']) + # Product info + dvs_dict.update( + {'product_info': + _get_dvs_product_info(props['name'], + props['config'].productInfo)}) + # Link Discovery Protocol + if props['config'].linkDiscoveryProtocolConfig: + dvs_dict.update( + {'link_discovery_protocol': + _get_dvs_link_discovery_protocol( + props['name'], + props['config'].linkDiscoveryProtocolConfig)}) + # Capability + dvs_dict.update({'capability': + _get_dvs_capability(props['name'], + props['capability'])}) + # InfrastructureTrafficResourceConfig - available with vSphere 6.0 + if hasattr(props['config'], 'infrastructureTrafficResourceConfig'): + dvs_dict.update({ + 'infrastructure_traffic_resource_pools': + _get_dvs_infrastructure_traffic_resources( + props['name'], + props['config'].infrastructureTrafficResourceConfig)}) + ret_list.append(dvs_dict) + return ret_list + + +def _apply_dvs_config(config_spec, config_dict): + ''' + Applies the values of the config dict dictionary to a config spec + (vim.VMwareDVSConfigSpec) + ''' + if config_dict.get('name'): + config_spec.name = config_dict['name'] + if config_dict.get('contact_email') or config_dict.get('contact_name'): + if not config_spec.contact: + config_spec.contact = vim.DVSContactInfo() + config_spec.contact.contact = config_dict.get('contact_email') + config_spec.contact.name = config_dict.get('contact_name') + if config_dict.get('description'): + config_spec.description = config_dict.get('description') + if config_dict.get('max_mtu'): + config_spec.maxMtu = config_dict.get('max_mtu') + if config_dict.get('lacp_api_version'): + config_spec.lacpApiVersion = config_dict.get('lacp_api_version') + if config_dict.get('network_resource_control_version'): + config_spec.networkResourceControlVersion = \ + config_dict.get('network_resource_control_version') + if config_dict.get('uplink_names'): + if not config_spec.uplinkPortPolicy or \ + not isinstance(config_spec.uplinkPortPolicy, + vim.DVSNameArrayUplinkPortPolicy): + + config_spec.uplinkPortPolicy = \ + vim.DVSNameArrayUplinkPortPolicy() + config_spec.uplinkPortPolicy.uplinkPortName = \ + config_dict['uplink_names'] + + +def _apply_dvs_link_discovery_protocol(disc_prot_config, disc_prot_dict): + ''' + Applies the values of the disc_prot_dict dictionary to a link discovery + protocol config object (vim.LinkDiscoveryProtocolConfig) + ''' + disc_prot_config.operation = disc_prot_dict['operation'] + disc_prot_config.protocol = disc_prot_dict['protocol'] + + +def _apply_dvs_product_info(product_info_spec, product_info_dict): + ''' + Applies the values of the product_info_dict dictionary to a product info + spec 
(vim.DistributedVirtualSwitchProductSpec) + ''' + if product_info_dict.get('name'): + product_info_spec.name = product_info_dict['name'] + if product_info_dict.get('vendor'): + product_info_spec.vendor = product_info_dict['vendor'] + if product_info_dict.get('version'): + product_info_spec.version = product_info_dict['version'] + + +def _apply_dvs_capability(capability_spec, capability_dict): + ''' + Applies the values of the capability_dict dictionary to a DVS capability + object (vim.DVSCapability) + ''' + if 'operation_supported' in capability_dict: + capability_spec.dvsOperationSupported = \ + capability_dict['operation_supported'] + if 'port_operation_supported' in capability_dict: + capability_spec.dvPortOperationSupported = \ + capability_dict['port_operation_supported'] + if 'portgroup_operation_supported' in capability_dict: + capability_spec.dvPortGroupOperationSupported = \ + capability_dict['portgroup_operation_supported'] + + +def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources, + resource_dicts): + ''' + Applies the values of the resource dictionaries to infra traffic resources, + creating the infra traffic resource if required + (vim.DvsHostInfrastructureTrafficResource) + ''' + for res_dict in resource_dicts: + filtered_traffic_resources = \ + [r for r in infra_traffic_resources if r.key == res_dict['key']] + if filtered_traffic_resources: + traffic_res = filtered_traffic_resources[0] + else: + traffic_res = vim.DvsHostInfrastructureTrafficResource() + traffic_res.key = res_dict['key'] + traffic_res.allocationInfo = \ + vim.DvsHostInfrastructureTrafficResourceAllocation() + infra_traffic_resources.append(traffic_res) + if res_dict.get('limit'): + traffic_res.allocationInfo.limit = res_dict['limit'] + if res_dict.get('reservation'): + traffic_res.allocationInfo.reservation = res_dict['reservation'] + if res_dict.get('num_shares') or res_dict.get('share_level'): + if not traffic_res.allocationInfo.shares: + traffic_res.allocationInfo.shares = vim.SharesInfo() + if res_dict.get('share_level'): + traffic_res.allocationInfo.shares.level = \ + vim.SharesLevel(res_dict['share_level']) + if res_dict.get('num_shares'): + #XXX Even though we always set the number of shares if provided, + #the vCenter will ignore it unless the share level is 'custom'. + traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] + + +def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): + ''' + Applies the values of the resource dictionaries to network resource pools, + creating the resource pools if required + (vim.DVSNetworkResourcePoolConfigSpec) + ''' + for res_dict in resource_dicts: + ress = [r for r in network_resource_pools if r.key == res_dict['key']] + if ress: + res = ress[0] + else: + res = vim.DVSNetworkResourcePoolConfigSpec() + res.key = res_dict['key'] + res.allocationInfo = \ + vim.DVSNetworkResourcePoolAllocationInfo() + network_resource_pools.append(res) + if res_dict.get('limit'): + res.allocationInfo.limit = res_dict['limit'] + if res_dict.get('num_shares') and res_dict.get('share_level'): + if not res.allocationInfo.shares: + res.allocationInfo.shares = vim.SharesInfo() + res.allocationInfo.shares.shares = res_dict['num_shares'] + res.allocationInfo.shares.level = \ + vim.SharesLevel(res_dict['share_level']) + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def create_dvs(dvs_dict, dvs_name, service_instance=None): + ''' + Creates a distributed virtual switch (DVS).
+ + Note: The ``dvs_name`` param will override any name set in ``dvs_dict``. + + dvs_dict + Dict representation of the new DVS (example in salt.states.dvs) + + dvs_name + Name of the DVS to be created. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.create_dvs dvs_dict=$dvs_dict dvs_name=dvs_name + ''' + log.trace('Creating dvs \'{0}\' with dict = {1}'.format(dvs_name, + dvs_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + # Make the name of the DVS consistent with the call + dvs_dict['name'] = dvs_name + # Build the config spec from the input + dvs_create_spec = vim.DVSCreateSpec() + dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() + _apply_dvs_config(dvs_create_spec.configSpec, dvs_dict) + if dvs_dict.get('product_info'): + dvs_create_spec.productInfo = vim.DistributedVirtualSwitchProductSpec() + _apply_dvs_product_info(dvs_create_spec.productInfo, + dvs_dict['product_info']) + if dvs_dict.get('capability'): + dvs_create_spec.capability = vim.DVSCapability() + _apply_dvs_capability(dvs_create_spec.capability, + dvs_dict['capability']) + if dvs_dict.get('link_discovery_protocol'): + dvs_create_spec.configSpec.linkDiscoveryProtocolConfig = \ + vim.LinkDiscoveryProtocolConfig() + _apply_dvs_link_discovery_protocol( + dvs_create_spec.configSpec.linkDiscoveryProtocolConfig, + dvs_dict['link_discovery_protocol']) + if dvs_dict.get('infrastructure_traffic_resource_pools'): + dvs_create_spec.configSpec.infrastructureTrafficResourceConfig = [] + _apply_dvs_infrastructure_traffic_resources( + dvs_create_spec.configSpec.infrastructureTrafficResourceConfig, + dvs_dict['infrastructure_traffic_resource_pools']) + log.trace('dvs_create_spec = {}'.format(dvs_create_spec)) + salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec) + if 'network_resource_management_enabled' in dvs_dict: + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, + dvs_names=[dvs_name]) + if not dvs_refs: + raise VMwareObjectRetrievalError( + 'DVS \'{0}\' wasn\'t found in datacenter \'{1}\'' + ''.format(dvs_name, datacenter)) + dvs_ref = dvs_refs[0] + salt.utils.vmware.set_dvs_network_resource_management_enabled( + dvs_ref, dvs_dict['network_resource_management_enabled']) + return True + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def update_dvs(dvs_dict, dvs, service_instance=None): + ''' + Updates a distributed virtual switch (DVS). + + Note: Updating the product info, capability, uplinks of a DVS is not + supported so the corresponding entries in ``dvs_dict`` will be + ignored. + + dvs_dict + Dictionary with the values the DVS should be updated with + (example in salt.states.dvs) + + dvs + Name of the DVS to be updated. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + ..
code-block:: bash + + salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1 + ''' + # Remove ignored properties + log.trace('Updating dvs \'{0}\' with dict = {1}'.format(dvs, dvs_dict)) + for prop in ['product_info', 'capability', 'uplink_names', 'name']: + if prop in dvs_dict: + del dvs_dict[prop] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' wasn\'t found in ' + 'datacenter \'{1}\'' + ''.format(dvs, datacenter)) + dvs_ref = dvs_refs[0] + # Build the config spec from the input + dvs_props = salt.utils.vmware.get_properties_of_managed_object( + dvs_ref, ['config', 'capability']) + dvs_config = vim.VMwareDVSConfigSpec() + # Copy all of the properties in the config of the DVS to a + # DvsConfigSpec + skipped_properties = ['host'] + for prop in dvs_config.__dict__.keys(): + if prop in skipped_properties: + continue + if hasattr(dvs_props['config'], prop): + setattr(dvs_config, prop, getattr(dvs_props['config'], prop)) + _apply_dvs_config(dvs_config, dvs_dict) + if dvs_dict.get('link_discovery_protocol'): + if not dvs_config.linkDiscoveryProtocolConfig: + dvs_config.linkDiscoveryProtocolConfig = \ + vim.LinkDiscoveryProtocolConfig() + _apply_dvs_link_discovery_protocol( + dvs_config.linkDiscoveryProtocolConfig, + dvs_dict['link_discovery_protocol']) + if dvs_dict.get('infrastructure_traffic_resource_pools'): + if not dvs_config.infrastructureTrafficResourceConfig: + dvs_config.infrastructureTrafficResourceConfig = [] + _apply_dvs_infrastructure_traffic_resources( + dvs_config.infrastructureTrafficResourceConfig, + dvs_dict['infrastructure_traffic_resource_pools']) + log.trace('dvs_config = {}'.format(dvs_config)) + salt.utils.vmware.update_dvs(dvs_ref, dvs_config_spec=dvs_config) + if 'network_resource_management_enabled' in dvs_dict: + salt.utils.vmware.set_dvs_network_resource_management_enabled( + dvs_ref, dvs_dict['network_resource_management_enabled']) + return True + + +def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config): + ''' + Returns the out shaping policy of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The default port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' out shaping ' + 'config'.format(pg_name)) + out_shaping_policy = pg_default_port_config.outShapingPolicy + if not out_shaping_policy: + return {} + return {'average_bandwidth': out_shaping_policy.averageBandwidth.value, + 'burst_size': out_shaping_policy.burstSize.value, + 'enabled': out_shaping_policy.enabled.value, + 'peak_bandwidth': out_shaping_policy.peakBandwidth.value} + + +def _get_dvportgroup_security_policy(pg_name, pg_default_port_config): + ''' + Returns the security policy of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The default port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' security policy ' + 'config'.format(pg_name)) + sec_policy = pg_default_port_config.securityPolicy + if not sec_policy: + return {} + return {'allow_promiscuous': sec_policy.allowPromiscuous.value,
'forged_transmits': sec_policy.forgedTransmits.value, + 'mac_changes': sec_policy.macChanges.value} + + +def _get_dvportgroup_teaming(pg_name, pg_default_port_config): + ''' + Returns the teaming of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The default port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' teaming ' + 'config'.format(pg_name)) + teaming_policy = pg_default_port_config.uplinkTeamingPolicy + if not teaming_policy: + return {} + ret_dict = {'notify_switches': teaming_policy.notifySwitches.value, + 'policy': teaming_policy.policy.value, + 'reverse_policy': teaming_policy.reversePolicy.value, + 'rolling_order': teaming_policy.rollingOrder.value} + if teaming_policy.failureCriteria: + failure_criteria = teaming_policy.failureCriteria + ret_dict.update({'failure_criteria': { + 'check_beacon': failure_criteria.checkBeacon.value, + 'check_duplex': failure_criteria.checkDuplex.value, + 'check_error_percent': failure_criteria.checkErrorPercent.value, + 'check_speed': failure_criteria.checkSpeed.value, + 'full_duplex': failure_criteria.fullDuplex.value, + 'percentage': failure_criteria.percentage.value, + 'speed': failure_criteria.speed.value}}) + if teaming_policy.uplinkPortOrder: + uplink_order = teaming_policy.uplinkPortOrder + ret_dict.update({'port_order': { + 'active': uplink_order.activeUplinkPort, + 'standby': uplink_order.standbyUplinkPort}}) + return ret_dict + + +def _get_dvportgroup_dict(pg_ref): + ''' + Returns a dictionary with a distributed virtual portgroup's data + + + pg_ref + Portgroup reference + ''' + props = salt.utils.vmware.get_properties_of_managed_object( + pg_ref, ['name', 'config.description', 'config.numPorts', + 'config.type', 'config.defaultPortConfig']) + pg_dict = {'name': props['name'], + 'description': props.get('config.description'), + 'num_ports': props['config.numPorts'], + 'type': props['config.type']} + if props['config.defaultPortConfig']: + dpg = props['config.defaultPortConfig'] + if dpg.vlan and \ + isinstance(dpg.vlan, + vim.VmwareDistributedVirtualSwitchVlanIdSpec): + + pg_dict.update({'vlan_id': dpg.vlan.vlanId}) + pg_dict.update({'out_shaping': + _get_dvportgroup_out_shaping( + props['name'], + props['config.defaultPortConfig'])}) + pg_dict.update({'security_policy': + _get_dvportgroup_security_policy( + props['name'], + props['config.defaultPortConfig'])}) + pg_dict.update({'teaming': + _get_dvportgroup_teaming( + props['name'], + props['config.defaultPortConfig'])}) + return pg_dict + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None): + ''' + Returns a list of distributed virtual switch portgroups. + The list can be filtered by the portgroup names or by the DVS. + + dvs + Name of the DVS containing the portgroups. + Default value is None. + + portgroup_names + List of portgroup names to look for. If None, all portgroups are + returned. + Default value is None + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + ..
code-block:: bash + + salt '*' vsphere.list_dvportgroups + + salt '*' vsphere.list_dvportgroups dvs=dvs1 + + salt '*' vsphere.list_dvportgroups portgroup_names=[pg1] + + salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1] + ''' + ret_dict = [] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + if dvs: + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + dvs_ref = dvs_refs[0] + get_all_portgroups = True if not portgroup_names else False + for pg_ref in salt.utils.vmware.get_dvportgroups( + parent_ref=dvs_ref if dvs else dc_ref, + portgroup_names=portgroup_names, + get_all_portgroups=get_all_portgroups): + + ret_dict.append(_get_dvportgroup_dict(pg_ref)) + return ret_dict + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def list_uplink_dvportgroup(dvs, service_instance=None): + ''' + Returns the uplink portgroup of a distributed virtual switch. + + dvs + Name of the DVS containing the portgroup. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.list_uplink_dvportgroup dvs=dvs_name + ''' + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0]) + return _get_dvportgroup_dict(uplink_pg_ref) + + +def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf): + ''' + Applies the values in out_shaping_conf to an out_shaping object + + pg_name + The name of the portgroup + + out_shaping + The vim.DVSTrafficShapingPolicy to apply the config to + + out_shaping_conf + The out shaping config + ''' + log.trace('Building portgroup\'s \'{0}\' out shaping ' + 'policy'.format(pg_name)) + if out_shaping_conf.get('average_bandwidth'): + out_shaping.averageBandwidth = vim.LongPolicy() + out_shaping.averageBandwidth.value = \ + out_shaping_conf['average_bandwidth'] + if out_shaping_conf.get('burst_size'): + out_shaping.burstSize = vim.LongPolicy() + out_shaping.burstSize.value = out_shaping_conf['burst_size'] + if 'enabled' in out_shaping_conf: + out_shaping.enabled = vim.BoolPolicy() + out_shaping.enabled.value = out_shaping_conf['enabled'] + if out_shaping_conf.get('peak_bandwidth'): + out_shaping.peakBandwidth = vim.LongPolicy() + out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth'] + + +def _apply_dvportgroup_security_policy(pg_name, sec_policy, sec_policy_conf): + ''' + Applies the values in sec_policy_conf to a security policy object + + pg_name + The name of the portgroup + + sec_policy + The vim.DVSSecurityPolicy to apply the config to + +
sec_policy_conf + The security policy config + ''' + log.trace('Building portgroup\'s \'{0}\' security policy '.format(pg_name)) + if 'allow_promiscuous' in sec_policy_conf: + sec_policy.allowPromiscuous = vim.BoolPolicy() + sec_policy.allowPromiscuous.value = \ + sec_policy_conf['allow_promiscuous'] + if 'forged_transmits' in sec_policy_conf: + sec_policy.forgedTransmits = vim.BoolPolicy() + sec_policy.forgedTransmits.value = sec_policy_conf['forged_transmits'] + if 'mac_changes' in sec_policy_conf: + sec_policy.macChanges = vim.BoolPolicy() + sec_policy.macChanges.value = sec_policy_conf['mac_changes'] + + +def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf): + ''' + Applies the values in teaming_conf to a teaming policy object + + pg_name + The name of the portgroup + + teaming + The vim.VmwareUplinkPortTeamingPolicy to apply the config to + + teaming_conf + The teaming config + ''' + log.trace('Building portgroup\'s \'{0}\' teaming'.format(pg_name)) + if 'notify_switches' in teaming_conf: + teaming.notifySwitches = vim.BoolPolicy() + teaming.notifySwitches.value = teaming_conf['notify_switches'] + if 'policy' in teaming_conf: + teaming.policy = vim.StringPolicy() + teaming.policy.value = teaming_conf['policy'] + if 'reverse_policy' in teaming_conf: + teaming.reversePolicy = vim.BoolPolicy() + teaming.reversePolicy.value = teaming_conf['reverse_policy'] + if 'rolling_order' in teaming_conf: + teaming.rollingOrder = vim.BoolPolicy() + teaming.rollingOrder.value = teaming_conf['rolling_order'] + if 'failure_criteria' in teaming_conf: + if not teaming.failureCriteria: + teaming.failureCriteria = vim.DVSFailureCriteria() + failure_criteria_conf = teaming_conf['failure_criteria'] + if 'check_beacon' in failure_criteria_conf: + teaming.failureCriteria.checkBeacon = vim.BoolPolicy() + teaming.failureCriteria.checkBeacon.value = \ + failure_criteria_conf['check_beacon'] + if 'check_duplex' in failure_criteria_conf: + teaming.failureCriteria.checkDuplex = vim.BoolPolicy() + teaming.failureCriteria.checkDuplex.value = \ + failure_criteria_conf['check_duplex'] + if 'check_error_percent' in failure_criteria_conf: + teaming.failureCriteria.checkErrorPercent = vim.BoolPolicy() + teaming.failureCriteria.checkErrorPercent.value = \ + failure_criteria_conf['check_error_percent'] + if 'check_speed' in failure_criteria_conf: + teaming.failureCriteria.checkSpeed = vim.StringPolicy() + teaming.failureCriteria.checkSpeed.value = \ + failure_criteria_conf['check_speed'] + if 'full_duplex' in failure_criteria_conf: + teaming.failureCriteria.fullDuplex = vim.BoolPolicy() + teaming.failureCriteria.fullDuplex.value = \ + failure_criteria_conf['full_duplex'] + if 'percentage' in failure_criteria_conf: + teaming.failureCriteria.percentage = vim.IntPolicy() + teaming.failureCriteria.percentage.value = \ + failure_criteria_conf['percentage'] + if 'speed' in failure_criteria_conf: + teaming.failureCriteria.speed = vim.IntPolicy() + teaming.failureCriteria.speed.value = \ + failure_criteria_conf['speed'] + if 'port_order' in teaming_conf: + if not teaming.uplinkPortOrder: + teaming.uplinkPortOrder = vim.VMwareUplinkPortOrderPolicy() + if 'active' in teaming_conf['port_order']: + teaming.uplinkPortOrder.activeUplinkPort = \ + teaming_conf['port_order']['active'] + if 'standby' in teaming_conf['port_order']: + teaming.uplinkPortOrder.standbyUplinkPort = \ + teaming_conf['port_order']['standby'] + + +def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf): + ''' + Applies the values in pg_conf to a distributed
portgroup spec + + pg_name + The name of the portgroup + + pg_spec + The vim.DVPortgroupConfigSpec to apply the config to + + pg_conf + The portgroup config + ''' + log.trace('Building portgroup\'s \'{0}\' spec'.format(pg_name)) + if 'name' in pg_conf: + pg_spec.name = pg_conf['name'] + if 'description' in pg_conf: + pg_spec.description = pg_conf['description'] + if 'num_ports' in pg_conf: + pg_spec.numPorts = pg_conf['num_ports'] + if 'type' in pg_conf: + pg_spec.type = pg_conf['type'] + + if not pg_spec.defaultPortConfig: + for prop in ['vlan_id', 'out_shaping', 'security_policy', 'teaming']: + if prop in pg_conf: + pg_spec.defaultPortConfig = vim.VMwareDVSPortSetting() + if 'vlan_id' in pg_conf: + pg_spec.defaultPortConfig.vlan = \ + vim.VmwareDistributedVirtualSwitchVlanIdSpec() + pg_spec.defaultPortConfig.vlan.vlanId = pg_conf['vlan_id'] + if 'out_shaping' in pg_conf: + if not pg_spec.defaultPortConfig.outShapingPolicy: + pg_spec.defaultPortConfig.outShapingPolicy = \ + vim.DVSTrafficShapingPolicy() + _apply_dvportgroup_out_shaping( + pg_name, pg_spec.defaultPortConfig.outShapingPolicy, + pg_conf['out_shaping']) + if 'security_policy' in pg_conf: + if not pg_spec.defaultPortConfig.securityPolicy: + pg_spec.defaultPortConfig.securityPolicy = \ + vim.DVSSecurityPolicy() + _apply_dvportgroup_security_policy( + pg_name, pg_spec.defaultPortConfig.securityPolicy, + pg_conf['security_policy']) + if 'teaming' in pg_conf: + if not pg_spec.defaultPortConfig.uplinkTeamingPolicy: + pg_spec.defaultPortConfig.uplinkTeamingPolicy = \ + vim.VmwareUplinkPortTeamingPolicy() + _apply_dvportgroup_teaming( + pg_name, pg_spec.defaultPortConfig.uplinkTeamingPolicy, + pg_conf['teaming']) + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def create_dvportgroup(portgroup_dict, portgroup_name, dvs, + service_instance=None): + ''' + Creates a distributed virtual portgroup. + + Note: The ``portgroup_name`` param will override any name already set + in ``portgroup_dict``. + + portgroup_dict + Dictionary with the config values the portgroup should be created with + (example in salt.states.dvs). + + portgroup_name + Name of the portgroup to be created. + + dvs + Name of the DVS that will contain the portgroup. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + ..
code-block:: bash + + salt '*' vsphere.create_dvportgroup portgroup_dict= + portgroup_name=pg1 dvs=dvs1 + ''' + log.trace('Creating portgroup \'{0}\' in dvs \'{1}\' ' + 'with dict = {2}'.format(portgroup_name, dvs, portgroup_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + # Make the name of the dvportgroup consistent with the parameter + portgroup_dict['name'] = portgroup_name + spec = vim.DVPortgroupConfigSpec() + _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict) + salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec) + return True + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=None): + ''' + Updates a distributed virtual portgroup. + + portgroup_dict + Dictionary with the values the portgroup should be updated with + (example in salt.states.dvs). + + portgroup + Name of the portgroup to be updated. + + dvs + Name of the DVS containing the portgroups. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.update_dvportgroup portgroup_dict= + portgroup=pg1 + + salt '*' vsphere.update_dvportgroup portgroup_dict= + portgroup=pg1 dvs=dvs1 + ''' + log.trace('Updating portgroup \'{0}\' in dvs \'{1}\' ' + 'with dict = {2}'.format(portgroup, dvs, portgroup_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0], + portgroup_names=[portgroup]) + if not pg_refs: + raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not ' + 'retrieved'.format(portgroup)) + pg_props = salt.utils.vmware.get_properties_of_managed_object(pg_refs[0], + ['config']) + spec = vim.DVPortgroupConfigSpec() + # Copy existing properties in spec + for prop in ['autoExpand', 'configVersion', 'defaultPortConfig', + 'description', 'name', 'numPorts', 'policy', 'portNameFormat', + 'scope', 'type', 'vendorSpecificConfig']: + setattr(spec, prop, getattr(pg_props['config'], prop)) + _apply_dvportgroup_config(portgroup, spec, portgroup_dict) + salt.utils.vmware.update_dvportgroup(pg_refs[0], spec) + return True + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def remove_dvportgroup(portgroup, dvs, service_instance=None): + ''' + Removes a distributed virtual portgroup. + + portgroup + Name of the portgroup to be removed. + + dvs + Name of the DVS containing the portgroups.
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'esxcluster')
+@gets_service_instance_via_proxy
+def remove_dvportgroup(portgroup, dvs, service_instance=None):
+    '''
+    Removes a distributed virtual portgroup.
+
+    portgroup
+        Name of the portgroup to be removed.
+
+    dvs
+        Name of the DVS containing the portgroups.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1
+    '''
+    log.trace('Removing portgroup \'{0}\' in dvs \'{1}\' '
+              ''.format(portgroup, dvs))
+    proxy_type = get_proxy_type()
+    if proxy_type == 'esxdatacenter':
+        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
+        dc_ref = _get_proxy_target(service_instance)
+    elif proxy_type == 'esxcluster':
+        datacenter = __salt__['esxcluster.get_details']()['datacenter']
+        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
+    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
+    if not dvs_refs:
+        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
+                                         'retrieved'.format(dvs))
+    pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0],
+                                                 portgroup_names=[portgroup])
+    if not pg_refs:
+        raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not '
+                                         'retrieved'.format(portgroup))
+    salt.utils.vmware.remove_dvportgroup(pg_refs[0])
+    return True
+
+
+def _get_policy_dict(policy):
+    '''Returns a dictionary representation of a policy'''
+    profile_dict = {'name': policy.name,
+                    'description': policy.description,
+                    'resource_type': policy.resourceType.resourceType}
+    subprofile_dicts = []
+    if isinstance(policy, pbm.profile.CapabilityBasedProfile) and \
+        isinstance(policy.constraints,
+                   pbm.profile.SubProfileCapabilityConstraints):
+
+        for subprofile in policy.constraints.subProfiles:
+            subprofile_dict = {'name': subprofile.name,
+                               'force_provision': subprofile.forceProvision}
+            cap_dicts = []
+            for cap in subprofile.capability:
+                cap_dict = {'namespace': cap.id.namespace,
+                            'id': cap.id.id}
+                # We assume there is one constraint with one value set
+                val = cap.constraint[0].propertyInstance[0].value
+                if isinstance(val, pbm.capability.types.Range):
+                    val_dict = {'type': 'range',
+                                'min': val.min,
+                                'max': val.max}
+                elif isinstance(val, pbm.capability.types.DiscreteSet):
+                    val_dict = {'type': 'set',
+                                'values': val.values}
+                else:
+                    val_dict = {'type': 'scalar',
+                                'value': val}
+                cap_dict['setting'] = val_dict
+                cap_dicts.append(cap_dict)
+            subprofile_dict['capabilities'] = cap_dicts
+            subprofile_dicts.append(subprofile_dict)
+    profile_dict['subprofiles'] = subprofile_dicts
+    return profile_dict
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def list_storage_policies(policy_names=None, service_instance=None):
+    '''
+    Returns a list of storage policies.
+
+    policy_names
+        Names of policies to list. If None, all policies are listed.
+        Default is None.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_storage_policies
+
+        salt '*' vsphere.list_storage_policies policy_names=[policy_name]
+    '''
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    if not policy_names:
+        policies = salt.utils.pbm.get_storage_policies(profile_manager,
+                                                       get_all_policies=True)
+    else:
+        policies = salt.utils.pbm.get_storage_policies(profile_manager,
+                                                       policy_names)
+    return [_get_policy_dict(p) for p in policies]
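Based on ``_get_policy_dict`` above, each entry returned by ``list_storage_policies`` is a plain dict of this shape (the names and values below are hypothetical, for illustration only):

.. code-block:: python

    policy = {
        'name': 'vsan-gold',                 # hypothetical policy name
        'description': 'Gold tier VSAN policy',
        'resource_type': 'STORAGE',
        'subprofiles': [
            {'name': 'VSAN sub-profile',
             'force_provision': False,
             'capabilities': [
                 {'namespace': 'VSAN',                # hypothetical namespace
                  'id': 'hostFailuresToTolerate',     # hypothetical capability id
                  'setting': {'type': 'scalar', 'value': 1}}]}]}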
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def list_default_vsan_policy(service_instance=None):
+    '''
+    Returns the default vsan storage policy.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_default_vsan_policy
+    '''
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    policies = salt.utils.pbm.get_storage_policies(profile_manager,
+                                                   get_all_policies=True)
+    def_policies = [p for p in policies
+                    if p.systemCreatedProfileType == 'VsanDefaultProfile']
+    if not def_policies:
+        raise VMwareObjectRetrievalError('Default VSAN policy was not '
+                                         'retrieved')
+    return _get_policy_dict(def_policies[0])
+
+
+def _get_capability_definition_dict(cap_metadata):
+    # We assume each capability definition has one property with the same id
+    # as the capability so we display its type as belonging to the capability
+    # The object model permits multiple properties
+    return {'namespace': cap_metadata.id.namespace,
+            'id': cap_metadata.id.id,
+            'mandatory': cap_metadata.mandatory,
+            'description': cap_metadata.summary.summary,
+            'type': cap_metadata.propertyMetadata[0].type.typeName}
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def list_capability_definitions(service_instance=None):
+    '''
+    Returns a list of the metadata of all capabilities in the vCenter.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_capability_definitions
+    '''
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    ret_list = [_get_capability_definition_dict(c) for c in
+                salt.utils.pbm.get_capability_definitions(profile_manager)]
+    return ret_list
+
+
+def _apply_policy_config(policy_spec, policy_dict):
+    '''Applies a policy dictionary to a policy spec'''
+    log.trace('policy_dict = {0}'.format(policy_dict))
+    if policy_dict.get('name'):
+        policy_spec.name = policy_dict['name']
+    if policy_dict.get('description'):
+        policy_spec.description = policy_dict['description']
+    if policy_dict.get('subprofiles'):
+        # Incremental changes to subprofiles and capabilities are not
+        # supported because they would complicate updates too much
+        # The whole configuration of all sub-profiles is expected and applied
+        policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints()
+        subprofiles = []
+        for subprofile_dict in policy_dict['subprofiles']:
+            subprofile_spec = \
+                pbm.profile.SubProfileCapabilityConstraints.SubProfile(
+                    name=subprofile_dict['name'])
+            cap_specs = []
+            if subprofile_dict.get('force_provision'):
+                subprofile_spec.forceProvision = \
+                    subprofile_dict['force_provision']
+            for cap_dict in subprofile_dict['capabilities']:
+                prop_inst_spec = pbm.capability.PropertyInstance(
+                    id=cap_dict['id']
+                )
+                setting_type = cap_dict['setting']['type']
+                if setting_type == 'set':
+                    prop_inst_spec.value = pbm.capability.types.DiscreteSet()
+                    prop_inst_spec.value.values = cap_dict['setting']['values']
+                elif setting_type == 'range':
+                    prop_inst_spec.value = pbm.capability.types.Range()
+                    prop_inst_spec.value.max = cap_dict['setting']['max']
+                    prop_inst_spec.value.min = cap_dict['setting']['min']
+                elif setting_type == 'scalar':
+                    prop_inst_spec.value = cap_dict['setting']['value']
+                cap_spec = pbm.capability.CapabilityInstance(
+                    id=pbm.capability.CapabilityMetadata.UniqueId(
+                        id=cap_dict['id'],
+                        namespace=cap_dict['namespace']),
+                    constraint=[pbm.capability.ConstraintInstance(
+                        propertyInstance=[prop_inst_spec])])
+                cap_specs.append(cap_spec)
+            subprofile_spec.capability = cap_specs
+            subprofiles.append(subprofile_spec)
+        policy_spec.constraints.subProfiles = subprofiles
+    log.trace('updated policy_spec = {0}'.format(policy_spec))
+    return policy_spec
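The three ``setting`` encodings handled by ``_apply_policy_config`` above map directly onto the pbm value types; a quick illustration (values are made up):

.. code-block:: python

    scalar_setting = {'type': 'scalar', 'value': 2}            # single value
    range_setting = {'type': 'range', 'min': 1, 'max': 4}      # pbm Range
    set_setting = {'type': 'set', 'values': ['thin', 'thick']} # pbm DiscreteSet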
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def create_storage_policy(policy_name, policy_dict, service_instance=None):
+    '''
+    Creates a storage policy.
+
+    Supported capability types: scalar, set, range.
+
+    policy_name
+        Name of the policy to create.
+        The value of the argument will override any existing name in
+        ``policy_dict``.
+
+    policy_dict
+        Dictionary containing the changes to apply to the policy.
+        (example in salt.states.pbm)
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.create_storage_policy policy_name='policy name'
+            policy_dict="$policy_dict"
+    '''
+    log.trace('create storage policy \'{0}\', dict = {1}'
+              ''.format(policy_name, policy_dict))
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
+    # Hardcode the storage profile resource type
+    policy_create_spec.resourceType = pbm.profile.ResourceType(
+        resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
+    # Set name argument
+    policy_dict['name'] = policy_name
+    log.trace('Setting policy values in policy_create_spec')
+    _apply_policy_config(policy_create_spec, policy_dict)
+    salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
+    return {'create_storage_policy': True}
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def update_storage_policy(policy, policy_dict, service_instance=None):
+    '''
+    Updates a storage policy.
+
+    Supported capability types: scalar, set, range.
+
+    policy
+        Name of the policy to update.
+
+    policy_dict
+        Dictionary containing the changes to apply to the policy.
+        (example in salt.states.pbm)
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.update_storage_policy policy='policy name'
+            policy_dict="$policy_dict"
+    '''
+    log.trace('updating storage policy, dict = {0}'.format(policy_dict))
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
+    if not policies:
+        raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
+                                         ''.format(policy))
+    policy_ref = policies[0]
+    policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec()
+    log.trace('Setting policy values in policy_update_spec')
+    for prop in ['description', 'constraints']:
+        setattr(policy_update_spec, prop, getattr(policy_ref, prop))
+    _apply_policy_config(policy_update_spec, policy_dict)
+    salt.utils.pbm.update_storage_policy(profile_manager, policy_ref,
+                                         policy_update_spec)
+    return {'update_storage_policy': True}
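Note that ``update_storage_policy`` copies ``description`` and ``constraints`` from the existing profile into the update spec before ``_apply_policy_config`` is applied, so a ``policy_dict`` only needs the fields being changed; for example (hypothetical):

.. code-block:: python

    # Change only the description; existing constraints are preserved
    policy_dict = {'description': 'Gold tier, revised'}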
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def list_default_storage_policy_of_datastore(datastore, service_instance=None):
+    '''
+    Returns the default storage policy of the specified datastore.
+
+    datastore
+        Name of the datastore to retrieve the default policy of.
+        The datastore needs to be visible to the VMware entity the proxy
+        points to.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
+    '''
+    log.trace('Listing the default storage policy of datastore \'{0}\''
+              ''.format(datastore))
+    # Find datastore
+    target_ref = _get_proxy_target(service_instance)
+    ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
+                                               datastore_names=[datastore])
+    if not ds_refs:
+        raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
+                                         'found'.format(datastore))
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
+        profile_manager, ds_refs[0])
+    return _get_policy_dict(policy)
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def assign_default_storage_policy_to_datastore(policy, datastore,
+                                               service_instance=None):
+    '''
+    Assigns a storage policy as the default policy to a datastore.
+
+    policy
+        Name of the policy to assign.
+
+    datastore
+        Name of the datastore to assign.
+        The datastore needs to be visible to the VMware entity the proxy
+        points to.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.assign_default_storage_policy_to_datastore
+            policy='policy name' datastore=ds1
+    '''
+    log.trace('Assigning policy {0} to datastore {1}'
+              ''.format(policy, datastore))
+    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
+    # Find policy
+    policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy])
+    if not policies:
+        raise VMwareObjectRetrievalError('Policy \'{0}\' was not found'
+                                         ''.format(policy))
+    policy_ref = policies[0]
+    # Find datastore
+    target_ref = _get_proxy_target(service_instance)
+    ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
+                                               datastore_names=[datastore])
+    if not ds_refs:
+        raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
+                                         'found'.format(datastore))
+    ds_ref = ds_refs[0]
+    salt.utils.pbm.assign_default_storage_policy_to_datastore(
+        profile_manager, policy_ref, ds_ref)
+    return True
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxdatacenter', 'esxcluster', 'vcenter')
+@gets_service_instance_via_proxy
 def list_datacenters_via_proxy(datacenter_names=None, service_instance=None):
     '''
     Returns a list of dict representations of VMware datacenters.
@@ -3661,7 +4985,7 @@ def list_datacenters_via_proxy(datacenter_names=None, service_instance=None):
 
 
 @depends(HAS_PYVMOMI)
-@supports_proxies('esxdatacenter')
+@supports_proxies('esxdatacenter', 'vcenter')
 @gets_service_instance_via_proxy
 def create_datacenter(datacenter_name, service_instance=None):
     '''
@@ -4236,6 +5560,60 @@ def list_datastores_via_proxy(datastore_names=None, backing_disk_ids=None,
     return ret_dict
 
 
+@depends(HAS_PYVMOMI)
+@depends(HAS_JSONSCHEMA)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def create_vmfs_datastore(datastore_name, disk_id, vmfs_major_version,
+                          safety_checks=True, service_instance=None):
+    '''
+    Creates a VMFS datastore, backed by the specified disk, on the proxied
+    ESXi host.
+
+    datastore_name
+        The name of the datastore to be created.
+
+    disk_id
+        The disk id (canonical name) on which the datastore is created.
+
+    vmfs_major_version
+        The VMFS major version.
+
+    safety_checks
+        Specify whether to perform safety checks or to skip the checks and try
+        performing the required task. Default is True.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.create_vmfs_datastore datastore_name=ds1 disk_id=
+            vmfs_major_version=5
+    '''
+    log.debug('Validating vmfs datastore input')
+    schema = VmfsDatastoreSchema.serialize()
+    try:
+        jsonschema.validate(
+            {'datastore': {'name': datastore_name,
+                           'backing_disk_id': disk_id,
+                           'vmfs_version': vmfs_major_version}},
+            schema)
+    except jsonschema.exceptions.ValidationError as exc:
+        raise ArgumentValueError(exc)
+    host_ref = _get_proxy_target(service_instance)
+    hostname = __proxy__['esxi.get_details']()['esxi_host']
+    # The backing disk is needed below, so retrieve it regardless of
+    # safety_checks; the checks only control whether a missing disk raises
+    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=[disk_id])
+    if safety_checks:
+        if not disks:
+            raise VMwareObjectRetrievalError(
+                'Disk \'{0}\' was not found in host \'{1}\''.format(disk_id,
+                                                                    hostname))
+    salt.utils.vmware.create_vmfs_datastore(
+        host_ref, datastore_name, disks[0], vmfs_major_version)
+    return True
+
+
 @depends(HAS_PYVMOMI)
 @supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
 @gets_service_instance_via_proxy
@@ -4274,6 +5652,41 @@ def rename_datastore(datastore_name, new_datastore_name,
     return True
 
 
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
+@gets_service_instance_via_proxy
+def remove_datastore(datastore, service_instance=None):
+    '''
+    Removes a datastore. An error is raised if more than one datastore
+    matches the given name.
+
+    datastore
+        Datastore name
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.remove_datastore ds_name
+    '''
+    log.trace('Removing datastore \'{0}\''.format(datastore))
+    target = _get_proxy_target(service_instance)
+    datastores = salt.utils.vmware.get_datastores(
+        service_instance,
+        reference=target,
+        datastore_names=[datastore])
+    if not datastores:
+        raise VMwareObjectRetrievalError(
+            'Datastore \'{0}\' was not found'.format(datastore))
+    if len(datastores) > 1:
+        raise VMwareObjectRetrievalError(
+            'Multiple datastores \'{0}\' were found'.format(datastore))
+    salt.utils.vmware.remove_datastore(service_instance, datastores[0])
+    return True
+
+
 @depends(HAS_PYVMOMI)
 @supports_proxies('esxcluster', 'esxdatacenter')
 @gets_service_instance_via_proxy
@@ -4488,6 +5901,601 @@ def assign_license(license_key, license_name, entity, entity_display_name,
                                entity_name=entity_display_name)
 
 
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter')
+@gets_service_instance_via_proxy
+def list_hosts_via_proxy(hostnames=None, datacenter=None,
+                         cluster=None, service_instance=None):
+    '''
+    Returns a list of hosts for the specified VMware environment. The list
+    of hosts can be filtered by datacenter name and/or cluster name.
+
+    hostnames
+        Hostnames to filter on.
+
+    datacenter
+        Name of datacenter. Only hosts in this datacenter will be retrieved.
+        Default is None.
+
+    cluster
+        Name of cluster. Only hosts in this cluster will be retrieved. If a
+        datacenter is not specified the first cluster with this name will be
+        considered. Default is None.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_hosts_via_proxy
+
+        salt '*' vsphere.list_hosts_via_proxy hostnames=[esxi1.example.com]
+
+        salt '*' vsphere.list_hosts_via_proxy datacenter=dc1 cluster=cluster1
+    '''
+    if cluster:
+        if not datacenter:
+            raise salt.exceptions.ArgumentValueError(
+                'Datacenter is required when cluster is specified')
+    get_all_hosts = False
+    if not hostnames:
+        get_all_hosts = True
+    hosts = salt.utils.vmware.get_hosts(service_instance,
+                                        datacenter_name=datacenter,
+                                        host_names=hostnames,
+                                        cluster_name=cluster,
+                                        get_all_hosts=get_all_hosts)
+    return [salt.utils.vmware.get_managed_object_name(h) for h in hosts]
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None):
+    '''
+    Returns a list of dict representations of the disks in an ESXi host.
+    The list of disks can be filtered by disk canonical names or
+    scsi addresses.
+
+    disk_ids
+        List of disk canonical names to be retrieved. Default is None.
+
+    scsi_addresses
+        List of scsi addresses of disks to be retrieved. Default is None.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_disks
+
+        salt '*' vsphere.list_disks disk_ids='[naa.00, naa.001]'
+
+        salt '*' vsphere.list_disks
+            scsi_addresses='[vmhba0:C0:T0:L0, vmhba1:C0:T0:L0]'
+    '''
+    host_ref = _get_proxy_target(service_instance)
+    hostname = __proxy__['esxi.get_details']()['esxi_host']
+    log.trace('Retrieving disks of host \'{0}\''.format(hostname))
+    log.trace('disk ids = {0}'.format(disk_ids))
+    log.trace('scsi_addresses = {0}'.format(scsi_addresses))
+    # Default to getting all disks if no filtering is done
+    get_all_disks = True if not (disk_ids or scsi_addresses) else False
+    ret_list = []
+    scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map(
+        host_ref, hostname=hostname)
+    canonical_name_to_scsi_address = {
+        lun.canonicalName: scsi_addr
+        for scsi_addr, lun in six.iteritems(scsi_address_to_lun)}
+    for d in salt.utils.vmware.get_disks(host_ref, disk_ids, scsi_addresses,
+                                         get_all_disks):
+        ret_list.append({'id': d.canonicalName,
+                         'scsi_address':
+                         canonical_name_to_scsi_address[d.canonicalName]})
+    return ret_list
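Given the dict built in the loop above, ``list_disks`` returns entries of the following shape (the identifiers below are made up for illustration):

.. code-block:: python

    [{'id': 'naa.600508b1001c577e11e0a766e65a4f06',  # hypothetical canonical name
      'scsi_address': 'vmhba0:C0:T0:L0'}]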
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def erase_disk_partitions(disk_id=None, scsi_address=None,
+                          service_instance=None):
+    '''
+    Erases the partitions on a disk.
+    The disk can be specified either by the canonical name, or by the
+    scsi_address.
+
+    disk_id
+        Canonical name of the disk.
+        Either ``disk_id`` or ``scsi_address`` needs to be specified
+        (``disk_id`` supersedes ``scsi_address``).
+
+    scsi_address
+        Scsi address of the disk.
+        Either ``disk_id`` or ``scsi_address`` needs to be specified
+        (``disk_id`` supersedes ``scsi_address``).
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.erase_disk_partitions scsi_address='vmhba0:C0:T0:L0'
+
+        salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001'
+    '''
+    if not disk_id and not scsi_address:
+        raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' '
+                                 'needs to be specified')
+    host_ref = _get_proxy_target(service_instance)
+    hostname = __proxy__['esxi.get_details']()['esxi_host']
+    if not disk_id:
+        scsi_address_to_lun = \
+            salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
+        if scsi_address not in scsi_address_to_lun:
+            raise VMwareObjectRetrievalError(
+                'Scsi lun with address \'{0}\' was not found on host \'{1}\''
+                ''.format(scsi_address, hostname))
+        disk_id = scsi_address_to_lun[scsi_address].canonicalName
+        log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\''
+                  ''.format(hostname, disk_id, scsi_address))
+    log.trace('Erasing disk partitions on disk \'{0}\' in host \'{1}\''
+              ''.format(disk_id, hostname))
+    salt.utils.vmware.erase_disk_partitions(service_instance,
+                                            host_ref, disk_id,
+                                            hostname=hostname)
+    log.info('Erased disk partitions on disk \'{0}\' on host \'{1}\''
+             ''.format(disk_id, hostname))
+    return True
+
+
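The ``list_disk_partitions`` function that follows derives each partition's size from the block span reported in the layout view; a quick illustration of the arithmetic, with made-up numbers:

.. code-block:: python

    # Hypothetical partition spanning blocks 0..2097151 with 512-byte blocks
    start_block, end_block, block_size = 0, 2097151, 512
    size_kb = (end_block - start_block + 1) * block_size / 1024  # 1048576.0 KB (1 GiB)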
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def list_disk_partitions(disk_id=None, scsi_address=None,
+                         service_instance=None):
+    '''
+    Lists the partitions on a disk.
+    The disk can be specified either by the canonical name, or by the
+    scsi_address.
+
+    disk_id
+        Canonical name of the disk.
+        Either ``disk_id`` or ``scsi_address`` needs to be specified
+        (``disk_id`` supersedes ``scsi_address``).
+
+    scsi_address
+        Scsi address of the disk.
+        Either ``disk_id`` or ``scsi_address`` needs to be specified
+        (``disk_id`` supersedes ``scsi_address``).
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_disk_partitions scsi_address='vmhba0:C0:T0:L0'
+
+        salt '*' vsphere.list_disk_partitions disk_id='naa.000000000000001'
+    '''
+    if not disk_id and not scsi_address:
+        raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' '
+                                 'needs to be specified')
+    host_ref = _get_proxy_target(service_instance)
+    hostname = __proxy__['esxi.get_details']()['esxi_host']
+    if not disk_id:
+        scsi_address_to_lun = \
+            salt.utils.vmware.get_scsi_address_to_lun_map(host_ref)
+        if scsi_address not in scsi_address_to_lun:
+            raise VMwareObjectRetrievalError(
+                'Scsi lun with address \'{0}\' was not found on host \'{1}\''
+                ''.format(scsi_address, hostname))
+        disk_id = scsi_address_to_lun[scsi_address].canonicalName
+        log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\''
+                  ''.format(hostname, disk_id, scsi_address))
+    log.trace('Listing disk partitions on disk \'{0}\' in host \'{1}\''
+              ''.format(disk_id, hostname))
+    partition_info = \
+        salt.utils.vmware.get_disk_partition_info(host_ref, disk_id)
+    ret_list = []
+    # NOTE: 1. The layout view has an extra 'None' partition for free space
+    #       2. The orders in the layout/partition views are not the same
+    for part_spec in partition_info.spec.partition:
+        part_layout = [p for p in partition_info.layout.partition
+                       if p.partition == part_spec.partition][0]
+        part_dict = {'hostname': hostname,
+                     'device': disk_id,
+                     'format': partition_info.spec.partitionFormat,
+                     'partition': part_spec.partition,
+                     'type': part_spec.type,
+                     'sectors':
+                     part_spec.endSector - part_spec.startSector + 1,
+                     'size_KB':
+                     (part_layout.end.block - part_layout.start.block + 1) *
+                     part_layout.start.blockSize / 1024}
+        ret_list.append(part_dict)
+    return ret_list
+
+
+@depends(HAS_PYVMOMI)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def list_diskgroups(cache_disk_ids=None, service_instance=None):
+    '''
+    Returns a list of disk group dict representations on an ESXi host.
+    The list of disk groups can be filtered by the cache disks'
+    canonical names. If no filtering is applied, all disk groups are returned.
+
+    cache_disk_ids
+        List of cache disk canonical names of the disk groups to be retrieved.
+        Default is None.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. code-block:: bash
+
+        salt '*' vsphere.list_diskgroups
+
+        salt '*' vsphere.list_diskgroups cache_disk_ids='[naa.000000000000001]'
+    '''
+    host_ref = _get_proxy_target(service_instance)
+    hostname = __proxy__['esxi.get_details']()['esxi_host']
+    log.trace('Listing diskgroups in \'{0}\''.format(hostname))
+    get_all_diskgroups = True if not cache_disk_ids else False
+    ret_list = []
+    for dg in salt.utils.vmware.get_diskgroups(host_ref, cache_disk_ids,
+                                               get_all_diskgroups):
+        ret_list.append(
+            {'cache_disk': dg.ssd.canonicalName,
+             'capacity_disks': [d.canonicalName for d in dg.nonSsd]})
+    return ret_list
+
+
+@depends(HAS_PYVMOMI)
+@depends(HAS_JSONSCHEMA)
+@supports_proxies('esxi')
+@gets_service_instance_via_proxy
+def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True,
+                     service_instance=None):
+    '''
+    Creates a disk group on an ESXi host with the specified cache and
+    capacity disks.
+
+    cache_disk_id
+        The canonical name of the disk to be used as a cache. The disk must
+        be an SSD.
+
+    capacity_disk_ids
+        A list containing canonical names of the capacity disks. Must contain
+        at least one id.
+
+    safety_checks
+        Specify whether to perform safety checks or to skip the checks and try
+        performing the required task. Default value is True.
+
+    service_instance
+        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
+        Default is None.
+
+    .. 
code-block:: bash + + salt '*' vsphere.create_diskgroup cache_disk_id='naa.000000000000001' + capacity_disk_ids='[naa.000000000000002, naa.000000000000003]' + ''' + log.trace('Validating diskgroup input') + schema = DiskGroupsDiskIdSchema.serialize() + try: + jsonschema.validate( + {'diskgroups': [{'cache_id': cache_disk_id, + 'capacity_ids': capacity_disk_ids}]}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise ArgumentValueError(exc) + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + if safety_checks: + diskgroups = \ + salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id]) + if diskgroups: + raise VMwareObjectExistsError( + 'Diskgroup with cache disk id \'{0}\' already exists ESXi ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + disk_ids = capacity_disk_ids[:] + disk_ids.insert(0, cache_disk_id) + disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids) + for id in disk_ids: + if not [d for d in disks if d.canonicalName == id]: + raise VMwareObjectRetrievalError( + 'No disk with id \'{0}\' was found in ESXi host \'{1}\'' + ''.format(id, hostname)) + cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0] + capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids] + vsan_disk_mgmt_system = \ + salt.utils.vsan.get_vsan_disk_management_system(service_instance) + dg = salt.utils.vsan.create_diskgroup(service_instance, + vsan_disk_mgmt_system, + host_ref, + cache_disk, + capacity_disks) + return True + + +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def add_capacity_to_diskgroup(cache_disk_id, capacity_disk_ids, + safety_checks=True, service_instance=None): + ''' + Adds capacity disks to the disk group with the specified cache disk. + + cache_disk_id + The canonical name of the cache disk. + + capacity_disk_ids + A list containing canonical names of the capacity disks to add. + + safety_checks + Specify whether to perform safety check or to skip the checks and try + performing the required task. Default value is True. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.add_capacity_to_diskgroup + cache_disk_id='naa.000000000000001' + capacity_disk_ids='[naa.000000000000002, naa.000000000000003]' + ''' + log.trace('Validating diskgroup input') + schema = DiskGroupsDiskIdSchema.serialize() + try: + jsonschema.validate( + {'diskgroups': [{'cache_id': cache_disk_id, + 'capacity_ids': capacity_disk_ids}]}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise ArgumentValueError(exc) + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids) + if safety_checks: + for id in capacity_disk_ids: + if not [d for d in disks if d.canonicalName == id]: + raise VMwareObjectRetrievalError( + 'No disk with id \'{0}\' was found in ESXi host \'{1}\'' + ''.format(id, hostname)) + diskgroups = \ + salt.utils.vmware.get_diskgroups( + host_ref, cache_disk_ids=[cache_disk_id]) + if not diskgroups: + raise VMwareObjectRetrievalError( + 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + vsan_disk_mgmt_system = \ + salt.utils.vsan.get_vsan_disk_management_system(service_instance) + salt.utils.vsan.add_capacity_to_diskgroup(service_instance, + vsan_disk_mgmt_system, + host_ref, + diskgroups[0], + disks) + return True + + +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def remove_capacity_from_diskgroup(cache_disk_id, capacity_disk_ids, + data_evacuation=True, safety_checks=True, + service_instance=None): + ''' + Remove capacity disks from the disk group with the specified cache disk. + + cache_disk_id + The canonical name of the cache disk. + + capacity_disk_ids + A list containing canonical names of the capacity disks to add. + + data_evacuation + Specifies whether to gracefully evacuate the data on the capacity disks + before removing them from the disk group. Default value is True. + + safety_checks + Specify whether to perform safety check or to skip the checks and try + performing the required task. Default value is True. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.remove_capacity_from_diskgroup + cache_disk_id='naa.000000000000001' + capacity_disk_ids='[naa.000000000000002, naa.000000000000003]' + ''' + log.trace('Validating diskgroup input') + schema = DiskGroupsDiskIdSchema.serialize() + try: + jsonschema.validate( + {'diskgroups': [{'cache_id': cache_disk_id, + 'capacity_ids': capacity_disk_ids}]}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise ArgumentValueError(exc) + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids) + if safety_checks: + for id in capacity_disk_ids: + if not [d for d in disks if d.canonicalName == id]: + raise VMwareObjectRetrievalError( + 'No disk with id \'{0}\' was found in ESXi host \'{1}\'' + ''.format(id, hostname)) + diskgroups = \ + salt.utils.vmware.get_diskgroups(host_ref, + cache_disk_ids=[cache_disk_id]) + if not diskgroups: + raise VMwareObjectRetrievalError( + 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + log.trace('data_evacuation = {0}'.format(data_evacuation)) + salt.utils.vsan.remove_capacity_from_diskgroup( + service_instance, host_ref, diskgroups[0], + capacity_disks=[d for d in disks + if d.canonicalName in capacity_disk_ids], + data_evacuation=data_evacuation) + return True + + +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def remove_diskgroup(cache_disk_id, data_accessibility=True, + service_instance=None): + ''' + Remove the diskgroup with the specified cache disk. + + cache_disk_id + The canonical name of the cache disk. + + data_accessibility + Specifies whether to ensure data accessibility. Default value is True. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001' + ''' + log.trace('Validating diskgroup input') + schema = DiskGroupsDiskIdSchema.serialize() + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + diskgroups = \ + salt.utils.vmware.get_diskgroups(host_ref, + cache_disk_ids=[cache_disk_id]) + if not diskgroups: + raise VMwareObjectRetrievalError( + 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + log.trace('data accessibility = {0}'.format(data_accessibility)) + salt.utils.vsan.remove_diskgroup( + service_instance, host_ref, diskgroups[0], + data_accessibility=data_accessibility) + return True + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def get_host_cache(service_instance=None): + ''' + Returns the host cache configuration on the proxy host. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.get_host_cache + ''' + # Default to getting all disks if no filtering is done + ret_dict = {} + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + hci = salt.utils.vmware.get_host_cache(host_ref) + if not hci: + log.debug('Host cache not configured on host \'{0}\''.format(hostname)) + ret_dict['enabled'] = False + return ret_dict + + # TODO Support multiple host cache info objects (on multiple datastores) + return {'enabled': True, + 'datastore': {'name': hci.key.name}, + 'swap_size': '{}MiB'.format(hci.swapSize)} + + +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def configure_host_cache(enabled, datastore=None, swap_size_MiB=None, + service_instance=None): + ''' + Configures the host cache on the selected host. + + enabled + Boolean flag specifying whether the host cache is enabled. + + datastore + Name of the datastore that contains the host cache. Must be set if + enabled is ``true``. + + swap_size_MiB + Swap size in Mibibytes. Needs to be set if enabled is ``true``. Must be + smaller thant the datastore size. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.configure_host_cache enabled=False + + salt '*' vsphere.configure_host_cache enabled=True datastore=ds1 + swap_size_MiB=1024 + ''' + log.debug('Validating host cache input') + schema = SimpleHostCacheSchema.serialize() + try: + jsonschema.validate({'enabled': enabled, + 'datastore_name': datastore, + 'swap_size_MiB': swap_size_MiB}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise ArgumentValueError(exc) + if not enabled: + raise ArgumentValueError('Disabling the host cache is not supported') + ret_dict = {'enabled': False} + + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + if datastore: + ds_refs = salt.utils.vmware.get_datastores( + service_instance, host_ref, datastore_names=[datastore]) + if not ds_refs: + raise VMwareObjectRetrievalError( + 'Datastore \'{0}\' was not found on host ' + '\'{1}\''.format(datastore, hostname)) + ds_ref = ds_refs[0] + salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB) + return True + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or @@ -5116,7 +7124,7 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, @depends(HAS_PYVMOMI) -@supports_proxies('esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def _get_proxy_target(service_instance): ''' Returns the target object of a proxy. 
@@ -5144,6 +7152,21 @@ def _get_proxy_target(service_instance): reference = salt.utils.vmware.get_datacenter(service_instance, datacenter) + elif proxy_type == 'vcenter': + # vcenter proxy - the target is the root folder + reference = salt.utils.vmware.get_root_folder(service_instance) + elif proxy_type == 'esxi': + # esxi proxy + details = __proxy__['esxi.get_details']() + if 'vcenter' not in details: + raise InvalidEntityError('Proxies connected directly to ESXi ' + 'hosts are not supported') + references = salt.utils.vmware.get_hosts( + service_instance, host_names=details['esxi_host']) + if not references: + raise VMwareObjectRetrievalError( + 'ESXi host \'{0}\' was not found'.format(details['esxi_host'])) + reference = references[0] log.trace('reference = {0}'.format(reference)) return reference @@ -5167,3 +7190,19 @@ def _get_esxcluster_proxy_details(): det.get('protocol'), det.get('port'), det.get('mechanism'), \ det.get('principal'), det.get('domain'), det.get('datacenter'), \ det.get('cluster') + + +def _get_esxi_proxy_details(): + ''' + Returns the running esxi's proxy details + ''' + det = __proxy__['esxi.get_details']() + host = det.get('host') + if det.get('vcenter'): + host = det['vcenter'] + esxi_hosts = None + if det.get('esxi_host'): + esxi_hosts = [det['esxi_host']] + return host, det.get('username'), det.get('password'), \ + det.get('protocol'), det.get('port'), det.get('mechanism'), \ + det.get('principal'), det.get('domain'), esxi_hosts diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py index 5549b3e2bfa..24441d185c4 100644 --- a/salt/modules/win_wua.py +++ b/salt/modules/win_wua.py @@ -110,7 +110,7 @@ def available(software=True, Include software updates in the results (default is True) drivers (bool): - Include driver updates in the results (default is False) + Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 849c5c4f291..0136390d7fc 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -1347,6 +1347,7 @@ def install(name=None, to_install = [] to_downgrade = [] to_reinstall = [] + _available = {} # The above three lists will be populated with tuples containing the # package name and the string being used for this particular package # modification. The reason for this method is that the string we use for diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index dc42400796c..fd8b291f821 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -77,6 +77,9 @@ def __virtual__(): ) == 0: return 'zfs' + if __grains__['kernel'] == 'OpenBSD': + return False + _zfs_fuse = lambda f: __salt__['service.' 
+ f]('zfs-fuse') if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')): return 'zfs' diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index 323958e2f91..2af4560c49e 100644 --- a/salt/pillar/file_tree.py +++ b/salt/pillar/file_tree.py @@ -343,14 +343,15 @@ def ext_pillar(minion_id, if minion_id in match: ngroup_dir = os.path.join( nodegroups_dir, str(nodegroup)) - ngroup_pillar.update( + ngroup_pillar = salt.utils.dictupdate.merge(ngroup_pillar, _construct_pillar(ngroup_dir, follow_dir_links, keep_newline, render_default, renderer_blacklist, renderer_whitelist, - template) + template), + strategy='recurse' ) else: if debug is True: diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 53e58be0ac1..12bab065d81 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -374,20 +374,20 @@ def __virtual__(): return False -def ext_pillar(minion_id, repo): +def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument ''' Checkout the ext_pillar sources and compile the resulting pillar SLS ''' opts = copy.deepcopy(__opts__) opts['pillar_roots'] = {} opts['__git_pillar'] = True - pillar = salt.utils.gitfs.GitPillar(opts) - pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + git_pillar = salt.utils.gitfs.GitPillar(opts) + git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) if __opts__.get('__role') == 'minion': # If masterless, fetch the remotes. We'll need to remove this once # we make the minion daemon able to run standalone. - pillar.fetch_remotes() - pillar.checkout() + git_pillar.fetch_remotes() + git_pillar.checkout() ret = {} merge_strategy = __opts__.get( 'pillar_source_merging_strategy', @@ -397,7 +397,14 @@ def ext_pillar(minion_id, repo): 'pillar_merge_lists', False ) - for pillar_dir, env in six.iteritems(pillar.pillar_dirs): + for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs): + # Map env if env == '__env__' before checking the env value + if env == '__env__': + env = opts.get('pillarenv') \ + or opts.get('environment') \ + or opts.get('git_pillar_base') + log.debug('__env__ maps to %s', env) + # If pillarenv is set, only grab pillars with that match pillarenv if opts['pillarenv'] and env != opts['pillarenv']: log.debug( @@ -406,7 +413,7 @@ def ext_pillar(minion_id, repo): env, pillar_dir, opts['pillarenv'] ) continue - if pillar_dir in pillar.pillar_linked_dirs: + if pillar_dir in git_pillar.pillar_linked_dirs: log.debug( 'git_pillar is skipping processing on %s as it is a ' 'mounted repo', pillar_dir @@ -418,12 +425,6 @@ def ext_pillar(minion_id, repo): 'env \'%s\'', pillar_dir, env ) - if env == '__env__': - env = opts.get('pillarenv') \ - or opts.get('environment') \ - or opts.get('git_pillar_base') - log.debug('__env__ maps to %s', env) - pillar_roots = [pillar_dir] if __opts__['git_pillar_includes']: @@ -433,7 +434,7 @@ def ext_pillar(minion_id, repo): # list, so that its top file is sourced from the correct # location and not from another git_pillar remote. 
pillar_roots.extend( - [d for (d, e) in six.iteritems(pillar.pillar_dirs) + [d for (d, e) in six.iteritems(git_pillar.pillar_dirs) if env == e and d != pillar_dir] ) diff --git a/salt/pillar/postgres.py b/salt/pillar/postgres.py index 58cd0e32985..7b6300989a6 100644 --- a/salt/pillar/postgres.py +++ b/salt/pillar/postgres.py @@ -90,7 +90,8 @@ class POSTGRESExtPillar(SqlBaseExtPillar): conn = psycopg2.connect(host=_options['host'], user=_options['user'], password=_options['pass'], - dbname=_options['db']) + dbname=_options['db'], + port=_options['port']) cursor = conn.cursor() try: yield cursor diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py new file mode 100644 index 00000000000..cf4b2c56f8f --- /dev/null +++ b/salt/pillar/rethinkdb_pillar.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +''' +Provide external pillar data from RethinkDB + +.. versionadded:: Oxygen + +:depends: rethinkdb (on the salt-master) + + +salt master rethinkdb configuration +=================================== +These variables must be configured in your master configuration file. + * ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'`` + * ``rethinkdb.port`` - The port the RethinkDB server listens on. + Defaults to ``'28015'`` + * ``rethinkdb.database`` - The database to connect to. + Defaults to ``'salt'`` + * ``rethinkdb.username`` - The username for connecting to RethinkDB. + Defaults to ``''`` + * ``rethinkdb.password`` - The password for connecting to RethinkDB. + Defaults to ``''`` + + +salt-master ext_pillar configuration +==================================== + +The ext_pillar function arguments are given in single line dictionary notation. + +.. code-block:: yaml + + ext_pillar: + - rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar} + +In the example above the following happens. + * The salt-master will look for external pillars in the 'ext_pillar' table + on the RethinkDB host + * The minion id will be matched against the 'minion_id' field + * Pillars will be retrieved from the nested field 'pillar_root' + * Found pillars will be merged inside a key called 'external_pillar' + + +Module Documentation +==================== +''' +from __future__ import absolute_import + +# Import python libraries +import logging + +# Import 3rd party libraries +try: + import rethinkdb + HAS_RETHINKDB = True +except ImportError: + HAS_RETHINKDB = False + +__virtualname__ = 'rethinkdb' + +__opts__ = { + 'rethinkdb.host': 'salt', + 'rethinkdb.port': '28015', + 'rethinkdb.database': 'salt', + 'rethinkdb.username': None, + 'rethinkdb.password': None +} + + +def __virtual__(): + if not HAS_RETHINKDB: + return False + return True + + +# Configure logging +log = logging.getLogger(__name__) + + +def ext_pillar(minion_id, + pillar, + table='pillar', + id_field=None, + field=None, + pillar_key=None): + ''' + Collect minion external pillars from a RethinkDB database + + Arguments: + * `table`: The RethinkDB table containing external pillar information. + Defaults to ``'pillar'`` + * `id_field`: Field in document containing the minion id. + If blank then we assume the table index matches minion ids + * `field`: Specific field in the document used for pillar data, if blank + then the entire document will be used + * `pillar_key`: The salt-master will nest found external pillars under + this key before merging into the minion pillars. 
If blank, external
+        pillars will be merged at top level
+    '''
+    host = __opts__['rethinkdb.host']
+    port = __opts__['rethinkdb.port']
+    database = __opts__['rethinkdb.database']
+    username = __opts__['rethinkdb.username']
+    password = __opts__['rethinkdb.password']
+
+    log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar'
+              .format(host, port, username))
+
+    # Connect to the database
+    conn = rethinkdb.connect(host=host,
+                             port=port,
+                             db=database,
+                             user=username,
+                             password=password)
+
+    data = None
+
+    try:
+
+        if id_field:
+            log.debug('ext_pillar.rethinkdb: looking up pillar. '
+                      'table: {0}, field: {1}, minion: {2}'.format(
+                          table, id_field, minion_id))
+
+            if field:
+                data = rethinkdb.table(table).filter(
+                    {id_field: minion_id}).pluck(field).run(conn)
+            else:
+                data = rethinkdb.table(table).filter(
+                    {id_field: minion_id}).run(conn)
+
+        else:
+            log.debug('ext_pillar.rethinkdb: looking up pillar. '
+                      'table: {0}, field: id, minion: {1}'.format(
+                          table, minion_id))
+
+            if field:
+                data = rethinkdb.table(table).get(minion_id).pluck(field).run(
+                    conn)
+            else:
+                data = rethinkdb.table(table).get(minion_id).run(conn)
+
+    finally:
+        if conn.is_open():
+            conn.close()
+
+    if data.items:
+
+        # Return nothing if multiple documents are found for a minion
+        if len(data.items) > 1:
+            log.error('ext_pillar.rethinkdb: ambiguous documents found for '
+                      'minion {0}'.format(minion_id))
+            return {}
+
+        else:
+            result = data.items.pop()
+
+            if pillar_key:
+                return {pillar_key: result}
+            return result
+
+    else:
+        # No document found in the database
+        log.debug('ext_pillar.rethinkdb: no document found')
+        return {}
diff --git a/salt/pillar/saltclass.py b/salt/pillar/saltclass.py
new file mode 100644
index 00000000000..41732bffd07
--- /dev/null
+++ b/salt/pillar/saltclass.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+'''
+SaltClass Pillar Module
+
+.. code-block:: yaml
+
+    ext_pillar:
+      - saltclass:
+        - path: /srv/saltclass
+
+'''
+
+# import python libs
+from __future__ import absolute_import
+import salt.utils.saltclass as sc
+import logging
+
+log = logging.getLogger(__name__)
+
+
+def __virtual__():
+    '''
+    This module has no external dependencies
+    '''
+    return True
+
+
+def ext_pillar(minion_id, pillar, *args, **kwargs):
+    '''
+    Node definitions path will be retrieved from args - or set to default -
+    then added to the 'salt_data' dict that is passed to the 'get_pillars'
+    function. 'salt_data' is a convenient way to pass all the required data
+    to the function. It contains:
+    - __opts__
+    - __salt__
+    - __grains__
+    - __pillar__
+    - minion_id
+    - path
+
+    If successful the function will return a pillar dict for minion_id.
+    '''
+    # If path has not been set, make a default
+    for i in args:
+        if 'path' not in i:
+            path = '/srv/saltclass'
+            i['path'] = path
+            log.warning('path variable unset, using default: {0}'.format(path))
+        else:
+            path = i['path']
+
+    # Create a dict that will contain our salt dicts to pass it to reclass
+    salt_data = {
+        '__opts__': __opts__,
+        '__salt__': __salt__,
+        '__grains__': __grains__,
+        '__pillar__': pillar,
+        'minion_id': minion_id,
+        'path': path
+    }
+
+    return sc.get_pillars(minion_id, salt_data)
diff --git a/salt/proxy/esxi.py b/salt/proxy/esxi.py
index 4edd50ac31d..1599c381c67 100644
--- a/salt/proxy/esxi.py
+++ b/salt/proxy/esxi.py
@@ -273,13 +273,22 @@ for standing up an ESXi host from scratch.
# Import Python Libs from __future__ import absolute_import import logging +import os # Import Salt Libs -from salt.exceptions import SaltSystemExit +from salt.exceptions import SaltSystemExit, InvalidConfigError +from salt.config.schemas.esxi import EsxiProxySchema +from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. __proxyenabled__ = ['esxi'] +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False # Variables are scoped to this module so we can have persistent data # across calls to fns in here. @@ -288,7 +297,6 @@ DETAILS = {} # Set up logging log = logging.getLogger(__file__) - # Define the module's virtual name __virtualname__ = 'esxi' @@ -297,7 +305,7 @@ def __virtual__(): ''' Only load if the ESXi execution module is available. ''' - if 'vsphere.system_info' in __salt__: + if HAS_JSONSCHEMA: return __virtualname__ return False, 'The ESXi Proxy Minion module did not load.' @@ -309,32 +317,104 @@ def init(opts): ESXi devices, the host, login credentials, and, if configured, the protocol and port are cached. ''' - if 'host' not in opts['proxy']: - log.critical('No \'host\' key found in pillar for this proxy.') - return False - if 'username' not in opts['proxy']: - log.critical('No \'username\' key found in pillar for this proxy.') - return False - if 'passwords' not in opts['proxy']: - log.critical('No \'passwords\' key found in pillar for this proxy.') - return False - - host = opts['proxy']['host'] - - # Get the correct login details + log.debug('Initting esxi proxy module in process \'{}\'' + ''.format(os.getpid())) + log.debug('Validating esxi proxy input') + schema = EsxiProxySchema.serialize() + log.trace('esxi_proxy_schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) try: - username, password = find_credentials(host) - except SaltSystemExit as err: - log.critical('Error: {0}'.format(err)) - return False + jsonschema.validate(proxy_conf, schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) - # Set configuration details - DETAILS['host'] = host - DETAILS['username'] = username - DETAILS['password'] = password - DETAILS['protocol'] = opts['proxy'].get('protocol', 'https') - DETAILS['port'] = opts['proxy'].get('port', '443') - DETAILS['credstore'] = opts['proxy'].get('credstore') + DETAILS['proxytype'] = proxy_conf['proxytype'] + if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf): + log.critical('Neither \'host\' nor \'vcenter\' keys found in pillar ' + 'for this proxy.') + return False + if 'host' in proxy_conf: + # We have started the proxy by connecting directly to the host + if 'username' not in proxy_conf: + log.critical('No \'username\' key found in pillar for this proxy.') + return False + if 'passwords' not in proxy_conf: + log.critical('No \'passwords\' key found in pillar for this proxy.') + return False + host = proxy_conf['host'] + + # Get the correct login details + try: + username, password = find_credentials(host) + except SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + + # Set configuration details + DETAILS['host'] = host + DETAILS['username'] = username + DETAILS['password'] = password + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') + return True + + if 'vcenter' in proxy_conf: + vcenter = proxy_conf['vcenter'] + if not 
proxy_conf.get('esxi_host'): + log.critical('No \'esxi_host\' key found in pillar for this proxy.') + DETAILS['esxi_host'] = proxy_conf['esxi_host'] + # We have started the proxy by connecting via the vCenter + if 'mechanism' not in proxy_conf: + log.critical('No \'mechanism\' key found in pillar for this proxy.') + return False + mechanism = proxy_conf['mechanism'] + # Save mandatory fields in cache + for key in ('vcenter', 'mechanism'): + DETAILS[key] = proxy_conf[key] + + if mechanism == 'userpass': + if 'username' not in proxy_conf: + log.critical('No \'username\' key found in pillar for this ' + 'proxy.') + return False + if 'passwords' not in proxy_conf and \ + len(proxy_conf['passwords']) > 0: + + log.critical('Mechanism is set to \'userpass\' , but no ' + '\'passwords\' key found in pillar for this ' + 'proxy.') + return False + for key in ('username', 'passwords'): + DETAILS[key] = proxy_conf[key] + elif mechanism == 'sspi': + if 'domain' not in proxy_conf: + log.critical('Mechanism is set to \'sspi\' , but no ' + '\'domain\' key found in pillar for this proxy.') + return False + if 'principal' not in proxy_conf: + log.critical('Mechanism is set to \'sspi\' , but no ' + '\'principal\' key found in pillar for this ' + 'proxy.') + return False + for key in ('domain', 'principal'): + DETAILS[key] = proxy_conf[key] + + if mechanism == 'userpass': + # Get the correct login details + log.debug('Retrieving credentials and testing vCenter connection' + ' for mehchanism \'userpass\'') + try: + username, password = find_credentials(DETAILS['vcenter']) + DETAILS['password'] = password + except SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + + # Save optional + DETAILS['protocol'] = proxy_conf.get('protocol', 'https') + DETAILS['port'] = proxy_conf.get('port', '443') + DETAILS['credstore'] = proxy_conf.get('credstore') def grains(): @@ -358,8 +438,9 @@ def grains_refresh(): def ping(): ''' - Check to see if the host is responding. Returns False if the host didn't - respond, True otherwise. + Returns True if connection is to be done via a vCenter (no connection is attempted). + Check to see if the host is responding when connecting directly via an ESXi + host. CLI Example: @@ -367,15 +448,19 @@ def ping(): salt esxi-host test.ping ''' - # find_credentials(DETAILS['host']) - try: - __salt__['vsphere.system_info'](host=DETAILS['host'], - username=DETAILS['username'], - password=DETAILS['password']) - except SaltSystemExit as err: - log.warning(err) - return False - + if DETAILS.get('esxi_host'): + return True + else: + # TODO Check connection if mechanism is SSPI + if DETAILS['mechanism'] == 'userpass': + find_credentials(DETAILS['host']) + try: + __salt__['vsphere.system_info'](host=DETAILS['host'], + username=DETAILS['username'], + password=DETAILS['password']) + except SaltSystemExit as err: + log.warning(err) + return False return True @@ -461,3 +546,14 @@ def _grains(host, protocol=None, port=None): port=port) GRAINS_CACHE.update(ret) return GRAINS_CACHE + + +def is_connected_via_vcenter(): + return True if 'vcenter' in DETAILS else False + + +def get_details(): + ''' + Return the proxy details + ''' + return DETAILS diff --git a/salt/proxy/vcenter.py b/salt/proxy/vcenter.py new file mode 100644 index 00000000000..5c5ad797d19 --- /dev/null +++ b/salt/proxy/vcenter.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +''' +Proxy Minion interface module for managing VMWare vCenters. 
+ +:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)` +:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + +Dependencies +============ + +- pyVmomi Python Module + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original ESXi State +Module was developed against. + + +Configuration +============= +To use this proxy module, please use on of the following configurations: + + +.. code-block:: yaml + + proxy: + proxytype: vcenter + vcenter: + username: + mechanism: userpass + passwords: + - first_password + - second_password + - third_password + + proxy: + proxytype: vcenter + vcenter: + username: + domain: + mechanism: sspi + principal: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this Proxy Module, set this to +``vcenter``. + +vcenter +^^^^^^^ +The location of the VMware vCenter server (host of ip). Required + +username +^^^^^^^^ +The username used to login to the vcenter, such as ``root``. +Required only for userpass. + +mechanism +^^^^^^^^ +The mechanism used to connect to the vCenter server. Supported values are +``userpass`` and ``sspi``. Required. + +passwords +^^^^^^^^^ +A list of passwords to be used to try and login to the vCenter server. At least +one password in this list is required if mechanism is ``userpass`` + +The proxy integration will try the passwords listed in order. + +domain +^^^^^^ +User domain. Required if mechanism is ``sspi`` + +principal +^^^^^^^^ +Kerberos principal. Rquired if mechanism is ``sspi`` + +protocol +^^^^^^^^ +If the vCenter is not using the default protocol, set this value to an +alternate protocol. Default is ``https``. + +port +^^^^ +If the ESXi host is not using the default port, set this value to an +alternate port. Default is ``443``. + + +Salt Proxy +---------- + +After your pillar is in place, you can test the proxy. The proxy can run on +any machine that has network connectivity to your Salt Master and to the +vCenter server in the pillar. SaltStack recommends that the machine running the +salt-proxy process also run a regular minion, though it is not strictly +necessary. + +On the machine that will run the proxy, make sure there is an ``/etc/salt/proxy`` +file with at least the following in it: + +.. code-block:: yaml + + master: + +You can then start the salt-proxy process with: + +.. 
code-block:: bash + + salt-proxy --proxyid + +You may want to add ``-l debug`` to run the above in the foreground in +debug mode just to make sure everything is OK. + +Next, accept the key for the proxy on your salt-master, just like you +would for a regular minion: + +.. code-block:: bash + + salt-key -a + +You can confirm that the pillar data is in place for the proxy: + +.. code-block:: bash + + salt pillar.items + +And now you should be able to ping the vCenter server to make sure it is +responding: + +.. code-block:: bash + + salt test.ping + +At this point you can execute one-off commands against the vCenter. For +example, you can check whether the proxy can actually connect to the vCenter: + +.. code-block:: bash + + salt vsphere.test_vcenter_connection + +Note that you don't need to provide credentials or an ip/hostname. Salt +knows to use the credentials you stored in Pillar. + +It's important to understand how this particular proxy works. +:mod:`salt.modules.vsphere <salt.modules.vsphere>` is a +standard Salt execution module. If you pull up the docs for it you'll see +that almost every function in the module takes credentials and targets either +a vCenter or a host. When credentials or a host aren't passed, Salt runs commands +through ``pyVmomi`` against the local machine. If you wanted, you could run +functions from this module on any host where an appropriate version of +``pyVmomi`` is installed, and that host would reach out over the network +and communicate with the vCenter server. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import os + +# Import Salt Libs +import salt.exceptions +from salt.config.schemas.vcenter import VCenterProxySchema +from salt.utils.dictupdate import merge + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['vcenter'] + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Variables are scoped to this module so we can have persistent data +# across calls to fns in here. +DETAILS = {} + + +# Set up logging +log = logging.getLogger(__name__) +# Define the module's virtual name +__virtualname__ = 'vcenter' + + +def __virtual__(): + ''' + Only load if the vsphere execution module is available. + ''' + if HAS_JSONSCHEMA: + return __virtualname__ + + return False, 'The vcenter proxy module did not load.' + + +def init(opts): + ''' + This function gets called when the proxy starts up. + For login, the protocol and port are cached. 
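+
+    On success, the connection details are cached in the module-level
+    ``DETAILS`` dict. A rough sketch of what gets cached (the values below
+    are illustrative, not actual output):
+
+    .. code-block:: python
+
+        DETAILS = {'vcenter': 'vcenter01.example.com',
+                   'mechanism': 'userpass',
+                   'username': 'root',
+                   'passwords': ['secret1', 'secret2'],
+                   'password': 'secret1',
+                   'protocol': 'https',
+                   'port': 443}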
+ ''' + log.info('Initializing vcenter proxy module in process {0}' + ''.format(os.getpid())) + log.trace('VCenter Proxy Validating vcenter proxy input') + schema = VCenterProxySchema.serialize() + log.trace('schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) + try: + jsonschema.validate(proxy_conf, schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidConfigError(exc) + + # Save mandatory fields in cache + for key in ('vcenter', 'mechanism'): + DETAILS[key] = proxy_conf[key] + + # Additional validation + if DETAILS['mechanism'] == 'userpass': + if 'username' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'username\' key found in proxy config') + if 'passwords' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'passwords\' key found in proxy config') + for key in ('username', 'passwords'): + DETAILS[key] = proxy_conf[key] + else: + if 'domain' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'domain\' key found in proxy config') + if 'principal' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'principal\' key found in proxy config') + for key in ('domain', 'principal'): + DETAILS[key] = proxy_conf[key] + + # Save optional + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') + + # Test connection + if DETAILS['mechanism'] == 'userpass': + # Get the correct login details + log.info('Retrieving credentials and testing vCenter connection for ' + 'mechanism \'userpass\'') + try: + username, password = find_credentials() + DETAILS['password'] = password + except salt.exceptions.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + return True + + +def ping(): + ''' + Returns True. + + CLI Example: + + .. code-block:: bash + + salt vcenter test.ping + ''' + return True + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. + ''' + log.debug('VCenter proxy shutdown() called...') + + +def find_credentials(): + ''' + Cycle through all the possible credentials and return the first one that + works. + ''' + + # If the username and password were already found, don't go through the + # connection process again + if 'username' in DETAILS and 'password' in DETAILS: + return DETAILS['username'], DETAILS['password'] + + passwords = __pillar__['proxy']['passwords'] + for password in passwords: + DETAILS['password'] = password + if not __salt__['vsphere.test_vcenter_connection'](): + # We are unable to authenticate + continue + # If we have data returned from above, we've successfully authenticated. + return DETAILS['username'], password + # We've reached the end of the list without successfully authenticating. 
+ raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' + 'incorrect credentials.') + + +def get_details(): + ''' + Function that returns the cached details + ''' + return DETAILS diff --git a/salt/serializers/yaml.py b/salt/serializers/yaml.py index 2fad384d1bb..e893c3f3898 100644 --- a/salt/serializers/yaml.py +++ b/salt/serializers/yaml.py @@ -77,10 +77,25 @@ def serialize(obj, **options): raise SerializationError(error) +class EncryptedString(str): + + yaml_tag = u'!encrypted' + + @staticmethod + def yaml_constructor(loader, tag, node): + return EncryptedString(loader.construct_scalar(node)) + + @staticmethod + def yaml_dumper(dumper, data): + return dumper.represent_scalar(EncryptedString.yaml_tag, data.__str__()) + + class Loader(BaseLoader): # pylint: disable=W0232 '''Overwrites Loader as not for pollute legacy Loader''' pass + +Loader.add_multi_constructor(EncryptedString.yaml_tag, EncryptedString.yaml_constructor) Loader.add_multi_constructor('tag:yaml.org,2002:null', Loader.construct_yaml_null) Loader.add_multi_constructor('tag:yaml.org,2002:bool', Loader.construct_yaml_bool) Loader.add_multi_constructor('tag:yaml.org,2002:int', Loader.construct_yaml_int) @@ -100,6 +115,7 @@ class Dumper(BaseDumper): # pylint: disable=W0232 '''Overwrites Dumper as not for pollute legacy Dumper''' pass +Dumper.add_multi_representer(EncryptedString, EncryptedString.yaml_dumper) Dumper.add_multi_representer(type(None), Dumper.represent_none) Dumper.add_multi_representer(str, Dumper.represent_str) if six.PY2: diff --git a/salt/states/archive.py b/salt/states/archive.py index 2a1454f99da..8bea1e17b23 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -414,7 +414,7 @@ def extracted(name, .. versionadded:: 2017.7.3 keep : True - Same as ``keep_source``. + Same as ``keep_source``, kept for backward-compatibility. .. note:: If both ``keep_source`` and ``keep`` are used, ``keep`` will be @@ -648,6 +648,21 @@ def extracted(name, # Remove pub kwargs as they're irrelevant here. kwargs = salt.utils.args.clean_kwargs(**kwargs) + if 'keep_source' in kwargs and 'keep' in kwargs: + ret.setdefault('warnings', []).append( + 'Both \'keep_source\' and \'keep\' were used. Since these both ' + 'do the same thing, \'keep\' was ignored.' + ) + keep_source = bool(kwargs.pop('keep_source')) + kwargs.pop('keep') + elif 'keep_source' in kwargs: + keep_source = bool(kwargs.pop('keep_source')) + elif 'keep' in kwargs: + keep_source = bool(kwargs.pop('keep')) + else: + # Neither was passed, default is True + keep_source = True + if 'keep_source' in kwargs and 'keep' in kwargs: ret.setdefault('warnings', []).append( 'Both \'keep_source\' and \'keep\' were used. 
Since these both ' diff --git a/salt/states/boto_rds.py b/salt/states/boto_rds.py index c3bc7661556..c35eea58485 100644 --- a/salt/states/boto_rds.py +++ b/salt/states/boto_rds.py @@ -697,7 +697,10 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N changed = {} for items in parameters: for k, value in items.items(): - params[k] = value + if type(value) is bool: + params[k] = 'on' if value else 'off' + else: + params[k] = str(value) logging.debug('Parameters from user are : {0}.'.format(params)) options = __salt__['boto_rds.describe_parameters'](name=name, region=region, key=key, keyid=keyid, profile=profile) if not options.get('result'): @@ -705,8 +708,8 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N ret['comment'] = os.linesep.join([ret['comment'], 'Failed to get parameters for group {0}.'.format(name)]) return ret for parameter in options['parameters'].values(): - if parameter['ParameterName'] in params and str(params.get(parameter['ParameterName'])) != str(parameter['ParameterValue']): - logging.debug('Values that are being compared are {0}:{1} .'.format(params.get(parameter['ParameterName']), parameter['ParameterValue'])) + if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != str(parameter['ParameterValue']): + logging.debug('Values that are being compared for {0} are {1}:{2} .'.format(parameter['ParameterName'], params.get(parameter['ParameterName']), parameter['ParameterValue'])) changed[parameter['ParameterName']] = params.get(parameter['ParameterName']) if len(changed) > 0: if __opts__['test']: @@ -715,9 +718,9 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N return ret update = __salt__['boto_rds.update_parameter_group'](name, parameters=changed, apply_method=apply_method, tags=tags, region=region, key=key, keyid=keyid, profile=profile) - if not update: + if 'error' in update: ret['result'] = False - ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}.'.format(changed, name)]) + ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}:'.format(changed, name), update['error']['message']]) return ret ret['changes']['Parameters'] = changed ret['comment'] = os.linesep.join([ret['comment'], 'Parameters {0} for group {1} are changed.'.format(changed, name)]) diff --git a/salt/states/dvs.py b/salt/states/dvs.py new file mode 100644 index 00000000000..6b44a84c387 --- /dev/null +++ b/salt/states/dvs.py @@ -0,0 +1,717 @@ +# -*- coding: utf-8 -*- +''' +Manage VMware distributed virtual switches (DVSs) and their distributed virtual +portgroups (DVportgroups). + +:codeauthor: :email:`Alexandru Bleotu ` + +Examples +======== + +Several settings can be changed for DVSs and DVportgroups. Here are two examples +covering all of the settings. Fewer settings can be used as well. + +DVS +--- + +.. 
code-block:: python + + 'name': 'dvs1', + 'max_mtu': 1000, + 'uplink_names': [ + 'dvUplink1', + 'dvUplink2', + 'dvUplink3' + ], + 'capability': { + 'portgroup_operation_supported': false, + 'operation_supported': true, + 'port_operation_supported': false + }, + 'lacp_api_version': 'multipleLag', + 'contact_email': 'foo@email.com', + 'product_info': { + 'version': + '6.0.0', + 'vendor': + 'VMware, + Inc.', + 'name': + 'DVS' + }, + 'network_resource_management_enabled': true, + 'contact_name': 'me@email.com', + 'infrastructure_traffic_resource_pools': [ + { + 'reservation': 0, + 'limit': 1000, + 'share_level': 'high', + 'key': 'management', + 'num_shares': 100 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'faultTolerance', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': 32000, + 'share_level': 'normal', + 'key': 'vmotion', + 'num_shares': 50 + }, + { + 'reservation': 10000, + 'limit': -1, + 'share_level': 'normal', + 'key': 'virtualMachine', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'custom', + 'key': 'iSCSI', + 'num_shares': 75 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'nfs', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'hbr', + 'num_shares': 50 + }, + { + 'reservation': 8750, + 'limit': 15000, + 'share_level': 'high', + 'key': 'vsan', + 'num_shares': 100 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'vdp', + 'num_shares': 50 + } + ], + 'link_discovery_protocol': { + 'operation': + 'listen', + 'protocol': + 'cdp' + }, + 'network_resource_control_version': 'version3', + 'description': 'Managed by Salt. Random settings.' + +Note: The mandatory attribute is: ``name``. + +Portgroup +--------- + +.. code-block:: python + 'security_policy': { + 'allow_promiscuous': true, + 'mac_changes': false, + 'forged_transmits': true + }, + 'name': 'vmotion-v702', + 'out_shaping': { + 'enabled': true, + 'average_bandwidth': 1500, + 'burst_size': 4096, + 'peak_bandwidth': 1500 + }, + 'num_ports': 128, + 'teaming': { + 'port_order': { + 'active': [ + 'dvUplink2' + ], + 'standby': [ + 'dvUplink1' + ] + }, + 'notify_switches': false, + 'reverse_policy': true, + 'rolling_order': false, + 'policy': 'failover_explicit', + 'failure_criteria': { + 'check_error_percent': true, + 'full_duplex': false, + 'check_duplex': false, + 'percentage': 50, + 'check_speed': 'minimum', + 'speed': 20, + 'check_beacon': true + } + }, + 'type': 'earlyBinding', + 'vlan_id': 100, + 'description': 'Managed by Salt. Random settings.' + +Note: The mandatory attributes are: ``name``, ``type``. + +Dependencies +============ + + +- pyVmomi Python Module + + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9, + or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. 
code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original ESXi State +Module was developed against. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import traceback +import sys + +# Import Salt Libs +import salt.exceptions +from salt.ext.six.moves import range + +# Import Third Party Libs +try: + from pyVmomi import VmomiSupport + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + if not HAS_PYVMOMI: + return False, 'State module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('State module did not load: Incompatible versions ' + 'of Python and pyVmomi present. See Issue #29537.') + return 'dvs' + + +def mod_init(low): + ''' + Init function + ''' + return True + + +def _get_datacenter_name(): + ''' + Returns the datacenter name configured on the proxy + + Supported proxies: esxcluster, esxdatacenter + ''' + + proxy_type = __salt__['vsphere.get_proxy_type']() + details = None + if proxy_type == 'esxcluster': + details = __salt__['esxcluster.get_details']() + elif proxy_type == 'esxdatacenter': + details = __salt__['esxdatacenter.get_details']() + if not details: + raise salt.exceptions.CommandExecutionError( + 'details for proxy type \'{0}\' not loaded'.format(proxy_type)) + return details['datacenter'] + + +def dvs_configured(name, dvs): + ''' + Configures a DVS. + + Creates a new DVS, if it doesn't exist in the provided datacenter or + reconfigures it if configured differently. + + dvs + DVS dict representations (see module sysdocs) + ''' + datacenter_name = _get_datacenter_name() + dvs_name = dvs['name'] if dvs.get('name') else name + log.info('Running state {0} for DVS \'{1}\' in datacenter ' + '\'{2}\''.format(name, dvs_name, datacenter_name)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None} + comments = [] + changes = {} + changes_required = False + + try: + #TODO dvs validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name], + service_instance=si) + if not dvss: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create a new DVS ' + '\'{1}\' in datacenter \'{2}\'' + ''.format(name, dvs_name, datacenter_name)) + log.info(comments[-1]) + else: + dvs['name'] = dvs_name + __salt__['vsphere.create_dvs'](dvs_dict=dvs, + dvs_name=dvs_name, + service_instance=si) + comments.append('Created a new DVS \'{0}\' in datacenter ' + '\'{1}\''.format(dvs_name, datacenter_name)) + log.info(comments[-1]) + changes.update({'dvs': {'new': dvs}}) + else: + # DVS already exists. Checking various aspects of the config + props = ['description', 'contact_email', 'contact_name', + 'lacp_api_version', 'link_discovery_protocol', + 'max_mtu', 'network_resource_control_version', + 'network_resource_management_enabled'] + log.trace('DVS \'{0}\' found in datacenter \'{1}\'. 
Checking ' + 'for any updates in ' + '{2}'.format(dvs_name, datacenter_name, props)) + props_to_original_values = {} + props_to_updated_values = {} + current_dvs = dvss[0] + for prop in props: + if prop in dvs and dvs[prop] != current_dvs.get(prop): + props_to_original_values[prop] = current_dvs.get(prop) + props_to_updated_values[prop] = dvs[prop] + + # Simple infrastructure traffic resource control compare doesn't + # work because num_shares is optional if share_level is not custom + # We need to do a dedicated compare for this property + infra_prop = 'infrastructure_traffic_resource_pools' + original_infra_res_pools = [] + updated_infra_res_pools = [] + if infra_prop in dvs: + if not current_dvs.get(infra_prop): + updated_infra_res_pools = dvs[infra_prop] + else: + for idx in range(len(dvs[infra_prop])): + if 'num_shares' not in dvs[infra_prop][idx] and \ + current_dvs[infra_prop][idx]['share_level'] != \ + 'custom' and \ + 'num_shares' in current_dvs[infra_prop][idx]: + + del current_dvs[infra_prop][idx]['num_shares'] + if dvs[infra_prop][idx] != \ + current_dvs[infra_prop][idx]: + + original_infra_res_pools.append( + current_dvs[infra_prop][idx]) + updated_infra_res_pools.append( + dict(dvs[infra_prop][idx])) + if updated_infra_res_pools: + props_to_original_values[ + 'infrastructure_traffic_resource_pools'] = \ + original_infra_res_pools + props_to_updated_values[ + 'infrastructure_traffic_resource_pools'] = \ + updated_infra_res_pools + if props_to_updated_values: + if __opts__['test']: + changes_string = '' + for p in props_to_updated_values: + if p == 'infrastructure_traffic_resource_pools': + changes_string += \ + '\tinfrastructure_traffic_resource_pools:\n' + for idx in range(len(props_to_updated_values[p])): + d = props_to_updated_values[p][idx] + s = props_to_original_values[p][idx] + changes_string += \ + ('\t\t{0} from \'{1}\' to \'{2}\'\n' + ''.format(d['key'], s, d)) + else: + changes_string += \ + ('\t{0} from \'{1}\' to \'{2}\'\n' + ''.format(p, props_to_original_values[p], + props_to_updated_values[p])) + comments.append( + 'State dvs_configured will update DVS \'{0}\' ' + 'in datacenter \'{1}\':\n{2}' + ''.format(dvs_name, datacenter_name, changes_string)) + log.info(comments[-1]) + else: + __salt__['vsphere.update_dvs']( + dvs_dict=props_to_updated_values, + dvs=dvs_name, + service_instance=si) + comments.append('Updated DVS \'{0}\' in datacenter \'{1}\'' + ''.format(dvs_name, datacenter_name)) + log.info(comments[-1]) + changes.update({'dvs': {'new': props_to_updated_values, + 'old': props_to_original_values}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': str(exc), + 'result': False if not __opts__['test'] else None}) + return ret + if not comments: + # We have no changes + ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is ' + 'correctly configured. Nothing to be done.' 
+ ''.format(dvs_name, datacenter_name)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret + + +def _get_diff_dict(dict1, dict2): + ''' + Returns a dictionary with the diffs between two dictionaries + + It will ignore any key that doesn't exist in dict2 + ''' + ret_dict = {} + for p in dict2.keys(): + if p not in dict1: + ret_dict.update({p: {'val1': None, 'val2': dict2[p]}}) + elif dict1[p] != dict2[p]: + if isinstance(dict1[p], dict) and isinstance(dict2[p], dict): + sub_diff_dict = _get_diff_dict(dict1[p], dict2[p]) + if sub_diff_dict: + ret_dict.update({p: sub_diff_dict}) + else: + ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}}) + return ret_dict + + +def _get_val2_dict_from_diff_dict(diff_dict): + ''' + Returns a dictionary with the values stored in val2 of a diff dict. + ''' + ret_dict = {} + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict)) + if 'val2' in diff_dict[p].keys(): + ret_dict.update({p: diff_dict[p]['val2']}) + else: + ret_dict.update( + {p: _get_val2_dict_from_diff_dict(diff_dict[p])}) + return ret_dict + + +def _get_val1_dict_from_diff_dict(diff_dict): + ''' + Returns a dictionary with the values stored in val1 of a diff dict. + ''' + ret_dict = {} + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict)) + if 'val1' in diff_dict[p].keys(): + ret_dict.update({p: diff_dict[p]['val1']}) + else: + ret_dict.update( + {p: _get_val1_dict_from_diff_dict(diff_dict[p])}) + return ret_dict + + +def _get_changes_from_diff_dict(diff_dict): + ''' + Returns a list of string messages describing the differences in a diff dict. + + Each inner message is tabulated one tab deeper + ''' + changes_strings = [] + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict)) + if sorted(diff_dict[p].keys()) == ['val1', 'val2']: + # Some string formatting + from_str = diff_dict[p]['val1'] + if isinstance(diff_dict[p]['val1'], str): + from_str = '\'{0}\''.format(diff_dict[p]['val1']) + elif isinstance(diff_dict[p]['val1'], list): + from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1'])) + to_str = diff_dict[p]['val2'] + if isinstance(diff_dict[p]['val2'], str): + to_str = '\'{0}\''.format(diff_dict[p]['val2']) + elif isinstance(diff_dict[p]['val2'], list): + to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2'])) + changes_strings.append('{0} from {1} to {2}'.format( + p, from_str, to_str)) + else: + sub_changes = _get_changes_from_diff_dict(diff_dict[p]) + if sub_changes: + changes_strings.append('{0}:'.format(p)) + changes_strings.extend(['\t{0}'.format(c) + for c in sub_changes]) + return changes_strings + + +def portgroups_configured(name, dvs, portgroups): + ''' + Configures portgroups on a DVS. 
+ + Creates/updates/removes portgroups in a provided DVS + + dvs + Name of the DVS + + portgroups + Portgroup dict representations (see module sysdocs) + ''' + datacenter = _get_datacenter_name() + log.info('Running state {0} on DVS \'{1}\', datacenter ' + '\'{2}\''.format(name, dvs, datacenter)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + + try: + #TODO portgroups validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_pgs = __salt__['vsphere.list_dvportgroups']( + dvs=dvs, service_instance=si) + expected_pg_names = [] + for pg in portgroups: + pg_name = pg['name'] + expected_pg_names.append(pg_name) + del pg['name'] + log.info('Checking pg \'{0}\''.format(pg_name)) + filtered_current_pgs = \ + [p for p in current_pgs if p.get('name') == pg_name] + if not filtered_current_pgs: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create a new portgroup ' + '\'{1}\' in DVS \'{2}\', datacenter ' + '\'{3}\''.format(name, pg_name, dvs, + datacenter)) + else: + __salt__['vsphere.create_dvportgroup']( + portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs, + service_instance=si) + comments.append('Created a new portgroup \'{0}\' in DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(pg_name, dvs, datacenter)) + log.info(comments[-1]) + changes.update({pg_name: {'new': pg}}) + else: + # Portgroup already exists. Checking the config + log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter ' + '\'{2}\'. Checking for any updates.' + ''.format(pg_name, dvs, datacenter)) + current_pg = filtered_current_pgs[0] + diff_dict = _get_diff_dict(current_pg, pg) + + if diff_dict: + changes_required = True + if __opts__['test']: + changes_strings = \ + _get_changes_from_diff_dict(diff_dict) + log.trace('changes_strings = ' + '{0}'.format(changes_strings)) + comments.append( + 'State {0} will update portgroup \'{1}\' in ' + 'DVS \'{2}\', datacenter \'{3}\':\n{4}' + ''.format(name, pg_name, dvs, datacenter, + '\n'.join(['\t{0}'.format(c) for c in + changes_strings]))) + else: + __salt__['vsphere.update_dvportgroup']( + portgroup_dict=pg, portgroup=pg_name, dvs=dvs, + service_instance=si) + comments.append('Updated portgroup \'{0}\' in DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(pg_name, dvs, datacenter)) + log.info(comments[-1]) + changes.update( + {pg_name: {'new': + _get_val2_dict_from_diff_dict(diff_dict), + 'old': + _get_val1_dict_from_diff_dict(diff_dict)}}) + # Add the uplink portgroup to the expected pg names + uplink_pg = __salt__['vsphere.list_uplink_dvportgroup']( + dvs=dvs, service_instance=si) + expected_pg_names.append(uplink_pg['name']) + # Remove any extra portgroups + for current_pg in current_pgs: + if current_pg['name'] not in expected_pg_names: + changes_required = True + if __opts__['test']: + comments.append('State {0} will remove ' + 'the portgroup \'{1}\' from DVS \'{2}\', ' + 'datacenter \'{3}\'' + ''.format(name, current_pg['name'], dvs, + datacenter)) + else: + __salt__['vsphere.remove_dvportgroup']( + portgroup=current_pg['name'], dvs=dvs, + service_instance=si) + comments.append('Removed the portgroup \'{0}\' from DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(current_pg['name'], dvs, + datacenter)) + log.info(comments[-1]) + changes.update({current_pg['name']: + {'old': current_pg}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + 
log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter ' + '\'{1}\' exist and are correctly configured. ' + 'Nothing to be done.'.format(dvs, datacenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret + + +def uplink_portgroup_configured(name, dvs, uplink_portgroup): + ''' + Configures the uplink portgroup on a DVS. The state assumes there is only + one uplink portgroup. + + dvs + Name of the DVS + + uplink_portgroup + Uplink portgroup dict representation (see module sysdocs) + + ''' + datacenter = _get_datacenter_name() + log.info('Running state {0} on DVS \'{1}\', datacenter \'{2}\'' + ''.format(name, dvs, datacenter)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + + try: + #TODO portgroup validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup']( + dvs=dvs, service_instance=si) + log.trace('current_uplink_portgroup = ' + '{0}'.format(current_uplink_portgroup)) + diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup) + if diff_dict: + changes_required = True + if __opts__['test']: + changes_strings = \ + _get_changes_from_diff_dict(diff_dict) + log.trace('changes_strings = ' + '{0}'.format(changes_strings)) + comments.append( + 'State {0} will update the ' + 'uplink portgroup in DVS \'{1}\', datacenter ' + '\'{2}\':\n{3}' + ''.format(name, dvs, datacenter, + '\n'.join(['\t{0}'.format(c) for c in + changes_strings]))) + else: + __salt__['vsphere.update_dvportgroup']( + portgroup_dict=uplink_portgroup, + portgroup=current_uplink_portgroup['name'], + dvs=dvs, + service_instance=si) + comments.append('Updated the uplink portgroup in DVS ' + '\'{0}\', datacenter \'{1}\'' + ''.format(dvs, datacenter)) + log.info(comments[-1]) + changes.update( + {'uplink_portgroup': + {'new': _get_val2_dict_from_diff_dict(diff_dict), + 'old': _get_val1_dict_from_diff_dict(diff_dict)}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter ' + '\'{1}\' is correctly configured. 
' + 'Nothing to be done.'.format(dvs, datacenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 12240422e4d..3d723abce14 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -90,20 +90,47 @@ ESXi Proxy Minion, please refer to the configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. - ''' # Import Python Libs from __future__ import absolute_import import logging +import sys +import re # Import Salt Libs from salt.ext import six import salt.utils.files -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, InvalidConfigError, \ + VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ + ArgumentValueError +from salt.utils.decorators import depends +from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ + HostCacheSchema + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) +try: + from pyVmomi import VmomiSupport + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + log.error('pyVmomi not loaded: Incompatible versions ' + 'of Python. See Issue #29537.') + raise ImportError() + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + def __virtual__(): return 'esxi.cmd' in __salt__ @@ -998,6 +1025,577 @@ def syslog_configured(name, return ret +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +def diskgroups_configured(name, diskgroups, erase_disks=False): + ''' + Configures the disk groups to use for VSAN. + + It will do the following: + (1) checks if all disks in the diskgroup spec exist and errors if they + don't + (2) creates diskgroups with the correct disk configurations if a diskgroup + (identified by the cache disk canonical name) doesn't exist + (3) adds extra capacity disks to the existing diskgroup + + State input example + ------------------- + + .. code:: python + + { + 'cache_scsi_addr': 'vmhba1:C0:T0:L0', + 'capacity_scsi_addrs': [ + 'vmhba2:C0:T0:L0', + 'vmhba3:C0:T0:L0', + 'vmhba4:C0:T0:L0', + ] + } + + name + Mandatory state name. + + diskgroups + Disk group representation containing scsi disk addresses. + SCSI addresses are expected for the disks in the diskgroup. + + erase_disks + Specifies whether to erase all partitions on all member disks of the + disk group before the disk group is created. Default value is False. 
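+
+    Usage example
+    -------------
+
+    A minimal SLS sketch (the state ID and SCSI addresses here are
+    hypothetical) showing how the input above maps onto this state:
+
+    .. code:: yaml
+
+        host-diskgroups:
+          esxi.diskgroups_configured:
+            - diskgroups:
+              - cache_scsi_addr: 'vmhba1:C0:T0:L0'
+                capacity_scsi_addrs:
+                  - 'vmhba2:C0:T0:L0'
+                  - 'vmhba3:C0:T0:L0'
+            - erase_disks: false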
+ ''' + proxy_details = __salt__['esxi.get_details']() + hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ + else proxy_details['esxi_host'] + log.info('Running state {0} for host \'{1}\''.format(name, hostname)) + # Variable used to return the result of the invocation + ret = {'name': name, 'result': None, 'changes': {}, + 'pchanges': {}, 'comments': None} + # Signals if errors have been encountered + errors = False + # Signals if changes are required + changes = False + comments = [] + diskgroup_changes = {} + si = None + try: + log.trace('Validating diskgroups_configured input') + schema = DiskGroupsDiskScsiAddressSchema.serialize() + try: + jsonschema.validate({'diskgroups': diskgroups, + 'erase_disks': erase_disks}, schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + si = __salt__['vsphere.get_service_instance_via_proxy']() + host_disks = __salt__['vsphere.list_disks'](service_instance=si) + if not host_disks: + raise VMwareObjectRetrievalError( + 'No disks retrieved from host \'{0}\''.format(hostname)) + scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} + log.trace('scsi_addr_to_disk_map = {0}'.format(scsi_addr_to_disk_map)) + existing_diskgroups = \ + __salt__['vsphere.list_diskgroups'](service_instance=si) + cache_disk_to_existing_diskgroup_map = \ + {dg['cache_disk']: dg for dg in existing_diskgroups} + except CommandExecutionError as err: + log.error('Error: {0}'.format(err)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False if not __opts__['test'] else None, + 'comment': str(err)}) + return ret + + # Iterate through all of the disk groups + for idx, dg in enumerate(diskgroups): + # Check for cache disk + if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: + comments.append('No cache disk with scsi address \'{0}\' was ' + 'found.'.format(dg['cache_scsi_addr'])) + log.error(comments[-1]) + errors = True + continue + + # Check for capacity disks + cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] + cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'], + cache_disk_id) + bad_scsi_addrs = [] + capacity_disk_ids = [] + capacity_disk_displays = [] + for scsi_addr in dg['capacity_scsi_addrs']: + if scsi_addr not in scsi_addr_to_disk_map: + bad_scsi_addrs.append(scsi_addr) + continue + capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) + capacity_disk_displays.append( + '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) + if bad_scsi_addrs: + comments.append('Error in diskgroup #{0}: capacity disks with ' + 'scsi addresses {1} were not found.' + ''.format(idx, + ', '.join(['\'{0}\''.format(a) + for a in bad_scsi_addrs]))) + log.error(comments[-1]) + errors = True + continue + + if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): + # A new diskgroup needs to be created + log.trace('erase_disks = {0}'.format(erase_disks)) + if erase_disks: + if __opts__['test']: + comments.append('State {0} will ' + 'erase all disks of disk group #{1}; ' + 'cache disk: \'{2}\', ' + 'capacity disk(s): {3}.' 
+ ''.format(name, idx, cache_disk_display, + ', '.join( + ['\'{}\''.format(a) for a in + capacity_disk_displays]))) + else: + # Erase disk group disks + for disk_id in [cache_disk_id] + capacity_disk_ids: + __salt__['vsphere.erase_disk_partitions']( + disk_id=disk_id, service_instance=si) + comments.append('Erased disks of diskgroup #{0}; ' + 'cache disk: \'{1}\', capacity disk(s): ' + '{2}'.format( + idx, cache_disk_display, + ', '.join(['\'{0}\''.format(a) for a in + capacity_disk_displays]))) + log.info(comments[-1]) + + if __opts__['test']: + comments.append('State {0} will create ' + 'the disk group #{1}; cache disk: \'{2}\', ' + 'capacity disk(s): {3}.' + .format(name, idx, cache_disk_display, + ', '.join(['\'{0}\''.format(a) for a in + capacity_disk_displays]))) + log.info(comments[-1]) + changes = True + continue + try: + __salt__['vsphere.create_diskgroup'](cache_disk_id, + capacity_disk_ids, + safety_checks=False, + service_instance=si) + except VMwareSaltError as err: + comments.append('Error creating disk group #{0}: ' + '{1}.'.format(idx, err)) + log.error(comments[-1]) + errors = True + continue + + comments.append('Created disk group #\'{0}\'.'.format(idx)) + log.info(comments[-1]) + diskgroup_changes[str(idx)] = \ + {'new': {'cache': cache_disk_display, + 'capacity': capacity_disk_displays}} + changes = True + continue + + # The diskgroup exists; checking the capacity disks + log.debug('Disk group #{0} exists. Checking capacity disks: ' + '{1}.'.format(idx, capacity_disk_displays)) + existing_diskgroup = \ + cache_disk_to_existing_diskgroup_map.get(cache_disk_id) + existing_capacity_disk_displays = \ + ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0], disk_id) + for disk_id in existing_diskgroup['capacity_disks']] + # Populate added disks and removed disks and their displays + added_capacity_disk_ids = [] + added_capacity_disk_displays = [] + removed_capacity_disk_ids = [] + removed_capacity_disk_displays = [] + for disk_id in capacity_disk_ids: + if disk_id not in existing_diskgroup['capacity_disks']: + disk_scsi_addr = [d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0] + added_capacity_disk_ids.append(disk_id) + added_capacity_disk_displays.append( + '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) + for disk_id in existing_diskgroup['capacity_disks']: + if disk_id not in capacity_disk_ids: + disk_scsi_addr = [d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0] + removed_capacity_disk_ids.append(disk_id) + removed_capacity_disk_displays.append( + '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) + + log.debug('Disk group #{0}: existing capacity disk ids: {1}; added ' + 'capacity disk ids: {2}; removed capacity disk ids: {3}' + ''.format(idx, existing_capacity_disk_displays, + added_capacity_disk_displays, + removed_capacity_disk_displays)) + + #TODO revisit this when removing capacity disks is supported + if removed_capacity_disk_ids: + comments.append( + 'Error removing capacity disk(s) {0} from disk group #{1}; ' + 'operation is not supported.' 
+ ''.format(', '.join(['\'{0}\''.format(disk) for disk in + removed_capacity_disk_displays]), idx)) + log.error(comments[-1]) + errors = True + continue + + if added_capacity_disk_ids: + # Capacity disks need to be added to disk group + + # Building a string representation of the capacity disks + # that need to be added + s = ', '.join(['\'{0}\''.format(disk) for disk in + added_capacity_disk_displays]) + if __opts__['test']: + comments.append('State {0} will add ' + 'capacity disk(s) {1} to disk group #{2}.' + ''.format(name, s, idx)) + log.info(comments[-1]) + changes = True + continue + try: + __salt__['vsphere.add_capacity_to_diskgroup']( + cache_disk_id, + added_capacity_disk_ids, + safety_checks=False, + service_instance=si) + except VMwareSaltError as err: + comments.append('Error adding capacity disk(s) {0} to ' + 'disk group #{1}: {2}.'.format(s, idx, err)) + log.error(comments[-1]) + errors = True + continue + + com = ('Added capacity disk(s) {0} to disk group #{1}' + ''.format(s, idx)) + log.info(com) + comments.append(com) + diskgroup_changes[str(idx)] = \ + {'new': {'cache': cache_disk_display, + 'capacity': capacity_disk_displays}, + 'old': {'cache': cache_disk_display, + 'capacity': existing_capacity_disk_displays}} + changes = True + continue + + # No capacity needs to be added + s = ('Disk group #{0} is correctly configured. Nothing to be done.' + ''.format(idx)) + log.info(s) + comments.append(s) + __salt__['vsphere.disconnect'](si) + + # Build the final return message + result = (True if not (changes or errors) else # no changes/errors + None if __opts__['test'] else # running in test mode + False if errors else True) # found errors; defaults to True + ret.update({'result': result, + 'comment': '\n'.join(comments)}) + if changes: + if __opts__['test']: + ret['pchanges'] = diskgroup_changes + else: + ret['changes'] = diskgroup_changes + return ret + + +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +def host_cache_configured(name, enabled, datastore, swap_size='100%', + dedicated_backing_disk=False, + erase_backing_disk=False): + ''' + Configures the host cache used for swapping. + + It will do the following: + (1) checks if the backing disk exists + (2) creates the VMFS datastore if it doesn't exist (the datastore + partition will be created and will use the entire disk) + (3) raises an error if dedicated_backing_disk is True and partitions + already exist on the backing disk + (4) configures host_cache to use a portion of the datastore for caching + (either a specific size or a percentage of the datastore) + + State input examples + -------------------- + + Percentage swap size (can't be 100%) + + .. code:: python + + { + 'enabled': true, + 'datastore': { + 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', + 'vmfs_version': 5, + 'name': 'hostcache' + }, + 'dedicated_backing_disk': false, + 'swap_size': '98%', + } + + Fixed swap size + + .. code:: python + + { + 'enabled': true, + 'datastore': { + 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', + 'vmfs_version': 5, + 'name': 'hostcache' + }, + 'dedicated_backing_disk': true, + 'swap_size': '10GiB', + } + + name + Mandatory state name. + + enabled + Specifies whether the host cache is enabled. + + datastore + Specifies the host cache datastore. + + swap_size + Specifies the size of the host cache swap. Can be a percentage or a + value in GiB. Default value is ``100%``. + + dedicated_backing_disk + Specifies whether the backing disk is dedicated to the host cache which + means it must have no other partitions. 
Default is False. + + erase_backing_disk + Specifies whether to erase all partitions on the backing disk before + the datastore is created. Default value is False. + ''' + log.trace('enabled = {0}'.format(enabled)) + log.trace('datastore = {0}'.format(datastore)) + log.trace('swap_size = {0}'.format(swap_size)) + log.trace('erase_backing_disk = {0}'.format(erase_backing_disk)) + # Variable used to return the result of the invocation + proxy_details = __salt__['esxi.get_details']() + hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ + else proxy_details['esxi_host'] + log.trace('hostname = {0}'.format(hostname)) + log.info('Running host_cache_configured for host ' + '\'{0}\''.format(hostname)) + ret = {'name': hostname, 'comment': 'Default comments', + 'result': None, 'changes': {}, 'pchanges': {}} + result = None if __opts__['test'] else True # We assume success + needs_setting = False + comments = [] + changes = {} + si = None + try: + log.debug('Validating host_cache_configured input') + schema = HostCacheSchema.serialize() + try: + jsonschema.validate({'enabled': enabled, + 'datastore': datastore, + 'swap_size': swap_size, + 'erase_backing_disk': erase_backing_disk}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + m = re.match(r'(\d+)(%|GiB)', swap_size) + swap_size_value = int(m.group(1)) + swap_type = m.group(2) + log.trace('swap_size_value = {0}; swap_type = {1}'.format( + swap_size_value, swap_type)) + si = __salt__['vsphere.get_service_instance_via_proxy']() + host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) + + # Check enabled + if host_cache['enabled'] != enabled: + changes.update({'enabled': {'old': host_cache['enabled'], + 'new': enabled}}) + needs_setting = True + + # Check datastores + existing_datastores = None + if host_cache.get('datastore'): + existing_datastores = \ + __salt__['vsphere.list_datastores_via_proxy']( + datastore_names=[datastore['name']], + service_instance=si) + # Retrieve backing disks + existing_disks = __salt__['vsphere.list_disks']( + scsi_addresses=[datastore['backing_disk_scsi_addr']], + service_instance=si) + if not existing_disks: + raise VMwareObjectRetrievalError( + 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' + ''.format(datastore['backing_disk_scsi_addr'], hostname)) + backing_disk = existing_disks[0] + backing_disk_display = '{0} (id:{1})'.format( + backing_disk['scsi_address'], backing_disk['id']) + log.trace('backing_disk = {0}'.format(backing_disk_display)) + + existing_datastore = None + if not existing_datastores: + # Check if disk needs to be erased + if erase_backing_disk: + if __opts__['test']: + comments.append('State {0} will erase ' + 'the backing disk \'{1}\' on host \'{2}\'.' + ''.format(name, backing_disk_display, + hostname)) + log.info(comments[-1]) + else: + # Erase disk + __salt__['vsphere.erase_disk_partitions']( + disk_id=backing_disk['id'], service_instance=si) + comments.append('Erased backing disk \'{0}\' on host ' + '\'{1}\'.'.format(backing_disk_display, + hostname)) + log.info(comments[-1]) + # Create the datastore + if __opts__['test']: + comments.append('State {0} will create ' + 'the datastore \'{1}\', with backing disk ' + '\'{2}\', on host \'{3}\'.' 
+ ''.format(name, datastore['name'], + backing_disk_display, hostname)) + log.info(comments[-1]) + else: + if dedicated_backing_disk: + # Check backing disk doesn't already have partitions + partitions = __salt__['vsphere.list_disk_partitions']( + disk_id=backing_disk['id'], service_instance=si) + log.trace('partitions = {0}'.format(partitions)) + # We will ignore the mbr partitions + non_mbr_partitions = [p for p in partitions + if p['format'] != 'mbr'] + if len(non_mbr_partitions) > 0: + raise VMwareApiError( + 'Backing disk \'{0}\' has unexpected partitions' + ''.format(backing_disk_display)) + __salt__['vsphere.create_vmfs_datastore']( + datastore['name'], existing_disks[0]['id'], + datastore['vmfs_version'], service_instance=si) + comments.append('Created vmfs datastore \'{0}\', backed by ' + 'disk \'{1}\', on host \'{2}\'.' + ''.format(datastore['name'], + backing_disk_display, hostname)) + log.info(comments[-1]) + changes.update( + {'datastore': + {'new': {'name': datastore['name'], + 'backing_disk': backing_disk_display}}}) + existing_datastore = \ + __salt__['vsphere.list_datastores_via_proxy']( + datastore_names=[datastore['name']], + service_instance=si)[0] + needs_setting = True + else: + # Check datastore is backed by the correct disk + if not existing_datastores[0].get('backing_disk_ids'): + raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' + 'backing disk' + ''.format(datastore['name'])) + if backing_disk['id'] not in \ + existing_datastores[0]['backing_disk_ids']: + + raise VMwareSaltError( + 'Datastore \'{0}\' is not backed by the correct disk: ' + 'expected \'{1}\'; got {2}' + ''.format( + datastore['name'], backing_disk['id'], + ', '.join( + ['\'{0}\''.format(disk) for disk in + existing_datastores[0]['backing_disk_ids']]))) + + comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' + 'and is backed by disk \'{2}\'. Nothing to be ' + 'done.'.format(datastore['name'], hostname, + backing_disk_display)) + existing_datastore = existing_datastores[0] + log.trace('existing_datastore = {0}'.format(existing_datastore)) + log.info(comments[-1]) + + if existing_datastore: + # The following comparisons can be done if the existing_datastore + # is set; it may not be set if running in test mode + # + # We support percent, as well as MiB, we will convert the size + # to MiB, multiples of 1024 (VMware SDK limitation) + if swap_type == '%': + # Percentage swap size + # Convert from bytes to MiB + raw_size_MiB = (swap_size_value/100.0) * \ + (existing_datastore['capacity']/1024/1024) + else: + raw_size_MiB = swap_size_value * 1024 + log.trace('raw_size = {0}MiB'.format(raw_size_MiB)) + swap_size_MiB = int(raw_size_MiB/1024)*1024 + log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB)) + existing_swap_size_MiB = 0 + m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ + host_cache.get('swap_size') else None + if m: + # if swap_size from the host is set and has an expected value + # we are going to parse it to get the number of MiBs + existing_swap_size_MiB = int(m.group(1)) + if not existing_swap_size_MiB == swap_size_MiB: + needs_setting = True + changes.update( + {'swap_size': + {'old': '{}GiB'.format(existing_swap_size_MiB/1024), + 'new': '{}GiB'.format(swap_size_MiB/1024)}}) + + if needs_setting: + if __opts__['test']: + comments.append('State {0} will configure ' + 'the host cache on host \'{1}\' to: {2}.' 
+ ''.format(name, hostname, + {'enabled': enabled, + 'datastore_name': datastore['name'], + 'swap_size': swap_size})) + else: + if (existing_datastore['capacity'] / 1024.0**2) < \ + swap_size_MiB: + + raise ArgumentValueError( + 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' + 'smaller than the required swap size ({2} MiB)' + ''.format(existing_datastore['name'], + existing_datastore['capacity'] / 1024.0**2, + swap_size_MiB)) + __salt__['vsphere.configure_host_cache']( + enabled, + datastore['name'], + swap_size_MiB=swap_size_MiB, + service_instance=si) + comments.append('Host cache configured on host ' + '\'{0}\'.'.format(hostname)) + else: + comments.append('Host cache on host \'{0}\' is already correctly ' + 'configured. Nothing to be done.'.format(hostname)) + result = True + __salt__['vsphere.disconnect'](si) + log.info(comments[-1]) + ret.update({'comment': '\n'.join(comments), + 'result': result}) + if __opts__['test']: + ret['pchanges'] = changes + else: + ret['changes'] = changes + return ret + except CommandExecutionError as err: + log.error('Error: {0}.'.format(err)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False if not __opts__['test'] else None, + 'comment': '{}.'.format(err)}) + return ret + + def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from diff --git a/salt/states/file.py b/salt/states/file.py index 1d89feb2957..7724acf5c41 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6637,6 +6637,28 @@ def cached(name, else: pre_hash = None + def _try_cache(path, checksum): + ''' + This helper is not needed anymore in develop as the fileclient in the + develop branch now has means of skipping a download if the existing + hash matches one passed to cp.cache_file. Remove this helper and the + code that invokes it, once we have merged forward into develop. + ''' + if not path or not checksum: + return True + form = salt.utils.files.HASHES_REVMAP.get(len(checksum)) + if form is None: + # Shouldn't happen, an invalid checksum length should be caught + # before we get here. But in the event this gets through, don't let + # it cause any trouble, and just return True. + return True + try: + return salt.utils.get_hash(path, form=form) != checksum + except (IOError, OSError, ValueError): + # Again, shouldn't happen, but don't let invalid input/permissions + # in the call to get_hash blow this up. + return True + # Cache the file. Note that this will not actually download the file if # either of the following is true: # 1. source is a salt:// URL and the fileserver determines that the hash @@ -6645,14 +6667,18 @@ def cached(name, # matches the cached copy. # Remote, non salt:// sources _will_ download if a copy of the file was # not already present in the minion cache. - try: - local_copy = __salt__['cp.cache_file']( - name, - saltenv=saltenv, - source_hash=source_sum.get('hsum')) - except Exception as exc: - ret['comment'] = exc.__str__() - return ret + if _try_cache(local_copy, source_sum.get('hsum')): + # The _try_cache helper is obsolete in the develop branch. Once merged + # forward, remove the helper as well as this if statement, and dedent + # the below block. 
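+            # NOTE: _try_cache infers the hash type from the checksum's
+            # length via salt.utils.files.HASHES_REVMAP and returns False
+            # only when the local copy already matches that checksum, in
+            # which case the cp.cache_file call below is skipped entirely.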
+ try: + local_copy = __salt__['cp.cache_file']( + name, + saltenv=saltenv, + source_hash=source_sum.get('hsum')) + except Exception as exc: + ret['comment'] = exc.__str__() + return ret if not local_copy: ret['comment'] = ( diff --git a/salt/states/heat.py b/salt/states/heat.py index caf549f4333..bb8e6045dca 100644 --- a/salt/states/heat.py +++ b/salt/states/heat.py @@ -79,8 +79,6 @@ def _construct_yaml_str(self, node): Construct for yaml ''' return self.construct_scalar(node) -YamlLoader.add_constructor(u'tag:yaml.org,2002:str', - _construct_yaml_str) YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp', _construct_yaml_str) diff --git a/salt/states/pbm.py b/salt/states/pbm.py new file mode 100644 index 00000000000..00945fc65cf --- /dev/null +++ b/salt/states/pbm.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +''' +Manages VMware storage policies +(called pbm because the vCenter endpoint is /pbm) + +Examples +======== + +Storage policy +-------------- + +.. code-block:: python + +{ + "name": "salt_storage_policy" + "description": "Managed by Salt. Random capability values.", + "resource_type": "STORAGE", + "subprofiles": [ + { + "capabilities": [ + { + "setting": { + "type": "scalar", + "value": 2 + }, + "namespace": "VSAN", + "id": "hostFailuresToTolerate" + }, + { + "setting": { + "type": "scalar", + "value": 2 + }, + "namespace": "VSAN", + "id": "stripeWidth" + }, + { + "setting": { + "type": "scalar", + "value": true + }, + "namespace": "VSAN", + "id": "forceProvisioning" + }, + { + "setting": { + "type": "scalar", + "value": 50 + }, + "namespace": "VSAN", + "id": "proportionalCapacity" + }, + { + "setting": { + "type": "scalar", + "value": 0 + }, + "namespace": "VSAN", + "id": "cacheReservation" + } + ], + "name": "Rule-Set 1: VSAN", + "force_provision": null + } + ], +} + +Dependencies +============ + + +- pyVmomi Python Module + + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import copy +import sys + +# Import Salt Libs +from salt.exceptions import CommandExecutionError, ArgumentValueError +from salt.utils.dictdiffer import recursive_diff +from salt.utils.listdiffer import list_diff + +# External libraries +try: + from pyVmomi import VmomiSupport + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + if not HAS_PYVMOMI: + return False, 'State module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('State module did not load: Incompatible versions ' + 'of Python and pyVmomi present. 
See Issue #29537.') + return True + + +def mod_init(low): + ''' + Init function + ''' + return True + + +def default_vsan_policy_configured(name, policy): + ''' + Configures the default VSAN policy on a vCenter. + The state assumes there is only one default VSAN policy on a vCenter. + + policy + Dict representation of a policy + ''' + # TODO Refactor when recurse_differ supports list_differ + # It's going to make the whole thing much easier + policy_copy = copy.deepcopy(policy) + proxy_type = __salt__['vsphere.get_proxy_type']() + log.trace('proxy_type = {0}'.format(proxy_type)) + # All allowed proxies have a shim execution module with the same + # name which implementes a get_details function + # All allowed proxies have a vcenter detail + vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter'] + log.info('Running {0} on vCenter ' + '\'{1}\''.format(name, vcenter)) + log.trace('policy = {0}'.format(policy)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + si = None + + try: + #TODO policy schema validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_policy = __salt__['vsphere.list_default_vsan_policy'](si) + log.trace('current_policy = {0}'.format(current_policy)) + # Building all diffs between the current and expected policy + # XXX We simplify the comparison by assuming we have at most 1 + # sub_profile + if policy.get('subprofiles'): + if len(policy['subprofiles']) > 1: + raise ArgumentValueError('Multiple sub_profiles ({0}) are not ' + 'supported in the input policy') + subprofile = policy['subprofiles'][0] + current_subprofile = current_policy['subprofiles'][0] + capabilities_differ = list_diff(current_subprofile['capabilities'], + subprofile.get('capabilities', []), + key='id') + del policy['subprofiles'] + if subprofile.get('capabilities'): + del subprofile['capabilities'] + del current_subprofile['capabilities'] + # Get the subprofile diffs without the capability keys + subprofile_differ = recursive_diff(current_subprofile, + dict(subprofile)) + + del current_policy['subprofiles'] + policy_differ = recursive_diff(current_policy, policy) + if policy_differ.diffs or capabilities_differ.diffs or \ + subprofile_differ.diffs: + + if 'name' in policy_differ.new_values or \ + 'description' in policy_differ.new_values: + + raise ArgumentValueError( + '\'name\' and \'description\' of the default VSAN policy ' + 'cannot be updated') + changes_required = True + if __opts__['test']: + str_changes = [] + if policy_differ.diffs: + str_changes.extend([change for change in + policy_differ.changes_str.split('\n')]) + if subprofile_differ.diffs or capabilities_differ.diffs: + str_changes.append('subprofiles:') + if subprofile_differ.diffs: + str_changes.extend( + [' {0}'.format(change) for change in + subprofile_differ.changes_str.split('\n')]) + if capabilities_differ.diffs: + str_changes.append(' capabilities:') + str_changes.extend( + [' {0}'.format(change) for change in + capabilities_differ.changes_str2.split('\n')]) + comments.append( + 'State {0} will update the default VSAN policy on ' + 'vCenter \'{1}\':\n{2}' + ''.format(name, vcenter, '\n'.join(str_changes))) + else: + __salt__['vsphere.update_storage_policy']( + policy=current_policy['name'], + policy_dict=policy_copy, + service_instance=si) + comments.append('Updated the default VSAN policy in vCenter ' + '\'{0}\''.format(vcenter)) + log.info(comments[-1]) + + new_values 
= policy_differ.new_values + new_values['subprofiles'] = [subprofile_differ.new_values] + new_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.new_values + if not new_values['subprofiles'][0]['capabilities']: + del new_values['subprofiles'][0]['capabilities'] + if not new_values['subprofiles'][0]: + del new_values['subprofiles'] + old_values = policy_differ.old_values + old_values['subprofiles'] = [subprofile_differ.old_values] + old_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.old_values + if not old_values['subprofiles'][0]['capabilities']: + del old_values['subprofiles'][0]['capabilities'] + if not old_values['subprofiles'][0]: + del old_values['subprofiles'] + changes.update({'default_vsan_policy': + {'new': new_values, + 'old': old_values}}) + log.trace(changes) + __salt__['vsphere.disconnect'](si) + except CommandExecutionError as exc: + log.error('Error: {}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('Default VSAN policy in vCenter ' + '\'{0}\' is correctly configured. ' + 'Nothing to be done.'.format(vcenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret + + +def storage_policies_configured(name, policies): + ''' + Configures storage policies on a vCenter. + + policies + List of dict representation of the required storage policies + ''' + comments = [] + changes = [] + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + log.trace('policies = {0}'.format(policies)) + si = None + try: + proxy_type = __salt__['vsphere.get_proxy_type']() + log.trace('proxy_type = {0}'.format(proxy_type)) + # All allowed proxies have a shim execution module with the same + # name which implementes a get_details function + # All allowed proxies have a vcenter detail + vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter'] + log.info('Running state \'{0}\' on vCenter ' + '\'{1}\''.format(name, vcenter)) + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_policies = __salt__['vsphere.list_storage_policies']( + policy_names=[policy['name'] for policy in policies], + service_instance=si) + log.trace('current_policies = {0}'.format(current_policies)) + # TODO Refactor when recurse_differ supports list_differ + # It's going to make the whole thing much easier + for policy in policies: + policy_copy = copy.deepcopy(policy) + filtered_policies = [p for p in current_policies + if p['name'] == policy['name']] + current_policy = filtered_policies[0] \ + if filtered_policies else None + + if not current_policy: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create the storage policy ' + '\'{1}\' on vCenter \'{2}\'' + ''.format(name, policy['name'], vcenter)) + else: + __salt__['vsphere.create_storage_policy']( + policy['name'], policy, service_instance=si) + comments.append('Created storage policy \'{0}\' on ' + 'vCenter \'{1}\''.format(policy['name'], + vcenter)) + changes.append({'new': policy, 'old': None}) + log.trace(comments[-1]) + # Continue with next + continue + + # Building all diffs between the current and 
expected policy + # XXX We simplify the comparison by assuming we have at most 1 + # sub_profile + if policy.get('subprofiles'): + if len(policy['subprofiles']) > 1: + raise ArgumentValueError('Multiple sub_profiles ({0}) are not ' + 'supported in the input policy' + ''.format(len(policy['subprofiles']))) + subprofile = policy['subprofiles'][0] + current_subprofile = current_policy['subprofiles'][0] + capabilities_differ = list_diff(current_subprofile['capabilities'], + subprofile.get('capabilities', []), + key='id') + del policy['subprofiles'] + if subprofile.get('capabilities'): + del subprofile['capabilities'] + del current_subprofile['capabilities'] + # Get the subprofile diffs without the capability keys + subprofile_differ = recursive_diff(current_subprofile, + dict(subprofile)) + + del current_policy['subprofiles'] + policy_differ = recursive_diff(current_policy, policy) + if policy_differ.diffs or capabilities_differ.diffs or \ + subprofile_differ.diffs: + + changes_required = True + if __opts__['test']: + str_changes = [] + if policy_differ.diffs: + str_changes.extend( + [change for change in + policy_differ.changes_str.split('\n')]) + if subprofile_differ.diffs or \ + capabilities_differ.diffs: + + str_changes.append('subprofiles:') + if subprofile_differ.diffs: + str_changes.extend( + [' {0}'.format(change) for change in + subprofile_differ.changes_str.split('\n')]) + if capabilities_differ.diffs: + str_changes.append(' capabilities:') + str_changes.extend( + [' {0}'.format(change) for change in + capabilities_differ.changes_str2.split('\n')]) + comments.append( + 'State {0} will update the storage policy \'{1}\'' + ' on vCenter \'{2}\':\n{3}' + ''.format(name, policy['name'], vcenter, + '\n'.join(str_changes))) + else: + __salt__['vsphere.update_storage_policy']( + policy=current_policy['name'], + policy_dict=policy_copy, + service_instance=si) + comments.append('Updated the storage policy \'{0}\' ' + 'in vCenter \'{1}\'' + ''.format(policy['name'], vcenter)) + log.info(comments[-1]) + + # Build new/old values to report what was changed + new_values = policy_differ.new_values + new_values['subprofiles'] = [subprofile_differ.new_values] + new_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.new_values + if not new_values['subprofiles'][0]['capabilities']: + del new_values['subprofiles'][0]['capabilities'] + if not new_values['subprofiles'][0]: + del new_values['subprofiles'] + old_values = policy_differ.old_values + old_values['subprofiles'] = [subprofile_differ.old_values] + old_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.old_values + if not old_values['subprofiles'][0]['capabilities']: + del old_values['subprofiles'][0]['capabilities'] + if not old_values['subprofiles'][0]: + del old_values['subprofiles'] + changes.append({'new': new_values, + 'old': old_values}) + else: + # No diffs found - no updates required + comments.append('Storage policy \'{0}\' is up to date. ' + 'Nothing to be done.'.format(policy['name'])) + __salt__['vsphere.disconnect'](si) + except CommandExecutionError as exc: + log.error('Error: {0}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('All storage policies in vCenter ' + '\'{0}\' are correctly configured. 
' + 'Nothing to be done.'.format(vcenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': {'storage_policies': changes}, + 'result': None}) + else: + ret.update({'changes': {'storage_policies': changes}, + 'result': True}) + return ret + + +def default_storage_policy_assigned(name, policy, datastore): + ''' + Assigns a default storage policy to a datastore + + policy + Name of storage policy + + datastore + Name of datastore + ''' + log.info('Running state {0} for policy \'{1}\', datastore \'{2}\'.' + ''.format(name, policy, datastore)) + changes = {} + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + si = None + try: + si = __salt__['vsphere.get_service_instance_via_proxy']() + existing_policy = \ + __salt__['vsphere.list_default_storage_policy_of_datastore']( + datastore=datastore, service_instance=si) + if existing_policy['name'] == policy: + comment = ('Storage policy \'{0}\' is already assigned to ' + 'datastore \'{1}\'. Nothing to be done.' + ''.format(policy, datastore)) + else: + changes_required = True + changes = { + 'default_storage_policy': {'old': existing_policy['name'], + 'new': policy}} + if __opts__['test']: + comment = ('State {0} will assign storage policy \'{1}\' to ' + 'datastore \'{2}\'.').format(name, policy, + datastore) + else: + __salt__['vsphere.assign_default_storage_policy_to_datastore']( + policy=policy, datastore=datastore, service_instance=si) + comment = ('Storage policy \'{0}\' was assigned to datastore ' + '\'{1}\'.').format(policy, datastore) + log.info(comment) + except CommandExecutionError as exc: + log.error('Error: {}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + ret['comment'] = comment + if changes_required: + if __opts__['test']: + ret.update({'result': None, + 'pchanges': changes}) + else: + ret.update({'result': True, + 'changes': changes}) + else: + ret['result'] = True + return ret
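These states appear to ship in the ``pbm`` state module (the file header for this hunk falls before this excerpt). As a minimal usage sketch, assuming a storage policy named ``gold`` and a datastore named ``ds1`` already exist (both names are illustrative):

.. code-block:: yaml

    gold_is_default_on_ds1:
      pbm.default_storage_policy_assigned:
        - policy: gold
        - datastore: ds1

Because the state populates ``pchanges`` in test mode and ``changes`` otherwise, running it with ``test=True`` first reports the old and new default policy without touching the datastore.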
diff --git a/salt/states/win_wua.py b/salt/states/win_wua.py index ab43b656544..798853d5ca7 100644 --- a/salt/states/win_wua.py +++ b/salt/states/win_wua.py @@ -84,10 +84,12 @@ def installed(name, updates=None): Args: - name (str): The identifier of a single update to install. + name (str): + The identifier of a single update to install. - updates (list): A list of identifiers for updates to be installed. - Overrides ``name``. Default is None. + updates (list): + A list of identifiers for updates to be installed. Overrides + ``name``. Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method @@ -121,7 +123,7 @@ # Install multiple updates install_updates: wua.installed: - - name: + - updates: - KB3194343 - 28cf1b09-2b1a-458c-9bd1-971d1b26b211 ''' @@ -215,10 +217,12 @@ def removed(name, updates=None): Args: - name (str): The identifier of a single update to uninstall. + name (str): + The identifier of a single update to uninstall. - updates (list): A list of identifiers for updates to be removed. - Overrides ``name``. Default is None. + updates (list): + A list of identifiers for updates to be removed. Overrides ``name``. + Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method @@ -329,3 +333,172 @@ def removed(name, updates=None): ret['comment'] = 'Updates removed successfully' return ret + + +def uptodate(name, + software=True, + drivers=False, + skip_hidden=False, + skip_mandatory=False, + skip_reboot=True, + categories=None, + severities=None,): + ''' + Ensure Microsoft Updates that match the passed criteria are installed. + Updates will be downloaded if needed. + + This state allows you to update a system without specifying a specific + update to apply. All matching updates will be installed. + + Args: + + name (str): + The name has no functional value and is only used as a tracking + reference + + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) + + skip_hidden (bool): + Skip updates that have been hidden. Default is False. + + skip_mandatory (bool): + Skip mandatory updates. Default is False. + + skip_reboot (bool): + Skip updates that require a reboot. Default is True. + + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. + + Categories include the following: + + * Critical Updates + * Definition Updates + * Drivers (make sure you set drivers=True) + * Feature Packs + * Security Updates + * Update Rollups + * Updates + * Windows 7 + * Windows 8.1 + * Windows 8.1 drivers + * Windows 8.1 and later drivers + * Windows Defender + + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. + + Severities include the following: + + * Critical + * Important + + + Returns: + dict: A dictionary containing the results of the update + + CLI Example: + + .. code-block:: yaml + + # Update the system using the state defaults + update_system: + wua.uptodate + + # Update the drivers + update_drivers: + wua.uptodate: + - software: False + - drivers: True + - skip_reboot: False + + # Apply all critical updates + update_critical: + wua.uptodate: + - severities: + - Critical + ''' + ret = {'name': name, + 'changes': {}, + 'result': True, + 'comment': ''} + + wua = salt.utils.win_update.WindowsUpdateAgent() + + available_updates = wua.available( + skip_hidden=skip_hidden, skip_installed=True, + skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, + software=software, drivers=drivers, categories=categories, + severities=severities) + + # No updates found + if available_updates.count() == 0: + ret['comment'] = 'No updates found' + return ret + + updates = list(available_updates.list().keys()) + + # Search for updates + install_list = wua.search(updates) + + # List of updates to download + download = salt.utils.win_update.Updates() + for item in install_list.updates: + if not salt.utils.is_true(item.IsDownloaded): + download.updates.Add(item) + + # List of updates to install + install = salt.utils.win_update.Updates() + for item in install_list.updates: + if not salt.utils.is_true(item.IsInstalled): + install.updates.Add(item) + + # Return comment of changes if test. 
+ if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Updates will be installed:' + for update in install.updates: + ret['comment'] += '\n' + ret['comment'] += ': '.join( + [update.Identity.UpdateID, update.Title]) + return ret + + # Download updates + wua.download(download) + + # Install updates + wua.install(install) + + # Refresh windows update info + wua.refresh() + + post_info = wua.updates().list() + + # Verify the installation + for item in install.list(): + if not salt.utils.is_true(post_info[item]['Installed']): + ret['changes']['failed'] = { + item: {'Title': post_info[item]['Title'][:40] + '...', + 'KBs': post_info[item]['KBs']} + } + ret['result'] = False + else: + ret['changes']['installed'] = { + item: {'Title': post_info[item]['Title'][:40] + '...', + 'NeedsReboot': post_info[item]['NeedsReboot'], + 'KBs': post_info[item]['KBs']} + } + + if ret['changes'].get('failed', False): + ret['comment'] = 'Updates failed' + else: + ret['comment'] = 'Updates installed successfully' + + return ret diff --git a/salt/tops/saltclass.py b/salt/tops/saltclass.py new file mode 100644 index 00000000000..585641a0245 --- /dev/null +++ b/salt/tops/saltclass.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +''' +SaltClass master_tops Module + +.. code-block:: yaml + master_tops: + saltclass: + path: /srv/saltclass +''' + +# import python libs +from __future__ import absolute_import +import logging + +import salt.utils.saltclass as sc + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only run if properly configured + ''' + if __opts__['master_tops'].get('saltclass'): + return True + return False + + +def top(**kwargs): + ''' + Node definitions path will be retrieved from __opts__ - or set to default - + then added to 'salt_data' dict that is passed to the 'get_tops' function. 
+ 'salt_data' dict is a convenient way to pass all the required data to the function + It contains: + - __opts__ + - empty __salt__ + - __grains__ + - empty __pillar__ + - minion_id + - path + + If successful the function will return a top dict for minion_id + ''' + # If path has not been set, make a default + _opts = __opts__['master_tops']['saltclass'] + if 'path' not in _opts: + path = '/srv/saltclass' + log.warning('path variable unset, using default: {0}'.format(path)) + else: + path = _opts['path'] + + # Create a dict that will contain our salt objects + # to send to get_tops function + if 'id' not in kwargs['opts']: + log.warning('Minion id not found - Returning empty dict') + return {} + else: + minion_id = kwargs['opts']['id'] + + salt_data = { + '__opts__': kwargs['opts'], + '__salt__': {}, + '__grains__': kwargs['grains'], + '__pillar__': {}, + 'minion_id': minion_id, + 'path': path + } + + return sc.get_tops(minion_id, salt_data) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index b0077420834..6dc7799a570 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -217,7 +217,7 @@ class RecursiveDictDiffer(DictDiffer): Each inner difference is tabulated two space deeper ''' changes_strings = [] - for p in diff_dict.keys(): + for p in sorted(diff_dict.keys()): if sorted(diff_dict[p].keys()) == ['new', 'old']: # Some string formatting old_value = diff_dict[p]['old'] @@ -267,7 +267,7 @@ keys.append('{0}{1}'.format(prefix, key)) return keys - return _added(self._diffs, prefix='') + return sorted(_added(self._diffs, prefix='')) def removed(self): ''' @@ -290,7 +290,7 @@ prefix='{0}{1}.'.format(prefix, key))) return keys - return _removed(self._diffs, prefix='') + return sorted(_removed(self._diffs, prefix='')) def changed(self): ''' @@ -338,7 +338,7 @@ return keys - return _changed(self._diffs, prefix='') + return sorted(_changed(self._diffs, prefix='')) def unchanged(self): ''' @@ -363,7 +363,7 @@ prefix='{0}{1}.'.format(prefix, key))) return keys - return _unchanged(self.current_dict, self._diffs, prefix='') + return sorted(_unchanged(self.current_dict, self._diffs, prefix='')) @property def diffs(self): diff --git a/salt/utils/files.py b/salt/utils/files.py index c55ac86324d..207e29b5cae 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -485,6 +485,8 @@ def safe_filename_leaf(file_basename): windows is \\ / : * ? " < > | posix is / .. versionadded:: 2017.7.2 + + :codeauthor: Damon Atkins ''' def _replace(re_obj): return urllib.quote(re_obj.group(0), safe=u'') @@ -497,19 +499,27 @@ return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) -def safe_filepath(file_path_name): +def safe_filepath(file_path_name, dir_sep=None): ''' Input the full path and filename, splits on directory separator and calls safe_filename_leaf for - each part of the path. + each part of the path. dir_sep allows the caller to force the directory separator to a particular character .. 
versionadded:: 2017.7.2 + + :codeauthor: Damon Atkins ''' + if not dir_sep: + dir_sep = os.sep + # Normally if file_path_name or dir_sep is Unicode then the output will be Unicode + # This code ensures the output type is the same as file_path_name + if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type): + dir_sep = dir_sep.encode('ascii') # This should not be executed under PY3 + # splitdrive only sets the drive on Windows platforms (drive, path) = os.path.splitdrive(file_path_name) - path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) + path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)]) if drive: - return os.sep.join([drive, path]) - else: - return path + path = dir_sep.join([drive, path]) + return path @jinja_filter('is_text_file') diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 0a49f2c24d4..4657c70210d 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -966,6 +966,31 @@ class CkMinions(object): auth_list.append(matcher) return auth_list + def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None): + ''' + Returns a list of authorisation matchers that a user is eligible for. + This list is a combination of the provided personal matchers plus the + matchers of any group the user is in. + ''' + if auth_list is None: + auth_list = [] + if permissive is None: + permissive = self.opts.get('permissive_acl') + name_matched = False + for match in auth_provider: + if match == '*' and not permissive: + continue + if match.endswith('%'): + if match.rstrip('%') in groups: + auth_list.extend(auth_provider[match]) + else: + if salt.utils.expr_match(match, name): + name_matched = True + auth_list.extend(auth_provider[match]) + if not permissive and not name_matched and '*' in auth_provider: + auth_list.extend(auth_provider['*']) + return auth_list + def wheel_check(self, auth_list, fun, args): ''' Check special API permissions @@ -982,6 +1007,8 @@ ''' Check special API permissions ''' + if not auth_list: + return False if form != 'cloud': comps = fun.split('.') if len(comps) != 2:
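For illustration, here is a hypothetical ``external_auth`` configuration (all names invented) showing how ``fill_auth_list`` resolves matchers: a ``%`` suffix marks a group matcher, a plain key is matched against the user name with ``expr_match``, and the ``'*'`` entry is only appended for users whose name matched nothing else, unless ``permissive_acl`` is set, in which case it applies to everyone:

.. code-block:: yaml

    external_auth:
      pam:
        # group matcher: members of the 'admins' group
        admins%:
          - '@runner'
          - '@wheel'
        # direct name matcher
        fred:
          - test.ping
        # fallback, used only when no name matcher applied
        '*':
          - test.version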
diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py new file mode 100644 index 00000000000..c7fa43eaa4b --- /dev/null +++ b/salt/utils/pbm.py @@ -0,0 +1,329 @@ +# -*- coding: utf-8 -*- +''' +Library for VMware Storage Policy management (via the pbm endpoint) + +This library is used to manage the various policies available in VMware + +:codeauthor: Alexandru Bleotu + +Dependencies +~~~~~~~~~~~~ + +- pyVmomi Python Module + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +# Import Salt Libs +import salt.utils.vmware +from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ + VMwareObjectRetrievalError + + +try: + from pyVmomi import pbm, vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only load if PyVmomi is installed. + ''' + if HAS_PYVMOMI: + return True + else: + return False, 'Missing dependency: The salt.utils.pbm module ' \ + 'requires the pyvmomi library' + + +def get_profile_manager(service_instance): + ''' + Returns a profile manager + + service_instance + Service instance to the host or vCenter + ''' + stub = salt.utils.vmware.get_new_service_instance_stub( + service_instance, ns='pbm/2.0', path='/pbm/sdk') + pbm_si = pbm.ServiceInstance('ServiceInstance', stub) + try: + profile_manager = pbm_si.RetrieveContent().profileManager + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + + +def get_storage_policies(profile_manager, policy_names=None, + get_all_policies=False): + ''' + Returns a list of the storage policies, filtered by name. + + profile_manager + Reference to the profile manager. + + policy_names + List of policy names to filter by. + Default is None. + + get_all_policies + Flag specifying to return all policies, regardless of the specified + filter. + ''' + res_type = pbm.profile.ResourceType( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + try: + policy_ids = profile_manager.QueryProfile(res_type) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + log.trace('policy_ids = {0}'.format(policy_ids)) + # More policies are returned so we need to filter again + policies = [p for p in get_policies_by_id(profile_manager, policy_ids) + if p.resourceType.resourceType == + pbm.profile.ResourceTypeEnum.STORAGE] + if get_all_policies: + return policies + if not policy_names: + policy_names = [] + return [p for p in policies if p.name in policy_names] + + +def create_storage_policy(profile_manager, policy_spec): + ''' + Creates a storage policy. + + profile_manager + Reference to the profile manager. + + policy_spec + Policy update spec. + ''' + try: + profile_manager.Create(policy_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + + +def update_storage_policy(profile_manager, policy, policy_spec): + ''' + Updates a storage policy. + + profile_manager + Reference to the profile manager. + + policy + Reference to the policy to be updated. + + policy_spec + Policy update spec. + ''' + try: + profile_manager.Update(policy.profileId, policy_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + + +def get_default_storage_policy_of_datastore(profile_manager, datastore): + ''' + Returns the default storage policy reference assigned to a datastore. + + profile_manager + Reference to the profile manager. + + datastore + Reference to the datastore. + ''' + # Retrieve all datastores visible + hub = pbm.placement.PlacementHub( + hubId=datastore._moId, hubType='Datastore') + log.trace('placement_hub = {0}'.format(hub)) + try: + policy_id = profile_manager.QueryDefaultRequirementProfile(hub) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + policy_refs = get_policies_by_id(profile_manager, [policy_id]) + if not policy_refs: + raise VMwareObjectRetrievalError('Storage policy with id \'{0}\' was ' + 'not found'.format(policy_id)) + return policy_refs[0] + + +def assign_default_storage_policy_to_datastore(profile_manager, policy, + datastore): + ''' + Assigns a storage policy as the default policy to a datastore. + + profile_manager + Reference to the profile manager. + + policy + Reference to the policy to assigned. + + datastore + Reference to the datastore. + ''' + placement_hub = pbm.placement.PlacementHub( + hubId=datastore._moId, hubType='Datastore') + log.trace('placement_hub = {0}'.format(placement_hub)) + try: + profile_manager.AssignDefaultRequirementProfile(policy.profileId, + [placement_hub]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) diff --git a/salt/utils/saltclass.py b/salt/utils/saltclass.py new file mode 100644 index 00000000000..3df204d5dc1 --- /dev/null +++ b/salt/utils/saltclass.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +import os +import re +import logging +from salt.ext.six import iteritems +import yaml +from jinja2 import FileSystemLoader, Environment + +log = logging.getLogger(__name__) + + +# Renders jinja from a template file +def render_jinja(_file, salt_data): + j_env = Environment(loader=FileSystemLoader(os.path.dirname(_file))) + j_env.globals.update({ + '__opts__': salt_data['__opts__'], + '__salt__': salt_data['__salt__'], + '__grains__': salt_data['__grains__'], + '__pillar__': salt_data['__pillar__'], + 'minion_id': salt_data['minion_id'], + }) + j_render = j_env.get_template(os.path.basename(_file)).render() + return j_render + + +# Renders yaml from rendered jinja +def render_yaml(_file, salt_data): + return yaml.safe_load(render_jinja(_file, salt_data)) + + +# Returns a dict from a class yaml definition +def get_class(_class, salt_data): + l_files = [] + saltclass_path = salt_data['path'] + + straight = '{0}/classes/{1}.yml'.format(saltclass_path, _class) + sub_straight = '{0}/classes/{1}.yml'.format(saltclass_path, + _class.replace('.', '/')) + sub_init = '{0}/classes/{1}/init.yml'.format(saltclass_path, + _class.replace('.', '/')) + + for root, dirs, files in os.walk('{0}/classes'.format(saltclass_path)): + for l_file in files: + l_files.append('{0}/{1}'.format(root, l_file)) + + if straight in l_files: + return render_yaml(straight, salt_data) + + if sub_straight in l_files: + return render_yaml(sub_straight, salt_data) + + if sub_init in l_files: + return render_yaml(sub_init, salt_data) + + log.warning('{0}: Class definition not found'.format(_class)) + return {} + + +# Return environment +def get_env_from_dict(exp_dict_list): + environment = '' + for s_class in exp_dict_list: + if 'environment' in s_class: + environment = s_class['environment'] + return environment + + +# Merge dict b into a +def dict_merge(a, b, path=None): + if path is None: + path = [] + + for key in b: + if key in a: + if isinstance(a[key], 
list) and isinstance(b[key], list): + if b[key][0] == '^': + b[key].pop(0) + a[key] = b[key] + else: + a[key].extend(b[key]) + elif isinstance(a[key], dict) and isinstance(b[key], dict): + dict_merge(a[key], b[key], path + [str(key)]) + elif a[key] == b[key]: + pass + else: + a[key] = b[key] + else: + a[key] = b[key] + return a + + +# Recursive search and replace in a dict +def dict_search_and_replace(d, old, new, expanded): + for (k, v) in iteritems(d): + if isinstance(v, dict): + dict_search_and_replace(d[k], old, new, expanded) + if v == old: + d[k] = new + return d + + +# Retrieve original value from ${xx:yy:zz} to be expanded +def find_value_to_expand(x, v): + a = x + for i in v[2:-1].split(':'): + if i in a: + a = a.get(i) + else: + a = v + return a + return a + + +# Return a dict that contains expanded variables if found +def expand_variables(a, b, expanded, path=None): + if path is None: + b = a.copy() + path = [] + + for (k, v) in iteritems(a): + if isinstance(v, dict): + expand_variables(v, b, expanded, path + [str(k)]) + else: + if isinstance(v, str): + vre = re.search(r'(^|.)\$\{.*?\}', v) + if vre: + re_v = vre.group(0) + if re_v.startswith('\\'): + v_new = v.replace(re_v, re_v.lstrip('\\')) + b = dict_search_and_replace(b, v, v_new, expanded) + expanded.append(k) + elif not re_v.startswith('$'): + v_expanded = find_value_to_expand(b, re_v[1:]) + v_new = v.replace(re_v[1:], v_expanded) + b = dict_search_and_replace(b, v, v_new, expanded) + expanded.append(k) + else: + v_expanded = find_value_to_expand(b, re_v) + b = dict_search_and_replace(b, v, v_expanded, expanded) + expanded.append(k) + return b + + +def expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand): + # Get classes to expand from minion dictionnary + if not classes_to_expand and 'classes' in minion_dict: + classes_to_expand = minion_dict['classes'] + + # Now loop on list to recursively expand them + for klass in classes_to_expand: + if klass not in seen_classes: + seen_classes.append(klass) + expanded_classes[klass] = get_class(klass, salt_data) + # Fix corner case where class is loaded but doesn't contain anything + if expanded_classes[klass] is None: + expanded_classes[klass] = {} + # Now replace class element in classes_to_expand by expansion + if 'classes' in expanded_classes[klass]: + l_id = classes_to_expand.index(klass) + classes_to_expand[l_id:l_id] = expanded_classes[klass]['classes'] + expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand) + else: + expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand) + + # We may have duplicates here and we want to remove them + tmp = [] + for t_element in classes_to_expand: + if t_element not in tmp: + tmp.append(t_element) + + classes_to_expand = tmp + + # Now that we've retrieved every class in order, + # let's return an ordered list of dicts + ord_expanded_classes = [] + ord_expanded_states = [] + for ord_klass in classes_to_expand: + ord_expanded_classes.append(expanded_classes[ord_klass]) + # And be smart and sort out states list + # Address the corner case where states is empty in a class definition + if 'states' in expanded_classes[ord_klass] and expanded_classes[ord_klass]['states'] is None: + expanded_classes[ord_klass]['states'] = {} + + if 'states' in expanded_classes[ord_klass]: + ord_expanded_states.extend(expanded_classes[ord_klass]['states']) + + # Add our minion dict as final element but check if 
we have states to process + if 'states' in minion_dict and minion_dict['states'] is None: + minion_dict['states'] = [] + + if 'states' in minion_dict: + ord_expanded_states.extend(minion_dict['states']) + + ord_expanded_classes.append(minion_dict) + + return ord_expanded_classes, classes_to_expand, ord_expanded_states + + +def expanded_dict_from_minion(minion_id, salt_data): + _file = '' + saltclass_path = salt_data['path'] + # Start + for root, dirs, files in os.walk('{0}/nodes'.format(saltclass_path)): + for minion_file in files: + if minion_file == '{0}.yml'.format(minion_id): + _file = os.path.join(root, minion_file) + + # Load the minion_id definition if it exists, else an empty dict + node_dict = {} + if _file: + node_dict[minion_id] = render_yaml(_file, salt_data) + else: + log.warning('{0}: Node definition not found'.format(minion_id)) + node_dict[minion_id] = {} + + # Get 2 ordered lists: + # expanded_classes: A list of all the dicts + # classes_list: List of all the classes + expanded_classes, classes_list, states_list = expand_classes_in_order( + node_dict[minion_id], + salt_data, [], {}, []) + + # Here merge the pillars together + pillars_dict = {} + for exp_dict in expanded_classes: + if 'pillars' in exp_dict: + dict_merge(pillars_dict, exp_dict) + + return expanded_classes, pillars_dict, classes_list, states_list + + +def get_pillars(minion_id, salt_data): + # Get 2 dicts and 2 lists + # expanded_classes: Full list of expanded dicts + # pillars_dict: dict containing merged pillars in order + # classes_list: All classes processed in order + # states_list: All states listed in order + (expanded_classes, + pillars_dict, + classes_list, + states_list) = expanded_dict_from_minion(minion_id, salt_data) + + # Retrieve environment + environment = get_env_from_dict(expanded_classes) + + # Expand ${} variables in merged dict + # pillars key shouldn't exist if we haven't found any minion_id ref + if 'pillars' in pillars_dict: + pillars_dict_expanded = expand_variables(pillars_dict['pillars'], {}, []) + else: + pillars_dict_expanded = expand_variables({}, {}, []) + + # Build the final pillars dict + pillars_dict = {} + pillars_dict['__saltclass__'] = {} + pillars_dict['__saltclass__']['states'] = states_list + pillars_dict['__saltclass__']['classes'] = classes_list + pillars_dict['__saltclass__']['environment'] = environment + pillars_dict['__saltclass__']['nodename'] = minion_id + pillars_dict.update(pillars_dict_expanded) + + return pillars_dict + + +def get_tops(minion_id, salt_data): + # Get 2 dicts and 2 lists + # expanded_classes: Full list of expanded dicts + # pillars_dict: dict containing merged pillars in order + # classes_list: All classes processed in order + # states_list: All states listed in order + (expanded_classes, + pillars_dict, + classes_list, + states_list) = expanded_dict_from_minion(minion_id, salt_data) + + # Retrieve environment + environment = get_env_from_dict(expanded_classes) + + # Build final top dict + tops_dict = {} + tops_dict[environment] = states_list + + return tops_dict
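As a sketch of the layout this module consumes (paths and values illustrative), a node definition under ``nodes/`` lists classes, each class may contribute ``states`` and ``pillars``, ``${a:b}`` references are expanded against the merged pillar tree, and a list whose first element is ``^`` replaces the inherited list instead of extending it:

.. code-block:: yaml

    # /srv/saltclass/nodes/minion1.yml
    environment: base
    classes:
      - roles.app

    # /srv/saltclass/classes/roles/app.yml
    states:
      - app
    pillars:
      app:
        port: 8080
        url: http://localhost:${app:port}
        backends:
          - ^
          - backend1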
diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index b239b269b09..68ff6ca7227 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -79,6 +79,8 @@ import atexit import errno import logging import time +import sys +import ssl # Import Salt Libs import salt.exceptions @@ -92,8 +94,9 @@ import salt.utils.stringutils from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: - from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub - from pyVmomi import vim, vmodl + from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ + SoapStubAdapter + from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -405,6 +408,49 @@ def get_service_instance(host, username=None, password=None, protocol=None, return service_instance +def get_new_service_instance_stub(service_instance, path, ns=None, + version=None): + ''' + Returns a stub that points to a different path, + created from an existing connection. + + service_instance + The Service Instance. + + path + Path of the new stub. + + ns + Namespace of the new stub. + Default value is None. + + version + Version of the new stub. + Default value is None. + ''' + # For Python 2.7.9 and later, the default SSL context has stricter + # connection handshaking rules. We may need to turn off the hostname + # checking and client side cert verification + context = None + if sys.version_info[:3] > (2, 7, 8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + stub = service_instance._stub + hostname = stub.host.split(':')[0] + session_cookie = stub.cookie.split('"')[1] + VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie + new_stub = SoapStubAdapter(host=hostname, + ns=ns, + path=path, + version=version, + poolSize=0, + sslContext=context) + new_stub.cookie = stub.cookie + return new_stub + + def get_service_instance_from_managed_object(mo_ref, name=''): ''' Retrieves the service instance from a managed object. @@ -981,6 +1027,333 @@ return vim.vm.device.VirtualE1000e() +def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): + ''' + Returns distributed virtual switches (DVSs) in a datacenter. + + dc_ref + The parent datacenter reference. + + dvs_names + The names of the DVSs to return. Default is None. + + get_all_dvss + Return all DVSs in the datacenter. Default is False. 
+ ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Retrieving DVSs in datacenter \'{0}\', dvs_names=\'{1}\', ' + 'get_all_dvss={2}'.format(dc_name, + ','.join(dvs_names) if dvs_names + else None, + get_all_dvss)) + properties = ['name'] + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='networkFolder', + skip=True, + type=vim.Datacenter, + selectSet=[vmodl.query.PropertyCollector.TraversalSpec( + path='childEntity', + skip=False, + type=vim.Folder)]) + service_instance = get_service_instance_from_managed_object(dc_ref) + items = [i['object'] for i in + get_mors_with_properties(service_instance, + vim.DistributedVirtualSwitch, + container_ref=dc_ref, + property_list=properties, + traversal_spec=traversal_spec) + if get_all_dvss or (dvs_names and i['name'] in dvs_names)] + return items + + +def get_network_folder(dc_ref): + ''' + Retrieves the network folder of a datacenter + ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Retrieving network folder in datacenter ' + '\'{0}\''.format(dc_name)) + service_instance = get_service_instance_from_managed_object(dc_ref) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='networkFolder', + skip=False, + type=vim.Datacenter) + entries = get_mors_with_properties(service_instance, + vim.Folder, + container_ref=dc_ref, + property_list=['name'], + traversal_spec=traversal_spec) + if not entries: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Network folder in datacenter \'{0}\' wasn\'t retrieved' + ''.format(dc_name)) + return entries[0]['object'] + + +def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): + ''' + Creates a distributed virtual switches (DVS) in a datacenter. + Returns the reference to the newly created distributed virtual switch. + + dc_ref + The parent datacenter reference. + + dvs_name + The name of the DVS to create. + + dvs_create_spec + The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. + Default is None. + ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Creating DVS \'{0}\' in datacenter ' + '\'{1}\''.format(dvs_name, dc_name)) + if not dvs_create_spec: + dvs_create_spec = vim.DVSCreateSpec() + if not dvs_create_spec.configSpec: + dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() + dvs_create_spec.configSpec.name = dvs_name + netw_folder_ref = get_network_folder(dc_ref) + try: + task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, dvs_name, str(task.__class__)) + + +def update_dvs(dvs_ref, dvs_config_spec): + ''' + Updates a distributed virtual switch with the config_spec. + + dvs_ref + The DVS reference. + + dvs_config_spec + The updated config spec (vim.VMwareDVSConfigSpec) to be applied to + the DVS. + ''' + dvs_name = get_managed_object_name(dvs_ref) + log.trace('Updating dvs \'{0}\''.format(dvs_name)) + try: + task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, dvs_name, str(task.__class__)) + + +def set_dvs_network_resource_management_enabled(dvs_ref, enabled): + ''' + Sets whether NIOC is enabled on a DVS. + + dvs_ref + The DVS reference. + + enabled + Flag specifying whether NIOC is enabled. + ''' + dvs_name = get_managed_object_name(dvs_ref) + log.trace('Setting network resource management enable to {0} on ' + 'dvs \'{1}\''.format(enabled, dvs_name)) + try: + dvs_ref.EnableNetworkResourceManagement(enable=enabled) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + + +def get_dvportgroups(parent_ref, portgroup_names=None, + get_all_portgroups=False): + ''' + Returns distributed virtual porgroups (dvportgroups). + The parent object can be either a datacenter or a dvs. + + parent_ref + The parent object reference. Can be either a datacenter or a dvs. + + portgroup_names + The names of the dvss to return. Default is None. + + get_all_portgroups + Return all portgroups in the parent. Default is False. + ''' + if not (isinstance(parent_ref, vim.Datacenter) or + isinstance(parent_ref, vim.DistributedVirtualSwitch)): + raise salt.exceptions.ArgumentValueError( + 'Parent has to be either a datacenter, ' + 'or a distributed virtual switch') + parent_name = get_managed_object_name(parent_ref) + log.trace('Retrieving portgroup in {0} \'{1}\', portgroups_names=\'{2}\', ' + 'get_all_portgroups={3}'.format( + type(parent_ref).__name__, parent_name, + ','.join(portgroup_names) if portgroup_names else None, + get_all_portgroups)) + properties = ['name'] + if isinstance(parent_ref, vim.Datacenter): + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='networkFolder', + skip=True, + type=vim.Datacenter, + selectSet=[vmodl.query.PropertyCollector.TraversalSpec( + path='childEntity', + skip=False, + type=vim.Folder)]) + else: # parent is distributed virtual switch + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='portgroup', + skip=False, + type=vim.DistributedVirtualSwitch) + + service_instance = get_service_instance_from_managed_object(parent_ref) + items = [i['object'] for i in + get_mors_with_properties(service_instance, + vim.DistributedVirtualPortgroup, + container_ref=parent_ref, + property_list=properties, + traversal_spec=traversal_spec) + if get_all_portgroups or + (portgroup_names and i['name'] in portgroup_names)] + return items + + +def get_uplink_dvportgroup(dvs_ref): + ''' + Returns the uplink distributed virtual portgroup of a distributed virtual + switch (dvs) + + dvs_ref + The dvs reference + ''' + dvs_name = get_managed_object_name(dvs_ref) + log.trace('Retrieving uplink portgroup of dvs \'{0}\''.format(dvs_name)) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='portgroup', + skip=False, + type=vim.DistributedVirtualSwitch) + service_instance = get_service_instance_from_managed_object(dvs_ref) + items = [entry['object'] for entry in + 
get_mors_with_properties(service_instance, + vim.DistributedVirtualPortgroup, + container_ref=dvs_ref, + property_list=['tag'], + traversal_spec=traversal_spec) + if entry['tag'] and + [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] + if not items: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) + return items[0] + + +def create_dvportgroup(dvs_ref, spec): + ''' + Creates a distributed virtual portgroup on a distributed virtual switch + (dvs) + + dvs_ref + The dvs reference + + spec + Portgroup spec (vim.DVPortgroupConfigSpec) + ''' + dvs_name = get_managed_object_name(dvs_ref) + log.trace('Adding portgroup {0} to dvs ' + '\'{1}\''.format(spec.name, dvs_name)) + log.trace('spec = {}'.format(spec)) + try: + task = dvs_ref.CreateDVPortgroup_Task(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, dvs_name, str(task.__class__)) + + +def update_dvportgroup(portgroup_ref, spec): + ''' + Updates a distributed virtual portgroup + + portgroup_ref + The portgroup reference + + spec + Portgroup spec (vim.DVPortgroupConfigSpec) + ''' + pg_name = get_managed_object_name(portgroup_ref) + log.trace('Updating portgrouo {0}'.format(pg_name)) + try: + task = portgroup_ref.ReconfigureDVPortgroup_Task(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, pg_name, str(task.__class__)) + + +def remove_dvportgroup(portgroup_ref): + ''' + Removes a distributed virtual portgroup + + portgroup_ref + The portgroup reference + ''' + pg_name = get_managed_object_name(portgroup_ref) + log.trace('Removing portgrouo {0}'.format(pg_name)) + try: + task = portgroup_ref.Destroy_Task() + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, pg_name, str(task.__class__)) + + def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. 
@@ -1536,7 +1909,7 @@ def get_datastores(service_instance, reference, datastore_names=None, 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem - log.debug('Filtering datastores with backing disk ids: {}' + log.trace('Filtering datastores with backing disk ids: {}' ''.format(backing_disk_ids)) storage_system = get_storage_system(service_instance, reference, obj_name) @@ -1552,11 +1925,11 @@ def get_datastores(service_instance, reference, datastore_names=None, # Skip volume if it doesn't contain an extent with a # canonical name of interest continue - log.debug('Found datastore \'{0}\' for disk id(s) \'{1}\'' + log.trace('Found datastore \'{0}\' for disk id(s) \'{1}\'' ''.format(vol.name, [e.diskName for e in vol.extent])) disk_datastores.append(vol.name) - log.debug('Datastore found for disk filter: {}' + log.trace('Datastore found for disk filter: {}' ''.format(disk_datastores)) if datastore_names: datastore_names.extend(disk_datastores) @@ -1633,7 +2006,7 @@ def rename_datastore(datastore_ref, new_datastore_name): New datastore name ''' ds_name = get_managed_object_name(datastore_ref) - log.debug('Renaming datastore \'{0}\' to ' + log.trace('Renaming datastore \'{0}\' to ' '\'{1}\''.format(ds_name, new_datastore_name)) try: datastore_ref.RenameDatastore(new_datastore_name) @@ -1675,6 +2048,224 @@ def get_storage_system(service_instance, host_ref, hostname=None): return objs[0]['object'] +def _get_partition_info(storage_system, device_path): + ''' + Returns partition informations for a device path, of type + vim.HostDiskPartitionInfo + ''' + try: + partition_infos = \ + storage_system.RetrieveDiskPartitionInfo( + devicePath=[device_path]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.trace('partition_info = {0}'.format(partition_infos[0])) + return partition_infos[0] + + +def _get_new_computed_partition_spec(hostname, storage_system, device_path, + partition_info): + ''' + Computes the new disk partition info when adding a new vmfs partition that + uses up the remainder of the disk; returns a tuple + (new_partition_number, vim.HostDiskPartitionSpec) + ''' + log.trace('Adding a partition at the end of the disk and getting the new ' + 'computed partition spec') + #TODO implement support for multiple partitions + # We only support adding a partition at the end of the disk + free_partitions = [p for p in partition_info.layout.partition + if p.type == 'none'] + if not free_partitions: + raise salt.exceptions.VMwareObjectNotFoundError( + 'Free partition was not found on device \'{0}\'' + ''.format(partition_info.deviceName)) + free_partition = free_partitions[0] + + # Create a layout object that copies the existing one + layout = vim.HostDiskPartitionLayout( + total=partition_info.layout.total, + partition=partition_info.layout.partition) + # Create a partition with the free space on the disk + # Change the free partition type to vmfs + free_partition.type = 'vmfs' + try: + computed_partition_info = storage_system.ComputeDiskPartitionInfo( + devicePath=device_path, + partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt, + layout=layout) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.trace('computed partition info = {0}' + ''.format(computed_partition_info)) + log.trace('Retrieving new partition number') + partition_numbers = [p.partition for p in + computed_partition_info.layout.partition + if (p.start.block == free_partition.start.block or + # XXX If the entire disk is free (i.e. 
the free + # disk partition starts at block 0) the newly + # created partition is created from block 1 + (free_partition.start.block == 0 and + p.start.block == 1)) and + p.end.block == free_partition.end.block and + p.type == 'vmfs'] + if not partition_numbers: + raise salt.exceptions.VMwareNotFoundError( + 'New partition was not found in computed partitions of device ' + '\'{0}\''.format(partition_info.deviceName)) + log.trace('new partition number = {0}'.format(partition_numbers[0])) + return (partition_numbers[0], computed_partition_info.spec) + + +def create_vmfs_datastore(host_ref, datastore_name, disk_ref, + vmfs_major_version, storage_system=None): + ''' + Creates a VMFS datastore from a disk_id + + host_ref + vim.HostSystem object referencing a host to create the datastore on + + datastore_name + Name of the datastore + + disk_ref + vim.HostScsiDisk on which the datastore is created + + vmfs_major_version + VMFS major version to use + ''' + # TODO Support variable sized partitions + hostname = get_managed_object_name(host_ref) + disk_id = disk_ref.canonicalName + log.debug('Creating datastore \'{0}\' on host \'{1}\', scsi disk \'{2}\', ' + 'vmfs v{3}'.format(datastore_name, hostname, disk_id, + vmfs_major_version)) + if not storage_system: + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + + target_disk = disk_ref + partition_info = _get_partition_info(storage_system, + target_disk.devicePath) + log.trace('partition_info = {0}'.format(partition_info)) + new_partition_number, partition_spec = _get_new_computed_partition_spec( + hostname, storage_system, target_disk.devicePath, partition_info) + spec = vim.VmfsDatastoreCreateSpec( + vmfs=vim.HostVmfsSpec( + majorVersion=vmfs_major_version, + volumeName=datastore_name, + extent=vim.HostScsiDiskPartition( + diskName=disk_id, + partition=new_partition_number)), + diskUuid=target_disk.uuid, + partition=partition_spec) + try: + ds_ref = \ + host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.debug('Created datastore \'{0}\' on host ' + '\'{1}\''.format(datastore_name, hostname)) + return ds_ref + + +def get_host_datastore_system(host_ref, hostname=None): + ''' + Returns a host's datastore system + + host_ref + Reference to the ESXi host + + hostname + Name of the host. This argument is optional. 
+ ''' + + if not hostname: + hostname = get_managed_object_name(host_ref) + service_instance = get_service_instance_from_managed_object(host_ref) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.datastoreSystem', + type=vim.HostSystem, + skip=False) + objs = get_mors_with_properties(service_instance, + vim.HostDatastoreSystem, + property_list=['datastore'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not objs: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' datastore system was not retrieved' + ''.format(hostname)) + log.trace('[{0}] Retrieved datastore system'.format(hostname)) + return objs[0]['object'] + + +def remove_datastore(service_instance, datastore_ref): + ''' + Removes a datastore + + service_instance + The Service Instance Object containing the datastore + + datastore_ref + The reference to the datastore to remove + ''' + ds_props = get_properties_of_managed_object( + datastore_ref, ['host', 'info', 'name']) + ds_name = ds_props['name'] + log.debug('Removing datastore \'{0}\''.format(ds_name)) + ds_info = ds_props['info'] + ds_hosts = ds_props.get('host') + if not ds_hosts: + raise salt.exceptions.VMwareApiError( + 'Datastore \'{0}\' can\'t be removed. No ' + 'attached hosts found'.format(ds_name)) + hostname = get_managed_object_name(ds_hosts[0].key) + host_ds_system = get_host_datastore_system(ds_hosts[0].key, + hostname=hostname) + try: + host_ds_system.RemoveDatastore(datastore_ref) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.trace('[{0}] Removed datastore \'{1}\''.format(hostname, ds_name)) + + def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' @@ -1699,44 +2290,541 @@ def get_hosts(service_instance, datacenter_name=None, host_names=None, Default value is False. ''' properties = ['name'] + if cluster_name and not datacenter_name: + raise salt.exceptions.ArgumentValueError( + 'Must specify the datacenter when specifying the cluster') if not host_names: + host_names = [] - if cluster_name: - properties.append('parent') - if datacenter_name: + if not datacenter_name: + # Assume the root folder is the starting point + start_point = get_root_folder(service_instance) + else: start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: # Retrieval to test if cluster exists. 
Cluster existence only makes - # sense if the cluster has been specified + # sense if the datacenter has been specified cluster = get_cluster(start_point, cluster_name) - else: - # Assume the root folder is the starting point - start_point = get_root_folder(service_instance) + properties.append('parent') # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) + log.trace('Retrieved hosts: {0}'.format([h['name'] for h in hosts])) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) - name_condition = get_all_hosts or (h['name'] in host_names) - # the datacenter_name needs to be set in order for the cluster - # condition membership to be checked, otherwise the condition is - # ignored - cluster_condition = \ - (not datacenter_name or not cluster_name or - (isinstance(h['parent'], vim.ClusterComputeResource) and - h['parent'].name == cluster_name)) - if name_condition and cluster_condition: + if cluster_name: + if not isinstance(h['parent'], vim.ClusterComputeResource): + continue + parent_name = get_managed_object_name(h['parent']) + if parent_name != cluster_name: + continue + + if get_all_hosts: filtered_hosts.append(h['object']) + continue + if h['name'] in host_names: + filtered_hosts.append(h['object']) return filtered_hosts +def _get_scsi_address_to_lun_key_map(service_instance, + host_ref, + storage_system=None, + hostname=None): + ''' + Returns a map between the scsi addresses and the keys of all luns on an ESXi + host. + map[<scsi_address>] = <lun key> + + service_instance + The Service Instance Object from which to obtain the hosts + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + storage_system + The host's storage system. Default is None. + + hostname + Name of the host. Default is None. + ''' + if not hostname: + hostname = get_managed_object_name(host_ref) + if not storage_system: + storage_system = get_storage_system(service_instance, host_ref, + hostname) + try: + device_info = storage_system.storageDeviceInfo + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not device_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage device ' + 'info was not retrieved'.format(hostname)) + multipath_info = device_info.multipathInfo + if not multipath_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' multipath info was not retrieved' + ''.format(hostname)) + if multipath_info.lun is None: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No luns were retrieved from host \'{0}\''.format(hostname)) + lun_key_by_scsi_addr = {} + for l in multipath_info.lun: + # The vmware scsi_address may have multiple comma-separated values + # The first one is the actual scsi address + lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun + for p in l.path}) + log.trace('Scsi address to lun id map on host \'{0}\': ' + '{1}'.format(hostname, lun_key_by_scsi_addr)) + return lun_key_by_scsi_addr + + +def get_all_luns(host_ref, storage_system=None, hostname=None): + ''' + Returns a list of all vim.ScsiLun objects on an ESXi host + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + storage_system + The host's storage system. Default is None. + + hostname + Name of the host. This argument is optional. + ''' + if not hostname: + hostname = get_managed_object_name(host_ref) + if not storage_system: + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + if not storage_system: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage system was not retrieved' + ''.format(hostname)) + try: + device_info = storage_system.storageDeviceInfo + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not device_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage device info was not retrieved' + ''.format(hostname)) + + scsi_luns = device_info.scsiLun + if scsi_luns: + log.trace('Retrieved scsi luns in host \'{0}\': {1}' + ''.format(hostname, [l.canonicalName for l in scsi_luns])) + return scsi_luns + log.trace('Retrieved no scsi_luns in host \'{0}\''.format(hostname)) + return [] + + +def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): + ''' + Returns a map of all vim.ScsiLun objects on an ESXi host keyed by their + scsi address + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + storage_system + The host's storage system. Default is None. + + hostname + Name of the host. This argument is optional. 
+ ''' + if not hostname: + hostname = get_managed_object_name(host_ref) + si = get_service_instance_from_managed_object(host_ref, name=hostname) + if not storage_system: + storage_system = get_storage_system(si, host_ref, hostname) + lun_ids_to_scsi_addr_map = \ + _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, + hostname) + luns_to_key_map = {d.key: d for d in + get_all_luns(host_ref, storage_system, hostname)} + return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in + six.iteritems(lun_ids_to_scsi_addr_map)} + + +def get_disks(host_ref, disk_ids=None, scsi_addresses=None, + get_all_disks=False): + ''' + Returns a list of vim.HostScsiDisk objects representing disks + in an ESXi host, filtered by their canonical names and scsi_addresses + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + disk_ids + The list of canonical names of the disks to be retrieved. Default value + is None + + scsi_addresses + The list of scsi addresses of the disks to be retrieved. Default value + is None + + get_all_disks + Specifies whether to retrieve all disks in the host. + Default value is False. + ''' + hostname = get_managed_object_name(host_ref) + if get_all_disks: + log.trace('Retrieving all disks in host \'{0}\''.format(hostname)) + else: + log.trace('Retrieving disks in host \'{0}\': ids = ({1}); scsi ' + 'addresses = ({2})'.format(hostname, disk_ids, + scsi_addresses)) + if not (disk_ids or scsi_addresses): + return [] + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + disk_keys = [] + if scsi_addresses: + # convert the scsi addresses to disk keys + lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, + storage_system, + hostname) + disk_keys = [key for scsi_addr, key + in six.iteritems(lun_key_by_scsi_addr) + if scsi_addr in scsi_addresses] + log.trace('disk_keys based on scsi_addresses = {0}'.format(disk_keys)) + + scsi_luns = get_all_luns(host_ref, storage_system) + scsi_disks = [disk for disk in scsi_luns + if isinstance(disk, vim.HostScsiDisk) and ( + get_all_disks or + # Filter by canonical name + (disk_ids and (disk.canonicalName in disk_ids)) or + # Filter by disk keys from scsi addresses + (disk.key in disk_keys))] + log.trace('Retrieved disks in host \'{0}\': {1}' + ''.format(hostname, [d.canonicalName for d in scsi_disks])) + return scsi_disks + + +def get_disk_partition_info(host_ref, disk_id, storage_system=None): + ''' + Returns all partitions on a disk + + host_ref + The reference of the ESXi host containing the disk + + disk_id + The canonical name of the disk whose partitions are to be retrieved + + storage_system + The ESXi host's storage system. Default is None. 
+ ''' + hostname = get_managed_object_name(host_ref) + service_instance = get_service_instance_from_managed_object(host_ref) + if not storage_system: + storage_system = get_storage_system(service_instance, host_ref, + hostname) + + props = get_properties_of_managed_object(storage_system, + ['storageDeviceInfo.scsiLun']) + if not props.get('storageDeviceInfo.scsiLun'): + raise salt.exceptions.VMwareObjectRetrievalError( + 'No devices were retrieved in host \'{0}\''.format(hostname)) + log.trace('[{0}] Retrieved {1} devices: {2}'.format( + hostname, len(props['storageDeviceInfo.scsiLun']), + ', '.join([l.canonicalName + for l in props['storageDeviceInfo.scsiLun']]))) + disks = [l for l in props['storageDeviceInfo.scsiLun'] + if isinstance(l, vim.HostScsiDisk) and + l.canonicalName == disk_id] + if not disks: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Disk \'{0}\' was not found in host \'{1}\'' + ''.format(disk_id, hostname)) + log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath)) + partition_info = _get_partition_info(storage_system, disks[0].devicePath) + log.trace('[{0}] Retrieved {1} partition(s) on disk \'{2}\'' + ''.format(hostname, len(partition_info.spec.partition), disk_id)) + return partition_info + + +def erase_disk_partitions(service_instance, host_ref, disk_id, + hostname=None, storage_system=None): + ''' + Erases all partitions on a disk + + service_instance + The Service Instance Object from which to obtain all information + + host_ref + The reference of the ESXi host containing the disk + + disk_id + The canonical name of the disk whose partitions are to be removed + + hostname + The ESXi hostname. Default is None. + + storage_system + The ESXi host's storage system. Default is None. + ''' + + if not hostname: + hostname = get_managed_object_name(host_ref) + if not storage_system: + storage_system = get_storage_system(service_instance, host_ref, + hostname) + + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.storageSystem', + type=vim.HostSystem, + skip=False) + results = get_mors_with_properties(service_instance, + vim.HostStorageSystem, + ['storageDeviceInfo.scsiLun'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not results: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) + log.trace('[{0}] Retrieved {1} devices: {2}'.format( + hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), + ', '.join([l.canonicalName for l in + results[0].get('storageDeviceInfo.scsiLun', [])]))) + disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) + if isinstance(l, vim.HostScsiDisk) and + l.canonicalName == disk_id] + if not disks: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Disk \'{0}\' was not found in host \'{1}\'' + ''.format(disk_id, hostname)) + log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath)) + # Erase the partitions by setting an empty partition spec + try: + storage_system.UpdateDiskPartitions(disks[0].devicePath, + vim.HostDiskPartitionSpec()) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.trace('[{0}] Erased partitions on disk \'{1}\'' + ''.format(hostname, disk_id)) + + +def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): + ''' + Returns a list of vim.VsanHostDiskMapping objects representing disk + groups in an ESXi host, filtered by the canonical names of their cache + disks. + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + cache_disk_ids + The list of canonical names of the cache disks to be retrieved. The + canonical name of the cache disk is enough to identify the disk group + because it is guaranteed to have one and only one cache disk. + Default is None. + + get_all_disk_groups + Specifies whether to retrieve all disk groups in the host. + Default value is False. + ''' + hostname = get_managed_object_name(host_ref) + if get_all_disk_groups: + log.trace('Retrieving all disk groups on host \'{0}\'' + ''.format(hostname)) + else: + log.trace('Retrieving disk groups from host \'{0}\', with cache disk ' + 'ids : ({1})'.format(hostname, cache_disk_ids)) + if not cache_disk_ids: + return [] + try: + vsan_host_config = host_ref.config.vsanHostConfig + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not vsan_host_config: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No host config found on host \'{0}\''.format(hostname)) + vsan_storage_info = vsan_host_config.storageInfo + if not vsan_storage_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No vsan storage info found on host \'{0}\''.format(hostname)) + vsan_disk_mappings = vsan_storage_info.diskMapping + if not vsan_disk_mappings: + return [] + disk_groups = [dm for dm in vsan_disk_mappings if + (get_all_disk_groups or + (dm.ssd.canonicalName in cache_disk_ids))] + log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : ' + '{1}'.format(hostname, + [d.ssd.canonicalName for d in disk_groups])) + return disk_groups + + +def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): + ''' + Checks that the disks in a disk group are as expected and raises + ArgumentValueError exceptions if the check fails + ''' + if not disk_group.ssd.canonicalName == cache_disk_id: + raise salt.exceptions.ArgumentValueError( + 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' + '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) + if sorted([d.canonicalName for d in disk_group.nonSsd]) != \ + sorted(capacity_disk_ids): + + raise salt.exceptions.ArgumentValueError( + 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' + ''.format(sorted([d.canonicalName for d in disk_group.nonSsd]), + sorted(capacity_disk_ids))) + log.trace('Checked disks in diskgroup with cache disk id \'{0}\'' + ''.format(cache_disk_id)) + return True + + +#TODO Support host caches on multiple datastores +def get_host_cache(host_ref, host_cache_manager=None): + ''' + Returns a vim.HostCacheConfigurationInfo object if the host cache is 
configured on the specified + host, otherwise returns None + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + host_cache_manager + The vim.HostCacheConfigurationManager object representing the cache + configuration manager on the specified host. Default is None. If None, + it will be retrieved in the method + ''' + hostname = get_managed_object_name(host_ref) + service_instance = get_service_instance_from_managed_object(host_ref) + log.trace('Retrieving the host cache on host \'{0}\''.format(hostname)) + if not host_cache_manager: + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.cacheConfigurationManager', + type=vim.HostSystem, + skip=False) + results = get_mors_with_properties(service_instance, + vim.HostCacheConfigurationManager, + ['cacheConfigurationInfo'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not results or not results[0].get('cacheConfigurationInfo'): + log.trace('Host \'{0}\' has no host cache'.format(hostname)) + return None + return results[0]['cacheConfigurationInfo'][0] + else: + results = get_properties_of_managed_object(host_cache_manager, + ['cacheConfigurationInfo']) + if not results: + log.trace('Host \'{0}\' has no host cache'.format(hostname)) + return None + return results['cacheConfigurationInfo'][0] + + +#TODO Support host caches on multiple datastores +def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, + host_cache_manager=None): + ''' + Configures the host cache of the specified host + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + datastore_ref + The vim.Datastore object representing the datastore the host cache will + be configured on. + + swap_size_MiB + The size in mebibytes of the swap. + + host_cache_manager + The vim.HostCacheConfigurationManager object representing the cache + configuration manager on the specified host. Default is None. If None, + it will be retrieved in the method + ''' + hostname = get_managed_object_name(host_ref) + if not host_cache_manager: + props = get_properties_of_managed_object( + host_ref, ['configManager.cacheConfigurationManager']) + if not props.get('configManager.cacheConfigurationManager'): + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host \'{0}\' has no host cache'.format(hostname)) + host_cache_manager = props['configManager.cacheConfigurationManager'] + log.trace('Configuring the host cache on host \'{0}\', datastore \'{1}\', ' + 'swap size={2} MiB'.format(hostname, datastore_ref.name, + swap_size_MiB)) + + spec = vim.HostCacheConfigurationSpec( + datastore=datastore_ref, + swapSize=swap_size_MiB) + log.trace('host_cache_spec={0}'.format(spec)) + try: + task = host_cache_manager.ConfigureHostCache_Task(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, hostname, 'HostCacheConfigurationTask') + log.trace('Configured host cache on host \'{0}\''.format(hostname)) + return True + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
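A minimal usage sketch of how the new storage helpers above compose (not part of the diff; it assumes an authenticated service instance from ``salt.utils.vmware.get_service_instance``, and the vCenter address, credentials, host name, SCSI address, and datastore name are placeholders):

.. code-block:: python

    import salt.utils.vmware as vmware

    # Connect to vCenter (all names and credentials below are illustrative)
    si = vmware.get_service_instance(host='vcenter.example.com',
                                     username='admin',
                                     password='secret')
    # Pick a host by name, then resolve one of its disks by SCSI address
    host = vmware.get_hosts(si, datacenter_name='dc1',
                            host_names=['esxi1.example.com'])[0]
    disks = vmware.get_disks(host, scsi_addresses=['vmhba0:C0:T0:L0'])
    # Wipe the disk, then carve a VMFS 5 datastore out of its free space
    vmware.erase_disk_partitions(si, host, disks[0].canonicalName)
    vmware.create_vmfs_datastore(host, 'new-datastore', disks[0], 5)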
diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 8ad713cd3e2..4e124c9f6f4 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -49,7 +49,8 @@ import logging import ssl # Import Salt Libs -from salt.exceptions import VMwareApiError, VMwareRuntimeError +from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ + VMwareObjectRetrievalError import salt.utils.vmware try: @@ -129,6 +130,308 @@ def get_vsan_cluster_config_system(service_instance): return vc_mos['vsan-cluster-config-system'] +def get_vsan_disk_management_system(service_instance): + ''' + Returns a vim.VimClusterVsanVcDiskManagementSystem object + + service_instance + Service instance to the host or vCenter + ''' + + #TODO Replace when better connection mechanism is available + + #For Python 2.7.9 and later, the default SSL context has stricter + #connection handshaking rules. We may need to turn off the hostname + #checking and client side cert verification + context = None + if sys.version_info[:3] > (2, 7, 8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + stub = service_instance._stub + vc_mos = vsanapiutils.GetVsanVcMos(stub, context=context) + return vc_mos['vsan-disk-management-system'] + + +def get_host_vsan_system(service_instance, host_ref, hostname=None): + ''' + Returns a host's vsan system + + service_instance + Service instance to the host or vCenter + + host_ref + Reference to the ESXi host + + hostname + Name of ESXi host. Default value is None. + ''' + if not hostname: + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.vsanSystem', + type=vim.HostSystem, + skip=False) + objs = salt.utils.vmware.get_mors_with_properties( + service_instance, vim.HostVsanSystem, property_list=['config.enabled'], + container_ref=host_ref, traversal_spec=traversal_spec) + if not objs: + raise VMwareObjectRetrievalError('Host\'s \'{0}\' VSAN system was ' + 'not retrieved'.format(hostname)) + log.trace('[{0}] Retrieved VSAN system'.format(hostname)) + return objs[0]['object'] + + +def create_diskgroup(service_instance, vsan_disk_mgmt_system, + host_ref, cache_disk, capacity_disks): + ''' + Creates a disk group + + service_instance + Service instance to the host or vCenter + + vsan_disk_mgmt_system + vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk + management system retrieved from the vsan endpoint. + + host_ref + vim.HostSystem object representing the target host the disk group will + be created on + + cache_disk + The vim.HostScsiDisk to be used as a cache disk. It must be an ssd disk. + + capacity_disks + List of vim.HostScsiDisk objects representing the disks to be used as + capacity disks. Can be either ssd or non-ssd. There must be a minimum + of 1 capacity disk in the list. 
+ ''' + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + cache_disk_id = cache_disk.canonicalName + log.debug('Creating a new disk group with cache disk \'{0}\' on host ' + '\'{1}\''.format(cache_disk_id, hostname)) + log.trace('capacity_disk_ids = {0}'.format([c.canonicalName for c in + capacity_disks])) + spec = vim.VimVsanHostDiskMappingCreationSpec() + spec.cacheDisks = [cache_disk] + spec.capacityDisks = capacity_disks + # All capacity disks must be either ssd or non-ssd (mixed disks are not + # supported) + spec.creationType = 'allFlash' if getattr(capacity_disks[0], 'ssd') \ + else 'hybrid' + spec.host = host_ref + try: + task = vsan_disk_mgmt_system.InitializeDiskMappings(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.fault.MethodNotFound as exc: + log.exception(exc) + raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method)) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + _wait_for_tasks([task], service_instance) + return True + + +def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system, + host_ref, diskgroup, new_capacity_disks): + ''' + Adds capacity disk(s) to a disk group. + + service_instance + Service instance to the host or vCenter + + vsan_disk_mgmt_system + vim.VimClusterVsanVcDiskManagementSystem representing the vSAN disk + management system retrieved from the vsan endpoint. + + host_ref + vim.HostSystem object representing the target host containing the + disk group + + diskgroup + The vim.VsanHostDiskMapping object representing the host's diskgroup where + the additional capacity needs to be added + + new_capacity_disks + List of vim.HostScsiDisk objects representing the disks to be added as + capacity disks. Can be either ssd or non-ssd. There must be a minimum + of 1 new capacity disk in the list. + ''' + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + cache_disk = diskgroup.ssd + cache_disk_id = cache_disk.canonicalName + log.debug('Adding capacity to disk group with cache disk \'{0}\' on host ' + '\'{1}\''.format(cache_disk_id, hostname)) + log.trace('new_capacity_disk_ids = {0}'.format([c.canonicalName for c in + new_capacity_disks])) + spec = vim.VimVsanHostDiskMappingCreationSpec() + spec.cacheDisks = [cache_disk] + spec.capacityDisks = new_capacity_disks + # All new capacity disks must be either ssd or non-ssd (mixed disks are not + # supported); also they need to match the type of the existing capacity + # disks; we assume disks are already validated + spec.creationType = 'allFlash' if getattr(new_capacity_disks[0], 'ssd') \ + else 'hybrid' + spec.host = host_ref + try: + task = vsan_disk_mgmt_system.InitializeDiskMappings(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.fault.MethodNotFound as exc: + log.exception(exc) + raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method)) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + _wait_for_tasks([task], service_instance) + return True + + +def remove_capacity_from_diskgroup(service_instance, host_ref, diskgroup, + capacity_disks, data_evacuation=True, + hostname=None, + host_vsan_system=None): + ''' + Removes capacity disk(s) from a disk group. + + service_instance + Service instance to the host or vCenter + + host_ref + Reference to the ESXi host + + diskgroup + The vim.VsanHostDiskMapping object representing the host's diskgroup from + where the capacity needs to be removed + + capacity_disks + List of vim.HostScsiDisk objects representing the capacity disks to be + removed. Can be either ssd or non-ssd. There must be a minimum + of 1 capacity disk in the list. + + data_evacuation + Specifies whether to gracefully evacuate the data on the capacity disks + before removing them from the disk group. Default value is True. + + hostname + Name of ESXi host. Default value is None. + + host_vsan_system + ESXi host's VSAN system. Default value is None. + ''' + if not hostname: + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + cache_disk = diskgroup.ssd + cache_disk_id = cache_disk.canonicalName + log.debug('Removing capacity from disk group with cache disk \'{0}\' on ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + log.trace('capacity_disk_ids = {0}'.format([c.canonicalName for c in + capacity_disks])) + if not host_vsan_system: + host_vsan_system = get_host_vsan_system(service_instance, + host_ref, hostname) + # Set to evacuate all data before removing the disks + maint_spec = vim.HostMaintenanceSpec() + maint_spec.vsanMode = vim.VsanHostDecommissionMode() + if data_evacuation: + maint_spec.vsanMode.objectAction = \ + vim.VsanHostDecommissionModeObjectAction.evacuateAllData + else: + maint_spec.vsanMode.objectAction = \ + vim.VsanHostDecommissionModeObjectAction.noAction + try: + task = host_vsan_system.RemoveDisk_Task(disk=capacity_disks, + maintenanceSpec=maint_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + salt.utils.vmware.wait_for_task(task, hostname, 'remove_capacity') + return True + + +def remove_diskgroup(service_instance, host_ref, diskgroup, hostname=None, + host_vsan_system=None, erase_disk_partitions=False, + data_accessibility=True): + ''' + Removes a disk group. + + service_instance + Service instance to the host or vCenter + + host_ref + Reference to the ESXi host + + diskgroup + The vim.VsanHostDiskMapping object representing the disk group to be + removed + + hostname + Name of ESXi host. Default value is None. + + host_vsan_system + ESXi host's VSAN system. Default value is None. + + data_accessibility + Specifies whether to ensure data accessibility. Default value is True. 
+ ''' + if not hostname: + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + cache_disk_id = diskgroup.ssd.canonicalName + log.debug('Removing disk group with cache disk \'{0}\' on ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + if not host_vsan_system: + host_vsan_system = get_host_vsan_system( + service_instance, host_ref, hostname) + # Set to evacuate all data before removing the disks + maint_spec = vim.HostMaintenanceSpec() + maint_spec.vsanMode = vim.VsanHostDecommissionMode() + object_action = vim.VsanHostDecommissionModeObjectAction + if data_accessibility: + maint_spec.vsanMode.objectAction = \ + object_action.ensureObjectAccessibility + else: + maint_spec.vsanMode.objectAction = object_action.noAction + try: + task = host_vsan_system.RemoveDiskMapping_Task( + mapping=[diskgroup], maintenanceSpec=maint_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + salt.utils.vmware.wait_for_task(task, hostname, 'remove_diskgroup') + log.debug('Removed disk group with cache disk \'{0}\' ' + 'on host \'{1}\''.format(cache_disk_id, hostname)) + return True + + def get_cluster_vsan_info(cluster_ref): ''' Returns the extended cluster vsan configuration object diff --git a/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml b/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml new file mode 100644 index 00000000000..10f2865df73 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml @@ -0,0 +1,6 @@ +classes: + - app.ssh.server + +pillars: + sshd: + root_access: yes diff --git a/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml b/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml new file mode 100644 index 00000000000..9ebd94322f2 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml @@ -0,0 +1,4 @@ +pillars: + sshd: + root_access: no + ssh_port: 22 diff --git a/tests/integration/files/saltclass/examples/classes/default/init.yml b/tests/integration/files/saltclass/examples/classes/default/init.yml new file mode 100644 index 00000000000..20a5e450883 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/init.yml @@ -0,0 +1,17 @@ +classes: + - default.users + - default.motd + +states: + - openssh + +pillars: + default: + network: + dns: + srv1: 192.168.0.1 + srv2: 192.168.0.2 + domain: example.com + ntp: + srv1: 192.168.10.10 + srv2: 192.168.10.20 diff --git a/tests/integration/files/saltclass/examples/classes/default/motd.yml b/tests/integration/files/saltclass/examples/classes/default/motd.yml new file mode 100644 index 00000000000..18938d7b1af --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/motd.yml @@ -0,0 +1,3 @@ +pillars: + motd: + text: "Welcome to {{ __grains__['id'] }} system located in ${default:network:sub}" diff --git a/tests/integration/files/saltclass/examples/classes/default/users.yml b/tests/integration/files/saltclass/examples/classes/default/users.yml new file mode 100644 index 00000000000..8bfba671091 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/users.yml @@ -0,0 +1,16 @@ +states: + - user_mgt + +pillars: + default: + users: + adm1: + 
uid: 1201 + gid: 1201 + gecos: 'Super user admin1' + homedir: /home/adm1 + adm2: + uid: 1202 + gid: 1202 + gecos: 'Super user admin2' + homedir: /home/adm2 diff --git a/tests/integration/files/saltclass/examples/classes/roles/app.yml b/tests/integration/files/saltclass/examples/classes/roles/app.yml new file mode 100644 index 00000000000..af244e402ce --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/app.yml @@ -0,0 +1,21 @@ +states: + - app + +pillars: + app: + config: + dns: + srv1: ${default:network:dns:srv1} + srv2: ${default:network:dns:srv2} + uri: https://application.domain/call?\${test} + prod_parameters: + - p1 + - p2 + - p3 + pkg: + - app-core + - app-backend +# Safe minion_id matching +{% if minion_id == 'zrh.node3' %} + safe_pillar: '_only_ zrh.node3 will see this pillar and this cannot be overriden like grains' +{% endif %} diff --git a/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml b/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml new file mode 100644 index 00000000000..996ded51fa5 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml @@ -0,0 +1,7 @@ +states: + - nginx_deployment + +pillars: + nginx: + pkg: + - nginx diff --git a/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml b/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml new file mode 100644 index 00000000000..bc290997a6e --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml @@ -0,0 +1,7 @@ +classes: + - roles.nginx + +pillars: + nginx: + pkg: + - nginx-module diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml new file mode 100644 index 00000000000..7e7c39c60cd --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml @@ -0,0 +1,20 @@ +pillars: + default: + network: + sub: Geneva + dns: + srv1: 10.20.0.1 + srv2: 10.20.0.2 + srv3: 192.168.1.1 + domain: gnv.example.com + users: + adm1: + uid: 1210 + gid: 1210 + gecos: 'Super user admin1' + homedir: /srv/app/adm1 + adm3: + uid: 1203 + gid: 1203 + gecos: 'Super user admin3' + homedir: /home/adm3 diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml new file mode 100644 index 00000000000..22895482768 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml @@ -0,0 +1,17 @@ +classes: + - app.ssh.server + - roles.nginx.server + +pillars: + default: + network: + sub: Lausanne + dns: + srv1: 10.10.0.1 + domain: qls.example.com + users: + nginx_adm: + uid: 250 + gid: 200 + gecos: 'Nginx admin user' + homedir: /srv/www diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml new file mode 100644 index 00000000000..ac30dc73b9a --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml @@ -0,0 +1,24 @@ +classes: + - roles.app + # This should validate that we process a class only once + - app.borgbackup + # As this one should not be processed + # and would override in turn overrides from app.borgbackup + - app.ssh.server + +pillars: + default: + network: + sub: Zurich + dns: + srv1: 10.30.0.1 + srv2: 10.30.0.2 + domain: zrh.example.com + ntp: + srv1: 10.0.0.127 + users: + 
adm1: + uid: 250 + gid: 250 + gecos: 'Super user admin1' + homedir: /srv/app/1 diff --git a/tests/integration/files/saltclass/examples/nodes/fake_id.yml b/tests/integration/files/saltclass/examples/nodes/fake_id.yml new file mode 100644 index 00000000000..a87137e6fbe --- /dev/null +++ b/tests/integration/files/saltclass/examples/nodes/fake_id.yml @@ -0,0 +1,6 @@ +environment: base + +classes: +{% for class in ['default'] %} + - {{ class }} +{% endfor %} diff --git a/tests/integration/modules/test_nilrt_ip.py b/tests/integration/modules/test_nilrt_ip.py index 1412cffb2d2..5c2fbc0bfba 100644 --- a/tests/integration/modules/test_nilrt_ip.py +++ b/tests/integration/modules/test_nilrt_ip.py @@ -98,13 +98,13 @@ class Nilrt_ipModuleTest(ModuleCase): def test_static_all(self): interfaces = self.__interfaces() for interface in interfaces: - result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 my.dns.com']) + result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 8.8.8.8']) self.assertTrue(result) info = self.run_function('ip.get_interfaces_details') for interface in info['interfaces']: self.assertIn('8.8.4.4', interface['ipv4']['dns']) - self.assertIn('my.dns.com', interface['ipv4']['dns']) + self.assertIn('8.8.8.8', interface['ipv4']['dns']) self.assertEqual(interface['ipv4']['requestmode'], 'static') self.assertEqual(interface['ipv4']['address'], '192.168.10.4') self.assertEqual(interface['ipv4']['netmask'], '255.255.255.0') diff --git a/tests/support/gitfs.py b/tests/support/gitfs.py index 411bfd27ce1..72871476014 100644 --- a/tests/support/gitfs.py +++ b/tests/support/gitfs.py @@ -341,7 +341,8 @@ class GitPillarTestBase(GitTestBase, LoaderModuleMockMixin): with patch.dict(git_pillar.__opts__, ext_pillar_opts): return git_pillar.ext_pillar( 'minion', - ext_pillar_opts['ext_pillar'][0]['git'], + {}, + *ext_pillar_opts['ext_pillar'][0]['git'] ) def make_repo(self, root_dir, user='root'): diff --git a/tests/unit/beacons/test_status_beacon.py b/tests/unit/beacons/test_status_beacon.py index fca75763445..4ab3d83a779 100644 --- a/tests/unit/beacons/test_status_beacon.py +++ b/tests/unit/beacons/test_status_beacon.py @@ -12,6 +12,7 @@ # Python libs from __future__ import absolute_import +import sys # Salt libs import salt.config @@ -45,14 +46,32 @@ class StatusBeaconTestCase(TestCase, LoaderModuleMockMixin): def test_empty_config(self, *args, **kwargs): config = {} ret = status.beacon(config) - self.assertEqual(sorted(list(ret[0]['data'])), sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time'])) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time']) + + self.assertEqual(sorted(list(ret[0]['data'])), expected) def test_deprecated_dict_config(self): config = {'time': ['all']} ret = status.beacon(config) - self.assertEqual(list(ret[0]['data']), ['time']) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = ['time'] + + self.assertEqual(list(ret[0]['data']), expected) def test_list_config(self): config = [{'time': ['all']}] ret = status.beacon(config) - self.assertEqual(list(ret[0]['data']), ['time']) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = ['time'] + + self.assertEqual(list(ret[0]['data']), expected) diff --git a/tests/unit/daemons/test_masterapi.py b/tests/unit/daemons/test_masterapi.py index 29ea37ecd47..d2f59312279 100644 --- 
a/tests/unit/daemons/test_masterapi.py +++ b/tests/unit/daemons/test_masterapi.py @@ -63,7 +63,7 @@ class LocalFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.runner(load) self.assertDictEqual(mock_ret, ret) @@ -93,7 +93,7 @@ class LocalFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_runner_eauth_salt_invocation_errpr(self): + def test_runner_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -102,7 +102,7 @@ class LocalFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.runner(load) self.assertDictEqual(mock_ret, ret) @@ -146,7 +146,7 @@ class LocalFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.wheel(load) self.assertDictEqual(mock_ret, ret) @@ -176,7 +176,7 @@ class LocalFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_wheel_eauth_salt_invocation_errpr(self): + def test_wheel_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -185,7 +185,7 @@ class LocalFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.wheel(load) self.assertDictEqual(mock_ret, ret) diff --git a/tests/unit/modules/test_disk.py b/tests/unit/modules/test_disk.py index ef08b47fb73..fc4715b0d25 100644 --- a/tests/unit/modules/test_disk.py +++ b/tests/unit/modules/test_disk.py @@ -152,6 +152,8 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(disk.__salt__, {'cmd.retcode': mock}): self.assertEqual(disk.format_(device), True) + @skipIf(not salt.utils.which('lsblk') and not salt.utils.which('df'), + 'lsblk or df not found') def test_fstype(self): ''' unit tests for disk.fstype diff --git a/tests/unit/modules/test_environ.py b/tests/unit/modules/test_environ.py index e1724e35270..8cae25ef820 100644 --- a/tests/unit/modules/test_environ.py +++ b/tests/unit/modules/test_environ.py @@ -70,7 +70,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin): Set multiple salt process environment variables from a dict. Returns a dict. 
''' - mock_environ = {'key': 'value'} + mock_environ = {'KEY': 'value'} with patch.dict(os.environ, mock_environ): self.assertFalse(environ.setenv('environ')) @@ -83,7 +83,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(os.environ, mock_environ): mock_setval = MagicMock(return_value=None) with patch.object(environ, 'setval', mock_setval): - self.assertEqual(environ.setenv({}, False, True, False)['key'], + self.assertEqual(environ.setenv({}, False, True, False)['KEY'], None) def test_get(self): diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index e4d74f12662..d5806679233 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -10,7 +10,7 @@ import textwrap # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.paths import TMP -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch # Import Salt libs @@ -92,45 +92,56 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): 'repl': 'baz=\\g', 'append_if_not_found': True, } - base = 'foo=1\nbar=2' - expected = '{base}\n{repl}\n'.format(base=base, **args) + base = os.linesep.join(['foo=1', 'bar=2']) + # File ending with a newline, no match - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write(base + '\n') + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes(base + os.linesep)) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.replace(tfile.name, **args) + expected = os.linesep.join([base, 'baz=\\g']) + os.linesep + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # File not ending with a newline, no match - with tempfile.NamedTemporaryFile('w+') as tfile: - tfile.write(base) + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes(base)) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.replace(tfile.name, **args) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # A newline should not be added in empty files - with tempfile.NamedTemporaryFile('w+') as tfile: - filemod.replace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), args['repl'] + '\n') + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + pass + filemod.replace(tfile.name, **args) + expected = args['repl'] + os.linesep + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # Using not_found_content, rather than repl - with tempfile.NamedTemporaryFile('w+') as tfile: - args['not_found_content'] = 'baz=3' - expected = '{base}\n{not_found_content}\n'.format(base=base, **args) - tfile.write(base) + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes(base)) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + args['not_found_content'] = 'baz=3' + expected = os.linesep.join([base, 'baz=3']) + os.linesep + 
filemod.replace(tfile.name, **args) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # not appending if matches - with tempfile.NamedTemporaryFile('w+') as tfile: - base = 'foo=1\n#baz=42\nbar=2\n' - expected = 'foo=1\nbaz=42\nbar=2\n' - tfile.write(base) + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + base = os.linesep.join(['foo=1', 'baz=42', 'bar=2']) + tfile.write(salt.utils.to_bytes(base)) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + expected = base + filemod.replace(tfile.name, **args) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) def test_backup(self): fext = '.bak' @@ -250,25 +261,26 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): del self.tfile def test_replace_multiline(self): - new_multiline_content = ( - "Who's that then?\nWell, how'd you become king," - "then?\nWe found them. I'm not a witch.\nWe shall" - "say 'Ni' again to you, if you do not appease us." - ) + new_multiline_content = os.linesep.join([ + "Who's that then?", + "Well, how'd you become king, then?", + "We found them. I'm not a witch.", + "We shall say 'Ni' again to you, if you do not appease us." + ]) filemod.blockreplace(self.tfile.name, '#-- START BLOCK 1', '#-- END BLOCK 1', new_multiline_content, backup=False) - with salt.utils.files.fopen(self.tfile.name, 'r') as fp: + with salt.utils.files.fopen(self.tfile.name, 'rb') as fp: filecontent = fp.read() - self.assertIn('#-- START BLOCK 1' - + "\n" + new_multiline_content - + "\n" - + '#-- END BLOCK 1', filecontent) - self.assertNotIn('old content part 1', filecontent) - self.assertNotIn('old content part 2', filecontent) + self.assertIn(salt.utils.to_bytes( + os.linesep.join([ + '#-- START BLOCK 1', new_multiline_content, '#-- END BLOCK 1'])), + filecontent) + self.assertNotIn(b'old content part 1', filecontent) + self.assertNotIn(b'old content part 2', filecontent) def test_replace_append(self): new_content = "Well, I didn't vote for you." 
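A recurring pattern in these test changes is worth calling out: writes switch from text mode to ``'w+b'`` with explicit ``os.linesep``, and ``delete=False`` replaces the context-managed cleanup, because on Windows a file still held open by ``NamedTemporaryFile`` cannot be reopened by name, and text mode would translate newlines behind the test's back. A standalone sketch of the idiom (plain ``encode`` stands in for ``salt.utils.to_bytes``):

.. code-block:: python

    import os
    import tempfile

    # Write bytes so os.linesep lands in the file verbatim (no newline
    # translation), and keep the file around so it can be reopened by name.
    with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile:
        tfile.write(('foo' + os.linesep).encode('utf-8'))
        tfile.flush()
    # ... run the function under test against tfile.name here ...
    with open(tfile.name, 'rb') as handle:
        assert handle.read() == ('foo' + os.linesep).encode('utf-8')
    os.remove(tfile.name)  # manual cleanup, since delete=False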
@@ -295,10 +307,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): backup=False, append_if_not_found=True) - with salt.utils.files.fopen(self.tfile.name, 'r') as fp: - self.assertIn('#-- START BLOCK 2' - + "\n" + new_content - + '#-- END BLOCK 2', fp.read()) + with salt.utils.files.fopen(self.tfile.name, 'rb') as fp: + self.assertIn(salt.utils.to_bytes( + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)])), + fp.read()) def test_replace_append_newline_at_eof(self): ''' @@ -312,27 +326,33 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): 'content': 'baz', 'append_if_not_found': True, } - block = '{marker_start}\n{content}{marker_end}\n'.format(**args) - expected = base + '\n' + block + block = os.linesep.join(['#start', 'baz#stop']) + os.linesep # File ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write(base + '\n') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes(base + os.linesep)) tfile.flush() - filemod.blockreplace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.blockreplace(tfile.name, **args) + expected = os.linesep.join([base, block]) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # File not ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write(base) + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes(base)) tfile.flush() - filemod.blockreplace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.blockreplace(tfile.name, **args) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # A newline should not be added in empty files - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - filemod.blockreplace(tfile.name, **args) - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), block) + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + pass + filemod.blockreplace(tfile.name, **args) + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), block) + os.remove(tfile.name) def test_replace_prepend(self): new_content = "Well, I didn't vote for you." 
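The blockreplace assertions above and below all build their expected strings the same way; the marker layout is easiest to see in isolation. A small sketch of the block these tests expect when a missing block is appended or prepended (markers and content taken from the tests themselves):

.. code-block:: python

    import os

    marker_start = '#-- START BLOCK 2'
    marker_end = '#-- END BLOCK 2'
    new_content = "Well, I didn't vote for you."

    # The block is marker_start, then the content, with marker_end appended
    # directly after the content's final line (the content carries no
    # trailing newline); lines join on os.linesep.
    expected_block = os.linesep.join(
        [marker_start, '{0}{1}'.format(new_content, marker_end)])
    print(expected_block)
    # #-- START BLOCK 2
    # Well, I didn't vote for you.#-- END BLOCK 2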
@@ -347,10 +367,11 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): prepend_if_not_found=False, backup=False ) - with salt.utils.files.fopen(self.tfile.name, 'r') as fp: - self.assertNotIn( - '#-- START BLOCK 2' + "\n" - + new_content + '#-- END BLOCK 2', + with salt.utils.files.fopen(self.tfile.name, 'rb') as fp: + self.assertNotIn(salt.utils.to_bytes( + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)])), fp.read()) filemod.blockreplace(self.tfile.name, @@ -359,12 +380,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): backup=False, prepend_if_not_found=True) - with salt.utils.files.fopen(self.tfile.name, 'r') as fp: + with salt.utils.files.fopen(self.tfile.name, 'rb') as fp: self.assertTrue( - fp.read().startswith( - '#-- START BLOCK 2' - + "\n" + new_content - + '#-- END BLOCK 2')) + fp.read().startswith(salt.utils.to_bytes( + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)])))) def test_replace_partial_marked_lines(self): filemod.blockreplace(self.tfile.name, @@ -481,6 +502,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): } } + @skipIf(salt.utils.is_windows(), 'SED is not available on Windows') def test_sed_limit_escaped(self): with tempfile.NamedTemporaryFile(mode='w+') as tfile: tfile.write(SED_CONTENT) @@ -505,127 +527,131 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): newlines at end of file. ''' # File ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write('foo\n') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes('foo' + os.linesep)) tfile.flush() - filemod.append(tfile.name, 'bar') - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'foo\nbar\n') + filemod.append(tfile.name, 'bar') + expected = os.linesep.join(['foo', 'bar']) + os.linesep + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + # File not ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write('foo') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes('foo')) tfile.flush() + filemod.append(tfile.name, 'bar') + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + + # A newline should be added in empty files + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: filemod.append(tfile.name, 'bar') - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'foo\nbar\n') - # A newline should not be added in empty files - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - filemod.append(tfile.name, 'bar') - with salt.utils.files.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'bar\n') + with salt.utils.files.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), 'bar' + os.linesep) def test_extract_hash(self): ''' Check various hash file formats. 
''' # With file name - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write( + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes( 'rc.conf ef6e82e4006dee563d98ada2a2a80a27\n' 'ead48423703509d37c4a90e6a0d53e143b6fc268 example.tar.gz\n' 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106 ./subdir/example.tar.gz\n' 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b foo.tar.bz2\n' - ) + )) tfile.flush() - result = filemod.extract_hash(tfile.name, '', '/rc.conf') - self.assertEqual(result, { - 'hsum': 'ef6e82e4006dee563d98ada2a2a80a27', - 'hash_type': 'md5' - }) + result = filemod.extract_hash(tfile.name, '', '/rc.conf') + self.assertEqual(result, { + 'hsum': 'ef6e82e4006dee563d98ada2a2a80a27', + 'hash_type': 'md5' + }) - result = filemod.extract_hash(tfile.name, '', '/example.tar.gz') - self.assertEqual(result, { + result = filemod.extract_hash(tfile.name, '', '/example.tar.gz') + self.assertEqual(result, { + 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', + 'hash_type': 'sha1' + }) + + # All the checksums in this test file are sha1 sums. We run this + # loop three times. The first pass tests auto-detection of hash + # type by length of the hash. The second tests matching a specific + # type. The third tests a failed attempt to match a specific type, + # since sha256 was requested but sha1 is what is in the file. + for hash_type in ('', 'sha1', 'sha256'): + # Test the source_hash_name argument. Even though there are + # matches in the source_hash file for both the file_name and + # source params, they should be ignored in favor of the + # source_hash_name. + file_name = '/example.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = './subdir/example.tar.gz' + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { + 'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) + + # Test both a file_name and source but no source_hash_name. + # Even though there are matches for both file_name and + # source_hash_name, file_name should be preferred. + file_name = '/example.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = None + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', 'hash_type': 'sha1' - }) + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) - # All the checksums in this test file are sha1 sums. We run this - # loop three times. The first pass tests auto-detection of hash - # type by length of the hash. The second tests matching a specific - # type. The third tests a failed attempt to match a specific type, - # since sha256 was requested but sha1 is what is in the file. - for hash_type in ('', 'sha1', 'sha256'): - # Test the source_hash_name argument. Even though there are - # matches in the source_hash file for both the file_name and - # source params, they should be ignored in favor of the - # source_hash_name. 
- file_name = '/example.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = './subdir/example.tar.gz' - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) - - # Test both a file_name and source but no source_hash_name. - # Even though there are matches for both file_name and - # source_hash_name, file_name should be preferred. - file_name = '/example.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = None - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) - - # Test both a file_name and source but no source_hash_name. - # Since there is no match for the file_name, the source is - # matched. - file_name = '/somefile.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = None - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) + # Test both a file_name and source but no source_hash_name. + # Since there is no match for the file_name, the source is + # matched. + file_name = '/somefile.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = None + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { + 'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) # Hash only, no file name (Maven repo checksum format) # Since there is no name match, the first checksum in the file will # always be returned, never the second. 
- with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write('ead48423703509d37c4a90e6a0d53e143b6fc268\n' - 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(salt.utils.to_bytes( + 'ead48423703509d37c4a90e6a0d53e143b6fc268\n' + 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n')) tfile.flush() - for hash_type in ('', 'sha1', 'sha256'): - result = filemod.extract_hash(tfile.name, hash_type, '/testfile') - expected = { - 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) + for hash_type in ('', 'sha1', 'sha256'): + result = filemod.extract_hash(tfile.name, hash_type, '/testfile') + expected = { + 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) def test_user_to_uid_int(self): ''' @@ -778,6 +804,7 @@ class FileBasicsTestCase(TestCase, LoaderModuleMockMixin): self.addCleanup(os.remove, self.myfile) self.addCleanup(delattr, self, 'myfile') + @skipIf(salt.utils.is_windows(), 'os.symlink is not available on Windows') def test_symlink_already_in_desired_state(self): os.symlink(self.tfile.name, self.directory + '/a_link') self.addCleanup(os.remove, self.directory + '/a_link') diff --git a/tests/unit/modules/test_hosts.py b/tests/unit/modules/test_hosts.py index 56f01f56ab2..7cd76994537 100644 --- a/tests/unit/modules/test_hosts.py +++ b/tests/unit/modules/test_hosts.py @@ -94,7 +94,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): Tests true if the alias is set ''' hosts_file = '/etc/hosts' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' with patch('salt.modules.hosts.__get_hosts_filename', @@ -198,7 +198,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): Tests if specified host entry gets added from the hosts file ''' hosts_file = '/etc/hosts' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' with patch('salt.utils.files.fopen', mock_open()), \ diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index e3d0ef73d8a..1e0e82d43c0 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -99,19 +99,20 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin): def test_delete_deployments(self): ''' - Tests deployment creation. + Tests deployment deletion :return: ''' with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: - with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): - mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") - mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( - **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': 200}} - ) - self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200}) - self.assertTrue( - kubernetes.kubernetes.client.ExtensionsV1beta1Api(). 
- delete_namespaced_deployment().to_dict.called) + with patch('salt.modules.kubernetes.show_deployment', Mock(return_value=None)): + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': ''}} + ) + self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). + delete_namespaced_deployment().to_dict.called) def test_create_deployments(self): ''' diff --git a/tests/unit/modules/test_poudriere.py b/tests/unit/modules/test_poudriere.py index 8b839aad152..b07d7b35431 100644 --- a/tests/unit/modules/test_poudriere.py +++ b/tests/unit/modules/test_poudriere.py @@ -50,10 +50,12 @@ class PoudriereTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it make jail ``jname`` pkgng aware. ''' - ret1 = 'Could not create or find required directory /tmp/salt' - ret2 = 'Looks like file /tmp/salt/salt-make.conf could not be created' - ret3 = {'changes': 'Created /tmp/salt/salt-make.conf'} - mock = MagicMock(return_value='/tmp/salt') + temp_dir = os.path.join('tmp', 'salt') + conf_file = os.path.join('tmp', 'salt', 'salt-make.conf') + ret1 = 'Could not create or find required directory {0}'.format(temp_dir) + ret2 = 'Looks like file {0} could not be created'.format(conf_file) + ret3 = {'changes': 'Created {0}'.format(conf_file)} + mock = MagicMock(return_value=temp_dir) mock_true = MagicMock(return_value=True) with patch.dict(poudriere.__salt__, {'config.option': mock, 'file.write': mock_true}): diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 56669b900e8..ed043f27283 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -639,6 +639,14 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'mechanism': 'fake_mechanism', 'principal': 'fake_principal', 'domain': 'fake_domain'} + self.vcenter_details = {'vcenter': 'fake_vcenter', + 'username': 'fake_username', + 'password': 'fake_password', + 'protocol': 'fake_protocol', + 'port': 'fake_port', + 'mechanism': 'fake_mechanism', + 'principal': 'fake_principal', + 'domain': 'fake_domain'} def tearDown(self): for attrname in ('esxi_host_details', 'esxi_vcenter_details', @@ -693,6 +701,17 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'fake_protocol', 'fake_port', 'fake_mechanism', 'fake_principal', 'fake_domain'), ret) + def test_vcenter_proxy_details(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='vcenter')): + with patch.dict(vsphere.__salt__, + {'vcenter.get_details': MagicMock( + return_value=self.vcenter_details)}): + ret = vsphere._get_proxy_connection_details() + self.assertEqual(('fake_vcenter', 'fake_username', 'fake_password', + 'fake_protocol', 'fake_port', 'fake_mechanism', + 'fake_principal', 'fake_domain'), ret) + def test_unsupported_proxy_details(self): with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value='unsupported')): @@ -890,7 +909,7 @@ class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with 
patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -933,7 +952,7 @@ class DisconnectTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -974,7 +993,7 @@ class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1049,7 +1068,7 @@ class ListDatacentersViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxcluster', 'esxdatacenter'] + supported_proxies = ['esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1127,7 +1146,7 @@ class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxdatacenter'] + supported_proxies = ['esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1339,12 +1358,15 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): def setUp(self): attrs = (('mock_si', MagicMock()), ('mock_dc', MagicMock()), - ('mock_cl', MagicMock())) + ('mock_cl', MagicMock()), + ('mock_root', MagicMock())) for attr, mock_obj in attrs: setattr(self, attr, mock_obj) self.addCleanup(delattr, self, attr) attrs = (('mock_get_datacenter', MagicMock(return_value=self.mock_dc)), - ('mock_get_cluster', MagicMock(return_value=self.mock_cl))) + ('mock_get_cluster', MagicMock(return_value=self.mock_cl)), + ('mock_get_root_folder', + MagicMock(return_value=self.mock_root))) for attr, mock_obj in attrs: setattr(self, attr, mock_obj) self.addCleanup(delattr, self, attr) @@ -1360,7 +1382,8 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): MagicMock(return_value=(None, None, None, None, None, None, None, None, 'datacenter'))), ('salt.utils.vmware.get_datacenter', self.mock_get_datacenter), - ('salt.utils.vmware.get_cluster', self.mock_get_cluster)) + ('salt.utils.vmware.get_cluster', self.mock_get_cluster), + ('salt.utils.vmware.get_root_folder', self.mock_get_root_folder)) for module, mock_obj in patches: patcher = patch(module, mock_obj) patcher.start() @@ -1409,3 +1432,10 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): MagicMock(return_value='esxdatacenter')): ret = vsphere._get_proxy_target(self.mock_si) self.assertEqual(ret, self.mock_dc) + + def test_vcenter_proxy_return(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='vcenter')): + ret = vsphere._get_proxy_target(self.mock_si) + self.mock_get_root_folder.assert_called_once_with(self.mock_si) + self.assertEqual(ret, self.mock_root) diff --git a/tests/unit/pillar/test_saltclass.py b/tests/unit/pillar/test_saltclass.py new file mode 100644 index 00000000000..30b63f8c548 --- /dev/null +++ b/tests/unit/pillar/test_saltclass.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import 
absolute_import +import os + +# Import Salt Testing libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON + +# Import Salt Libs +import salt.pillar.saltclass as saltclass + + +base_path = os.path.dirname(os.path.realpath(__file__)) +fake_minion_id = 'fake_id' +fake_pillar = {} +fake_args = ({'path': '{0}/../../integration/files/saltclass/examples'.format(base_path)}) +fake_opts = {} +fake_salt = {} +fake_grains = {} + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltclassPillarTestCase(TestCase, LoaderModuleMockMixin): + ''' + Tests for salt.pillar.saltclass + ''' + def setup_loader_modules(self): + return {saltclass: {'__opts__': fake_opts, + '__salt__': fake_salt, + '__grains__': fake_grains + }} + + def _runner(self, expected_ret): + full_ret = saltclass.ext_pillar(fake_minion_id, fake_pillar, fake_args) + parsed_ret = full_ret['__saltclass__']['classes'] + self.assertListEqual(parsed_ret, expected_ret) + + def test_succeeds(self): + ret = ['default.users', 'default.motd', 'default'] + self._runner(ret) diff --git a/tests/unit/returners/test_local_cache.py b/tests/unit/returners/test_local_cache.py index 741957ffd87..aa7117efb5d 100644 --- a/tests/unit/returners/test_local_cache.py +++ b/tests/unit/returners/test_local_cache.py @@ -97,7 +97,7 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin): local_cache.clean_old_jobs() # Get the name of the JID directory that was created to test against - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): jid_dir_name = jid_dir.rpartition('\\')[2] else: jid_dir_name = jid_dir.rpartition('/')[2] diff --git a/tests/unit/serializers/test_serializers.py b/tests/unit/serializers/test_serializers.py index 4f4890e06e0..980405f8b81 100644 --- a/tests/unit/serializers/test_serializers.py +++ b/tests/unit/serializers/test_serializers.py @@ -18,6 +18,7 @@ import salt.serializers.yaml as yaml import salt.serializers.yamlex as yamlex import salt.serializers.msgpack as msgpack import salt.serializers.python as python +from salt.serializers.yaml import EncryptedString from salt.serializers import SerializationError from salt.utils.odict import OrderedDict @@ -43,10 +44,11 @@ class TestSerializers(TestCase): @skipIf(not yaml.available, SKIP_MESSAGE % 'yaml') def test_serialize_yaml(self): data = { - "foo": "bar" + "foo": "bar", + "encrypted_data": EncryptedString("foo") } serialized = yaml.serialize(data) - assert serialized == '{foo: bar}', serialized + assert serialized == '{encrypted_data: !encrypted foo, foo: bar}', serialized deserialized = yaml.deserialize(serialized) assert deserialized == data, deserialized diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py index c663d2c45ca..b12fcb6a93b 100644 --- a/tests/unit/test_master.py +++ b/tests/unit/test_master.py @@ -63,7 +63,7 @@ class ClearFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.runner(clear_load) self.assertDictEqual(mock_ret, ret) @@ -93,7 +93,7 @@ class ClearFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_runner_eauth_salt_invocation_errpr(self): + def test_runner_eauth_salt_invocation_error(self): ''' 
Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -102,7 +102,7 @@ class ClearFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.runner(clear_load) self.assertDictEqual(mock_ret, ret) @@ -155,7 +155,7 @@ class ClearFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.wheel(clear_load) self.assertDictEqual(mock_ret, ret) @@ -185,7 +185,7 @@ class ClearFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_wheel_eauth_salt_invocation_errpr(self): + def test_wheel_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -194,7 +194,7 @@ class ClearFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.wheel(clear_load) self.assertDictEqual(mock_ret, ret) diff --git a/tests/unit/test_minion.py b/tests/unit/test_minion.py index e60e08edf30..b96d586ddd4 100644 --- a/tests/unit/test_minion.py +++ b/tests/unit/test_minion.py @@ -18,6 +18,7 @@ import salt.utils.event as event from salt.exceptions import SaltSystemExit import salt.syspaths import tornado +from salt.ext.six.moves import range __opts__ = {} @@ -69,7 +70,7 @@ class MinionTestCase(TestCase): mock_jid_queue = [123] try: minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop()) - ret = minion._handle_decoded_payload(mock_data) + ret = minion._handle_decoded_payload(mock_data).result() self.assertEqual(minion.jid_queue, mock_jid_queue) self.assertIsNone(ret) finally: @@ -98,7 +99,7 @@ class MinionTestCase(TestCase): # Call the _handle_decoded_payload function and update the mock_jid_queue to include the new # mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't # previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal. 
-            minion._handle_decoded_payload(mock_data)
+            minion._handle_decoded_payload(mock_data).result()
             mock_jid_queue.append(mock_jid)
             self.assertEqual(minion.jid_queue, mock_jid_queue)
         finally:
@@ -126,8 +127,54 @@ class MinionTestCase(TestCase):
             # Call the _handle_decoded_payload function and check that the queue is smaller by one item
             # and contains the new jid
-            minion._handle_decoded_payload(mock_data)
+            minion._handle_decoded_payload(mock_data).result()
             self.assertEqual(len(minion.jid_queue), 2)
             self.assertEqual(minion.jid_queue, [456, 789])
         finally:
             minion.destroy()
+
+    def test_process_count_max(self):
+        '''
+        Tests that the _handle_decoded_payload function does not spawn more than the configured number of processes,
+        as per process_count_max.
+        '''
+        with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
+                patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
+                patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)), \
+                patch('salt.utils.minion.running', MagicMock(return_value=[])), \
+                patch('tornado.gen.sleep', MagicMock(return_value=tornado.concurrent.Future())):
+            process_count_max = 10
+            mock_opts = salt.config.DEFAULT_MINION_OPTS
+            mock_opts['minion_jid_queue_hwm'] = 100
+            mock_opts["process_count_max"] = process_count_max
+
+            try:
+                io_loop = tornado.ioloop.IOLoop()
+                minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop)
+
+                # mock gen.sleep to raise a special exception when called, so that we can detect it
+                class SleepCalledException(Exception):
+                    """Thrown when sleep is called"""
+                    pass
+                tornado.gen.sleep.return_value.set_exception(SleepCalledException())
+
+                # up until process_count_max: gen.sleep does not get called, processes are started normally
+                for i in range(process_count_max):
+                    mock_data = {'fun': 'foo.bar',
+                                 'jid': i}
+                    io_loop.run_sync(lambda data=mock_data: minion._handle_decoded_payload(data))
+                    self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count, i + 1)
+                    self.assertEqual(len(minion.jid_queue), i + 1)
+                    salt.utils.minion.running.return_value += [i]
+
+                # above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started
+                mock_data = {'fun': 'foo.bar',
+                             'jid': process_count_max + 1}
+
+                self.assertRaises(SleepCalledException,
+                                  lambda: io_loop.run_sync(lambda: minion._handle_decoded_payload(mock_data)))
+                self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count,
+                                 process_count_max)
+                self.assertEqual(len(minion.jid_queue), process_count_max + 1)
+            finally:
+                minion.destroy()
diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py
index 2c6243bbd85..c2706d72a34 100644
--- a/tests/unit/utils/test_dictdiffer.py
+++ b/tests/unit/utils/test_dictdiffer.py
@@ -49,7 +49,7 @@ class RecursiveDictDifferTestCase(TestCase):
     def test_changed_without_ignore_unset_values(self):
         self.recursive_diff.ignore_unset_values = False
         self.assertEqual(self.recursive_diff.changed(),
-                         ['a.c', 'a.e', 'a.g', 'a.f', 'h', 'i'])
+                         ['a.c', 'a.e', 'a.f', 'a.g', 'h', 'i'])
 
     def test_unchanged(self):
         self.assertEqual(self.recursive_diff.unchanged(),
@@ -89,7 +89,7 @@ class RecursiveDictDifferTestCase(TestCase):
                          'a:\n'
                          '  c from 2 to 4\n'
                          '  e from \'old_value\' to \'new_value\'\n'
-                         '  g from nothing to \'new_key\'\n'
                          '  f from \'old_key\' to nothing\n'
+                         '  g from nothing to \'new_key\'\n'
                          'h from nothing to \'new_key\'\n'
                          'i from nothing to None')
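The ``test_process_count_max`` test added above exercises the throttling path that ``process_count_max`` introduces into ``Minion._handle_decoded_payload``: up to the configured limit a job process is started for every decoded payload, and once the limit is reached the coroutine still records the JID but waits on ``tornado.gen.sleep`` before spawning anything further. Below is a minimal, self-contained sketch of that control flow; ``ThrottledMinion``, ``handle_payload``, ``run_job`` and the 10-second poll interval are illustrative stand-ins, not Salt's actual API.

.. code-block:: python

    import tornado.gen


    class ThrottledMinion(object):
        '''Illustrative stand-in for the throttling behaviour under test.'''

        def __init__(self, opts, run_job):
            self.opts = opts
            self.run_job = run_job  # callable that spawns one job process/thread
            self.jid_queue = []     # JIDs accepted so far
            self.running = []       # stand-in for salt.utils.minion.running(opts)

        @tornado.gen.coroutine
        def handle_payload(self, data):
            # The JID is recorded as soon as the payload is accepted, which is
            # why the test sees jid_queue grow even when no process starts.
            self.jid_queue.append(data['jid'])
            limit = self.opts.get('process_count_max', -1)
            if limit > 0:
                # Past the limit, no new process is spawned until a running
                # job finishes; this is the branch the test detects through
                # the patched gen.sleep.
                while len(self.running) >= limit:
                    yield tornado.gen.sleep(10)
            self.run_job(data)  # only now is a new process/thread started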
diff --git a/tests/unit/utils/test_listdiffer.py b/tests/unit/utils/test_listdiffer.py index ae8288c81c9..2df44278e3e 100644 --- a/tests/unit/utils/test_listdiffer.py +++ b/tests/unit/utils/test_listdiffer.py @@ -32,34 +32,43 @@ class ListDictDifferTestCase(TestCase): continue def test_added(self): - self.assertEqual(self.list_diff.added, - [{'key': 5, 'value': 'foo5', 'int_value': 105}]) + self.assertEqual(len(self.list_diff.added), 1) + self.assertDictEqual(self.list_diff.added[0], + {'key': 5, 'value': 'foo5', 'int_value': 105}) def test_removed(self): - self.assertEqual(self.list_diff.removed, - [{'key': 3, 'value': 'foo3', 'int_value': 103}]) + self.assertEqual(len(self.list_diff.removed), 1) + self.assertDictEqual(self.list_diff.removed[0], + {'key': 3, 'value': 'foo3', 'int_value': 103}) def test_diffs(self): - self.assertEqual(self.list_diff.diffs, - [{2: {'int_value': {'new': 112, 'old': 102}}}, - # Added items - {5: {'int_value': {'new': 105, 'old': NONE}, - 'key': {'new': 5, 'old': NONE}, - 'value': {'new': 'foo5', 'old': NONE}}}, - # Removed items - {3: {'int_value': {'new': NONE, 'old': 103}, - 'key': {'new': NONE, 'old': 3}, - 'value': {'new': NONE, 'old': 'foo3'}}}]) + self.assertEqual(len(self.list_diff.diffs), 3) + self.assertDictEqual(self.list_diff.diffs[0], + {2: {'int_value': {'new': 112, 'old': 102}}}) + self.assertDictEqual(self.list_diff.diffs[1], + # Added items + {5: {'int_value': {'new': 105, 'old': NONE}, + 'key': {'new': 5, 'old': NONE}, + 'value': {'new': 'foo5', 'old': NONE}}}) + self.assertDictEqual(self.list_diff.diffs[2], + # Removed items + {3: {'int_value': {'new': NONE, 'old': 103}, + 'key': {'new': NONE, 'old': 3}, + 'value': {'new': NONE, 'old': 'foo3'}}}) def test_new_values(self): - self.assertEqual(self.list_diff.new_values, - [{'key': 2, 'int_value': 112}, - {'key': 5, 'value': 'foo5', 'int_value': 105}]) + self.assertEqual(len(self.list_diff.new_values), 2) + self.assertDictEqual(self.list_diff.new_values[0], + {'key': 2, 'int_value': 112}) + self.assertDictEqual(self.list_diff.new_values[1], + {'key': 5, 'value': 'foo5', 'int_value': 105}) def test_old_values(self): - self.assertEqual(self.list_diff.old_values, - [{'key': 2, 'int_value': 102}, - {'key': 3, 'value': 'foo3', 'int_value': 103}]) + self.assertEqual(len(self.list_diff.old_values), 2) + self.assertDictEqual(self.list_diff.old_values[0], + {'key': 2, 'int_value': 102}) + self.assertDictEqual(self.list_diff.old_values[1], + {'key': 3, 'value': 'foo3', 'int_value': 103}) def test_changed_all(self): self.assertEqual(self.list_diff.changed(selection='all'), @@ -78,11 +87,3 @@ class ListDictDifferTestCase(TestCase): '\twill be removed\n' '\tidentified by key 5:\n' '\twill be added\n') - - def test_changes_str2(self): - self.assertEqual(self.list_diff.changes_str2, - ' key=2 (updated):\n' - ' int_value from 102 to 112\n' - ' key=3 (removed)\n' - ' key=5 (added): {\'int_value\': 105, \'key\': 5, ' - '\'value\': \'foo5\'}') diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 43488e894c0..ba4cc402d80 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -958,5 +958,47 @@ class SaltAPIParserTestCase(LogSettingsParserTests): self.addCleanup(delattr, self, 'parser') +@skipIf(NO_MOCK, NO_MOCK_REASON) +class DaemonMixInTestCase(TestCase): + ''' + Tests the PIDfile deletion in the DaemonMixIn. 
+    '''
+
+    def setUp(self):
+        '''
+        Setting up
+        '''
+        # Set PID
+        self.pid = '/some/fake.pid'
+
+        # Setup mixin
+        self.mixin = salt.utils.parsers.DaemonMixIn()
+        self.mixin.info = None
+        self.mixin.config = {}
+        self.mixin.config['pidfile'] = self.pid
+
+    def test_pid_file_deletion(self):
+        '''
+        PIDfile deletion without exception.
+        '''
+        with patch('os.unlink', MagicMock()) as os_unlink:
+            with patch('os.path.isfile', MagicMock(return_value=True)):
+                with patch.object(self.mixin, 'info', MagicMock()):
+                    self.mixin._mixin_before_exit()
+                    assert self.mixin.info.call_count == 0
+                    assert os_unlink.call_count == 1
+
+    def test_pid_file_deletion_with_oserror(self):
+        '''
+        PIDfile deletion with exception.
+        '''
+        with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink:
+            with patch('os.path.isfile', MagicMock(return_value=True)):
+                with patch.object(self.mixin, 'info', MagicMock()):
+                    self.mixin._mixin_before_exit()
+                    assert os_unlink.call_count == 1
+                    self.mixin.info.assert_called_with(
+                        'PIDfile could not be deleted: {0}'.format(self.pid))
+
+
 # Hide the class from unittest framework when it searches for TestCase classes in the module
 del LogSettingsParserTests
diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py
new file mode 100644
index 00000000000..6c2be0f9b58
--- /dev/null
+++ b/tests/unit/utils/test_pbm.py
@@ -0,0 +1,664 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Alexandru Bleotu `
+
+    Tests functions in salt.utils.pbm
+'''
+
+# Import python libraries
+from __future__ import absolute_import
+import logging
+
+# Import Salt testing libraries
+from tests.support.unit import TestCase, skipIf
+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \
+    PropertyMock
+
+# Import Salt libraries
+from salt.exceptions import VMwareApiError, VMwareRuntimeError, \
+    VMwareObjectRetrievalError
+from salt.ext.six.moves import range
+import salt.utils.pbm
+
+try:
+    from pyVmomi import vim, vmodl, pbm
+    HAS_PYVMOMI = True
+except ImportError:
+    HAS_PYVMOMI = False
+
+
+# Get Logging Started
+log = logging.getLogger(__name__)
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class GetProfileManagerTestCase(TestCase):
+    '''Tests for salt.utils.pbm.get_profile_manager'''
+    def setUp(self):
+        self.mock_si = MagicMock()
+        self.mock_stub = MagicMock()
+        self.mock_prof_mgr = MagicMock()
+        self.mock_content = MagicMock()
+        self.mock_pbm_si = MagicMock(
+            RetrieveContent=MagicMock(return_value=self.mock_content))
+        type(self.mock_content).profileManager = \
+            PropertyMock(return_value=self.mock_prof_mgr)
+        patches = (
+            ('salt.utils.vmware.get_new_service_instance_stub',
+             MagicMock(return_value=self.mock_stub)),
+            ('salt.utils.pbm.pbm.ServiceInstance',
+             MagicMock(return_value=self.mock_pbm_si)))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_si', 'mock_stub', 'mock_content',
+                     'mock_pbm_si', 'mock_prof_mgr'):
+            delattr(self, attr)
+
+    def test_get_new_service_stub(self):
+        mock_get_new_service_stub = MagicMock()
+        with patch('salt.utils.vmware.get_new_service_instance_stub',
+                   mock_get_new_service_stub):
+            salt.utils.pbm.get_profile_manager(self.mock_si)
+        mock_get_new_service_stub.assert_called_once_with(
+            self.mock_si, ns='pbm/2.0', path='/pbm/sdk')
+
+    def test_pbm_si(self):
+        mock_get_pbm_si = MagicMock()
+        with patch('salt.utils.pbm.pbm.ServiceInstance',
+                   mock_get_pbm_si):
salt.utils.pbm.get_profile_manager(self.mock_si) + mock_get_pbm_si.assert_called_once_with('ServiceInstance', + self.mock_stub) + + def test_return_profile_manager(self): + ret = salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(ret, self.mock_prof_mgr) + + def test_profile_manager_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_profile_manager_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_profile_manager_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetPlacementSolverTestCase(TestCase): + '''Tests for salt.utils.pbm.get_placement_solver''' + def setUp(self): + self.mock_si = MagicMock() + self.mock_stub = MagicMock() + self.mock_prof_mgr = MagicMock() + self.mock_content = MagicMock() + self.mock_pbm_si = MagicMock( + RetrieveContent=MagicMock(return_value=self.mock_content)) + type(self.mock_content).placementSolver = \ + PropertyMock(return_value=self.mock_prof_mgr) + patches = ( + ('salt.utils.vmware.get_new_service_instance_stub', + MagicMock(return_value=self.mock_stub)), + ('salt.utils.pbm.pbm.ServiceInstance', + MagicMock(return_value=self.mock_pbm_si))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_stub', 'mock_content', + 'mock_pbm_si', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_new_service_stub(self): + mock_get_new_service_stub = MagicMock() + with patch('salt.utils.vmware.get_new_service_instance_stub', + mock_get_new_service_stub): + salt.utils.pbm.get_placement_solver(self.mock_si) + mock_get_new_service_stub.assert_called_once_with( + self.mock_si, ns='pbm/2.0', path='/pbm/sdk') + + def test_pbm_si(self): + mock_get_pbm_si = MagicMock() + with patch('salt.utils.pbm.pbm.ServiceInstance', + mock_get_pbm_si): + salt.utils.pbm.get_placement_solver(self.mock_si) + mock_get_pbm_si.assert_called_once_with('ServiceInstance', + self.mock_stub) + + def test_return_profile_manager(self): + ret = salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(ret, self.mock_prof_mgr) + + def test_placement_solver_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_placement_solver_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_placement_solver_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetCapabilityDefinitionsTestCase(TestCase): + '''Tests for salt.utils.pbm.get_capability_definitions''' + def setUp(self): + self.mock_res_type = MagicMock() + self.mock_cap_cats = [MagicMock(capabilityMetadata=['fake_cap_meta1', + 'fake_cap_meta2']), + MagicMock(capabilityMetadata=['fake_cap_meta3'])] + self.mock_prof_mgr = MagicMock( + FetchCapabilityMetadata=MagicMock(return_value=self.mock_cap_cats)) + patches = ( + ('salt.utils.pbm.pbm.profile.ResourceType', + MagicMock(return_value=self.mock_res_type)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_res_type', 'mock_cap_cats', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_res_type(self): + mock_get_res_type = MagicMock() + with patch('salt.utils.pbm.pbm.profile.ResourceType', + mock_get_res_type): + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + mock_get_res_type.assert_called_once_with( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + + def test_fetch_capabilities(self): + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.mock_prof_mgr.FetchCapabilityMetadata.assert_called_once_with( + self.mock_res_type) + + def test_fetch_capabilities_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_fetch_capabilities_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_fetch_capabilities_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_return_cap_definitions(self): + ret = salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(ret, ['fake_cap_meta1', 'fake_cap_meta2', + 'fake_cap_meta3']) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetPoliciesByIdTestCase(TestCase): + '''Tests for salt.utils.pbm.get_policies_by_id''' + def setUp(self): + self.policy_ids = MagicMock() + self.mock_policies = MagicMock() + self.mock_prof_mgr = MagicMock( + RetrieveContent=MagicMock(return_value=self.mock_policies)) + + def tearDown(self): + for attr in ('policy_ids', 'mock_policies', 'mock_prof_mgr'): + delattr(self, attr) + + def test_retrieve_policies(self): + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.mock_prof_mgr.RetrieveContent.assert_called_once_with( + self.policy_ids) + + def test_retrieve_policies_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_retrieve_policies_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_retrieve_policies_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_return_policies(self): + ret = salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(ret, self.mock_policies) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetStoragePoliciesTestCase(TestCase): + '''Tests for salt.utils.pbm.get_storage_policies''' + def setUp(self): + self.mock_res_type = MagicMock() + self.mock_policy_ids = MagicMock() + self.mock_prof_mgr = MagicMock( + QueryProfile=MagicMock(return_value=self.mock_policy_ids)) + # Policies + self.mock_policies = [] + for i in range(4): + mock_obj = MagicMock(resourceType=MagicMock( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE)) + mock_obj.name = 'fake_policy{0}'.format(i) + self.mock_policies.append(mock_obj) + patches = ( + ('salt.utils.pbm.pbm.profile.ResourceType', + MagicMock(return_value=self.mock_res_type)), + ('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=self.mock_policies))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_res_type', 'mock_policy_ids', 'mock_policies', + 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_res_type(self): + mock_get_res_type = MagicMock() + with patch('salt.utils.pbm.pbm.profile.ResourceType', + mock_get_res_type): + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_get_res_type.assert_called_once_with( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + + def test_retrieve_policy_ids(self): + mock_retrieve_policy_ids = MagicMock(return_value=self.mock_policy_ids) + self.mock_prof_mgr.QueryProfile = mock_retrieve_policy_ids + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_retrieve_policy_ids.assert_called_once_with(self.mock_res_type) + + def test_retrieve_policy_ids_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_retrieve_policy_ids_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_retrieve_policy_ids_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_get_policies_by_id(self): + mock_get_policies_by_id = MagicMock(return_value=self.mock_policies) + with patch('salt.utils.pbm.get_policies_by_id', + mock_get_policies_by_id): + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_get_policies_by_id.assert_called_once_with( + self.mock_prof_mgr, self.mock_policy_ids) + + def test_return_all_policies(self): + ret = salt.utils.pbm.get_storage_policies(self.mock_prof_mgr, + get_all_policies=True) + self.assertEqual(ret, self.mock_policies) + + def test_return_filtered_policies(self): + ret = salt.utils.pbm.get_storage_policies( + self.mock_prof_mgr, policy_names=['fake_policy1', 'fake_policy3']) + self.assertEqual(ret, [self.mock_policies[1], self.mock_policies[3]]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class CreateStoragePolicyTestCase(TestCase): + '''Tests for salt.utils.pbm.create_storage_policy''' + def setUp(self): + self.mock_policy_spec = MagicMock() + self.mock_prof_mgr = MagicMock() + + def tearDown(self): + for attr in ('mock_policy_spec', 'mock_prof_mgr'): + delattr(self, attr) + + def test_create_policy(self): + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.mock_prof_mgr.Create.assert_called_once_with( + self.mock_policy_spec) + + def test_create_policy_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_create_policy_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_policy_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class UpdateStoragePolicyTestCase(TestCase): + '''Tests for salt.utils.pbm.update_storage_policy''' + def setUp(self): + self.mock_policy_spec = MagicMock() + self.mock_policy = MagicMock() + self.mock_prof_mgr = MagicMock() + + def tearDown(self): + for attr in ('mock_policy_spec', 'mock_policy', 'mock_prof_mgr'): + delattr(self, attr) + + def test_create_policy(self): + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.mock_prof_mgr.Update.assert_called_once_with( + self.mock_policy.profileId, self.mock_policy_spec) + + def test_create_policy_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_create_policy_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_policy_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetDefaultStoragePolicyOfDatastoreTestCase(TestCase): + '''Tests for salt.utils.pbm.get_default_storage_policy_of_datastore''' + def setUp(self): + self.mock_ds = MagicMock(_moId='fake_ds_moid') + self.mock_hub = MagicMock() + self.mock_policy_id = 'fake_policy_id' + self.mock_prof_mgr = MagicMock( + QueryDefaultRequirementProfile=MagicMock( + return_value=self.mock_policy_id)) + self.mock_policy_refs = [MagicMock()] + patches = ( + ('salt.utils.pbm.pbm.placement.PlacementHub', + MagicMock(return_value=self.mock_hub)), + ('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=self.mock_policy_refs))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ds', 'mock_hub', 'mock_policy_id', 'mock_prof_mgr', + 'mock_policy_refs'): + delattr(self, attr) + + def test_get_placement_hub(self): + mock_get_placement_hub = MagicMock() + with patch('salt.utils.pbm.pbm.placement.PlacementHub', + mock_get_placement_hub): + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_get_placement_hub.assert_called_once_with( + hubId='fake_ds_moid', hubType='Datastore') + + def test_query_default_requirement_profile(self): + mock_query_prof = MagicMock(return_value=self.mock_policy_id) + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + mock_query_prof + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_query_prof.assert_called_once_with(self.mock_hub) + + def test_query_default_requirement_profile_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_query_default_requirement_profile_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_query_default_requirement_profile_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_get_policies_by_id(self): + mock_get_policies_by_id = MagicMock() + with patch('salt.utils.pbm.get_policies_by_id', + mock_get_policies_by_id): + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_get_policies_by_id.assert_called_once_with( + self.mock_prof_mgr, [self.mock_policy_id]) + + def test_no_policy_refs(self): + mock_get_policies_by_id = MagicMock() + with patch('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=None)): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Storage policy with id \'fake_policy_id\' was not ' + 'found') + + def test_return_policy_ref(self): + mock_get_policies_by_id = MagicMock() + ret = salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(ret, self.mock_policy_refs[0]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class AssignDefaultStoragePolicyToDatastoreTestCase(TestCase): + '''Tests for salt.utils.pbm.assign_default_storage_policy_to_datastore''' + def setUp(self): + self.mock_ds = MagicMock(_moId='fake_ds_moid') + self.mock_policy = MagicMock() + self.mock_hub = MagicMock() + self.mock_prof_mgr = MagicMock() + patches = ( + ('salt.utils.pbm.pbm.placement.PlacementHub', + MagicMock(return_value=self.mock_hub)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ds', 'mock_hub', 'mock_policy', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_placement_hub(self): + mock_get_placement_hub = MagicMock() + with patch('salt.utils.pbm.pbm.placement.PlacementHub', + mock_get_placement_hub): + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + mock_get_placement_hub.assert_called_once_with( + hubId='fake_ds_moid', hubType='Datastore') + + def test_assign_default_requirement_profile(self): + mock_assign_prof = MagicMock() + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + mock_assign_prof + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + mock_assign_prof.assert_called_once_with( + self.mock_policy.profileId, [self.mock_hub]) + + def test_assign_default_requirement_profile_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + 
self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_assign_default_requirement_profile_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_assign_default_requirement_profile_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') diff --git a/tests/unit/utils/vmware/test_connection.py b/tests/unit/utils/vmware/test_connection.py index 4a95e9b67fc..d8afbb0504c 100644 --- a/tests/unit/utils/vmware/test_connection.py +++ b/tests/unit/utils/vmware/test_connection.py @@ -13,6 +13,7 @@ import ssl import sys # Import Salt testing libraries +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ PropertyMock @@ -852,6 +853,96 @@ class IsConnectionToAVCenterTestCase(TestCase): excinfo.exception.strerror) +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetNewServiceInstanceStub(TestCase, LoaderModuleMockMixin): + '''Tests for salt.utils.vmware.get_new_service_instance_stub''' + def setup_loader_modules(self): + return {salt.utils.vmware: { + '__virtual__': MagicMock(return_value='vmware'), + 'sys': MagicMock(), + 'ssl': MagicMock()}} + + def setUp(self): + self.mock_stub = MagicMock( + host='fake_host:1000', + cookie='ignore"fake_cookie') + self.mock_si = MagicMock( + _stub=self.mock_stub) + self.mock_ret = MagicMock() + self.mock_new_stub = MagicMock() + self.context_dict = {} + patches = (('salt.utils.vmware.VmomiSupport.GetRequestContext', + MagicMock( + return_value=self.context_dict)), + ('salt.utils.vmware.SoapStubAdapter', + MagicMock(return_value=self.mock_new_stub))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + type(salt.utils.vmware.sys).version_info = \ + PropertyMock(return_value=(2, 7, 9)) + self.mock_context = MagicMock() + self.mock_create_default_context = \ + MagicMock(return_value=self.mock_context) + salt.utils.vmware.ssl.create_default_context = \ + self.mock_create_default_context + + def tearDown(self): + for attr in ('mock_stub', 'mock_si', 'mock_ret', 'mock_new_stub', + 'context_dict', 'mock_context', + 'mock_create_default_context'): + delattr(self, attr) + + def test_ssl_default_context_loaded(self): + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path') + self.mock_create_default_context.assert_called_once_with() + self.assertFalse(self.mock_context.check_hostname) + 
self.assertEqual(self.mock_context.verify_mode,
+                         salt.utils.vmware.ssl.CERT_NONE)
+
+    def test_ssl_default_context_not_loaded(self):
+        type(salt.utils.vmware.sys).version_info = \
+            PropertyMock(return_value=(2, 7, 8))
+        salt.utils.vmware.get_new_service_instance_stub(
+            self.mock_si, 'fake_path')
+        self.assertEqual(self.mock_create_default_context.call_count, 0)
+
+    def test_session_cookie_in_context(self):
+        salt.utils.vmware.get_new_service_instance_stub(
+            self.mock_si, 'fake_path')
+        self.assertEqual(self.context_dict['vcSessionCookie'], 'fake_cookie')
+
+    def test_get_new_stub(self):
+        mock_get_new_stub = MagicMock()
+        with patch('salt.utils.vmware.SoapStubAdapter', mock_get_new_stub):
+            salt.utils.vmware.get_new_service_instance_stub(
+                self.mock_si, 'fake_path', 'fake_ns', 'fake_version')
+        mock_get_new_stub.assert_called_once_with(
+            host='fake_host', ns='fake_ns', path='fake_path',
+            version='fake_version', poolSize=0, sslContext=self.mock_context)
+
+    def test_get_new_stub_2_7_8_python(self):
+        type(salt.utils.vmware.sys).version_info = \
+            PropertyMock(return_value=(2, 7, 8))
+        mock_get_new_stub = MagicMock()
+        with patch('salt.utils.vmware.SoapStubAdapter', mock_get_new_stub):
+            salt.utils.vmware.get_new_service_instance_stub(
+                self.mock_si, 'fake_path', 'fake_ns', 'fake_version')
+        mock_get_new_stub.assert_called_once_with(
+            host='fake_host', ns='fake_ns', path='fake_path',
+            version='fake_version', poolSize=0, sslContext=None)
+
+    def test_new_stub_returned(self):
+        ret = salt.utils.vmware.get_new_service_instance_stub(
+            self.mock_si, 'fake_path')
+        self.assertEqual(self.mock_new_stub.cookie, 'ignore"fake_cookie')
+        self.assertEqual(ret, self.mock_new_stub)
+
+
 @skipIf(NO_MOCK, NO_MOCK_REASON)
 @skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
 class GetServiceInstanceFromManagedObjectTestCase(TestCase):
diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py
new file mode 100644
index 00000000000..458e240e283
--- /dev/null
+++ b/tests/unit/utils/vmware/test_dvs.py
@@ -0,0 +1,784 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Alexandru Bleotu `
+
+    Tests for dvs related functions in salt.utils.vmware
+'''
+
+# Import python libraries
+from __future__ import absolute_import
+import logging
+
+# Import Salt testing libraries
+from tests.support.unit import TestCase, skipIf
+from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call
+from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \
+    ArgumentValueError, VMwareRuntimeError
+
+# Import Salt libraries
+import salt.utils.vmware as vmware
+# Import Third Party Libs
+try:
+    from pyVmomi import vim, vmodl
+    HAS_PYVMOMI = True
+except ImportError:
+    HAS_PYVMOMI = False
+
+# Get Logging Started
+log = logging.getLogger(__name__)
+
+
+class FakeTaskClass(object):
+    pass
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class GetDvssTestCase(TestCase):
+    def setUp(self):
+        self.mock_si = MagicMock()
+        self.mock_dc_ref = MagicMock()
+        self.mock_traversal_spec = MagicMock()
+        self.mock_items = [{'object': MagicMock(),
+                            'name': 'fake_dvs1'},
+                           {'object': MagicMock(),
+                            'name': 'fake_dvs2'},
+                           {'object': MagicMock(),
+                            'name': 'fake_dvs3'}]
+        self.mock_get_mors = MagicMock(return_value=self.mock_items)
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock()),
+            ('salt.utils.vmware.get_mors_with_properties',
+             self.mock_get_mors),
('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec', + 'mock_items', 'mock_get_mors'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_dvss(self.mock_dc_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_dvss(self.mock_dc_ref) + mock_traversal_spec.assert_has_calls( + [call(path='childEntity', skip=False, type=vim.Folder), + call(path='networkFolder', skip=True, type=vim.Datacenter, + selectSet=['traversal_spec'])]) + + def test_get_mors_with_properties(self): + vmware.get_dvss(self.mock_dc_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, vim.DistributedVirtualSwitch, + container_ref=self.mock_dc_ref, property_list=['name'], + traversal_spec=self.mock_traversal_spec) + + def test_get_no_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref) + self.assertEqual(ret, []) + + def test_get_all_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref, get_all_dvss=True) + self.assertEqual(ret, [i['object'] for i in self.mock_items]) + + def test_filtered_all_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref, + dvs_names=['fake_dvs1', 'fake_dvs3', 'no_dvs']) + self.assertEqual(ret, [self.mock_items[0]['object'], + self.mock_items[2]['object']]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetNetworkFolderTestCase(TestCase): + def setUp(self): + self.mock_si = MagicMock() + self.mock_dc_ref = MagicMock() + self.mock_traversal_spec = MagicMock() + self.mock_entries = [{'object': MagicMock(), + 'name': 'fake_netw_folder'}] + self.mock_get_mors = MagicMock(return_value=self.mock_entries) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dc')), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec)), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec', + 'mock_entries', 'mock_get_mors'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_network_folder(self.mock_dc_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + 
vmware.get_network_folder(self.mock_dc_ref) + mock_traversal_spec.assert_called_once_with( + path='networkFolder', skip=False, type=vim.Datacenter) + + def test_get_mors_with_properties(self): + vmware.get_network_folder(self.mock_dc_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, vim.Folder, container_ref=self.mock_dc_ref, + property_list=['name'], traversal_spec=self.mock_traversal_spec) + + def test_get_no_network_folder(self): + with patch('salt.utils.vmware.get_mors_with_properties', + MagicMock(return_value=[])): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + vmware.get_network_folder(self.mock_dc_ref) + self.assertEqual(excinfo.exception.strerror, + 'Network folder in datacenter \'fake_dc\' wasn\'t ' + 'retrieved') + + def test_get_network_folder(self): + ret = vmware.get_network_folder(self.mock_dc_ref) + self.assertEqual(ret, self.mock_entries[0]['object']) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class CreateDvsTestCase(TestCase): + def setUp(self): + self.mock_dc_ref = MagicMock() + self.mock_dvs_create_spec = MagicMock() + self.mock_task = MagicMock(spec=FakeTaskClass) + self.mock_netw_folder = \ + MagicMock(CreateDVS_Task=MagicMock( + return_value=self.mock_task)) + self.mock_wait_for_task = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dc')), + ('salt.utils.vmware.get_network_folder', + MagicMock(return_value=self.mock_netw_folder)), + ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_dc_ref', 'mock_dvs_create_spec', + 'mock_task', 'mock_netw_folder', 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_no_dvs_create_spec(self): + mock_spec = MagicMock(configSpec=None) + mock_config_spec = MagicMock() + mock_dvs_create_spec = MagicMock(return_value=mock_spec) + mock_vmware_dvs_config_spec = \ + MagicMock(return_value=mock_config_spec) + with patch('salt.utils.vmware.vim.DVSCreateSpec', + mock_dvs_create_spec): + with patch('salt.utils.vmware.vim.VMwareDVSConfigSpec', + mock_vmware_dvs_config_spec): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_dvs_create_spec.assert_called_once_with() + mock_vmware_dvs_config_spec.assert_called_once_with() + self.assertEqual(mock_spec.configSpec, mock_config_spec) + self.assertEqual(mock_config_spec.name, 'fake_dvs') + self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(mock_spec) + + def test_get_network_folder(self): + mock_get_network_folder = MagicMock() + with patch('salt.utils.vmware.get_network_folder', + mock_get_network_folder): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_get_network_folder.assert_called_once_with(self.mock_dc_ref) + + def test_create_dvs_task_passed_in_spec(self): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.mock_netw_folder.CreateDVS_Task.assert_called_once_with( + self.mock_dvs_create_spec) + + def test_create_dvs_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 
'Fake privilege' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_create_dvs_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_dvs_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_dvs', + '') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class UpdateDvsTestCase(TestCase): + def setUp(self): + self.mock_task = MagicMock(spec=FakeTaskClass) + self.mock_dvs_ref = MagicMock( + ReconfigureDvs_Task=MagicMock(return_value=self.mock_task)) + self.mock_dvs_spec = MagicMock() + self.mock_wait_for_task = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dvs')), + ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_dvs_ref', 'mock_task', 'mock_dvs_spec', + 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_reconfigure_dvs_task(self): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.mock_dvs_ref.ReconfigureDvs_Task.assert_called_once_with( + self.mock_dvs_spec) + + def test_reconfigure_dvs_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_reconfigure_dvs_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_reconfigure_dvs_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_dvs', + '') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class SetDvsNetworkResourceManagementEnabledTestCase(TestCase): + def setUp(self): + self.mock_enabled = MagicMock() + self.mock_dvs_ref = MagicMock( + EnableNetworkResourceManagement=MagicMock()) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dvs')),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_dvs_ref', 'mock_enabled'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_enable_network_resource_management(self): + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + self.mock_dvs_ref.EnableNetworkResourceManagement.assert_called_once_with( + enable=self.mock_enabled) + + def test_enable_network_resource_management_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dvs_ref.EnableNetworkResourceManagement = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: '
+                         'Fake privilege')
+
+    def test_enable_network_resource_management_raises_vim_fault(self):
+        exc = vim.fault.VimFault()
+        exc.msg = 'VimFault msg'
+        self.mock_dvs_ref.EnableNetworkResourceManagement = \
+            MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.set_dvs_network_resource_management_enabled(
+                self.mock_dvs_ref, self.mock_enabled)
+        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
+
+    def test_enable_network_resource_management_raises_runtime_fault(self):
+        exc = vmodl.RuntimeFault()
+        exc.msg = 'RuntimeFault msg'
+        self.mock_dvs_ref.EnableNetworkResourceManagement = \
+            MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareRuntimeError) as excinfo:
+            vmware.set_dvs_network_resource_management_enabled(
+                self.mock_dvs_ref, self.mock_enabled)
+        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class GetDvportgroupsTestCase(TestCase):
+    def setUp(self):
+        self.mock_si = MagicMock()
+        self.mock_dc_ref = MagicMock(spec=vim.Datacenter)
+        self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
+        self.mock_traversal_spec = MagicMock()
+        self.mock_items = [{'object': MagicMock(),
+                            'name': 'fake_pg1'},
+                           {'object': MagicMock(),
+                            'name': 'fake_pg2'},
+                           {'object': MagicMock(),
+                            'name': 'fake_pg3'}]
+        self.mock_get_mors = MagicMock(return_value=self.mock_items)
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock()),
+            ('salt.utils.vmware.get_mors_with_properties',
+             self.mock_get_mors),
+            ('salt.utils.vmware.get_service_instance_from_managed_object',
+             MagicMock(return_value=self.mock_si)),
+            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
+             MagicMock(return_value=self.mock_traversal_spec)))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_si', 'mock_dc_ref', 'mock_dvs_ref',
+                     'mock_traversal_spec', 'mock_items', 'mock_get_mors'):
+            delattr(self, attr)
+
+    def test_unsupported_parent(self):
+        with self.assertRaises(ArgumentValueError) as excinfo:
+            vmware.get_dvportgroups(MagicMock())
+        self.assertEqual(excinfo.exception.strerror,
+                         'Parent has to be either a datacenter, or a '
+                         'distributed virtual switch')
+
+    def test_get_managed_object_name_call(self):
+        mock_get_managed_object_name = MagicMock()
+        with patch('salt.utils.vmware.get_managed_object_name',
+                   mock_get_managed_object_name):
+            vmware.get_dvportgroups(self.mock_dc_ref)
+        mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)
+
+    def test_traversal_spec_datacenter_parent(self):
+        mock_traversal_spec = MagicMock(return_value='traversal_spec')
+        with patch(
+            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
+            mock_traversal_spec):
+
+            vmware.get_dvportgroups(self.mock_dc_ref)
+        mock_traversal_spec.assert_has_calls(
+            [call(path='childEntity', skip=False, type=vim.Folder),
+             call(path='networkFolder', skip=True, type=vim.Datacenter,
+                  selectSet=['traversal_spec'])])
+
+    def test_traversal_spec_dvs_parent(self):
+        mock_traversal_spec = MagicMock(return_value='traversal_spec')
+        with patch(
+            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
+            mock_traversal_spec):
+
+            vmware.get_dvportgroups(self.mock_dvs_ref)
+        mock_traversal_spec.assert_called_once_with(
+            path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
+
+    def test_get_mors_with_properties(self):
+        vmware.get_dvportgroups(self.mock_dvs_ref)
+        self.mock_get_mors.assert_called_once_with(
+            self.mock_si, vim.DistributedVirtualPortgroup,
+            container_ref=self.mock_dvs_ref, property_list=['name'],
+            traversal_spec=self.mock_traversal_spec)
+
+    def test_get_no_pgs(self):
+        ret = vmware.get_dvportgroups(self.mock_dvs_ref)
+        self.assertEqual(ret, [])
+
+    def test_get_all_pgs(self):
+        ret = vmware.get_dvportgroups(self.mock_dvs_ref,
+                                      get_all_portgroups=True)
+        self.assertEqual(ret, [i['object'] for i in self.mock_items])
+
+    def test_filtered_pgs(self):
+        ret = vmware.get_dvportgroups(
+            self.mock_dvs_ref,
+            portgroup_names=['fake_pg1', 'fake_pg3', 'no_pg'])
+        self.assertEqual(ret, [self.mock_items[0]['object'],
+                               self.mock_items[2]['object']])
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class GetUplinkDvportgroupTestCase(TestCase):
+    def setUp(self):
+        self.mock_si = MagicMock()
+        self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
+        self.mock_traversal_spec = MagicMock()
+        self.mock_items = [{'object': MagicMock(),
+                            'tag': [MagicMock(key='fake_tag')]},
+                           {'object': MagicMock(),
+                            'tag': [MagicMock(key='SYSTEM/DVS.UPLINKPG')]}]
+        self.mock_get_mors = MagicMock(return_value=self.mock_items)
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock(return_value='fake_dvs')),
+            ('salt.utils.vmware.get_mors_with_properties',
+             self.mock_get_mors),
+            ('salt.utils.vmware.get_service_instance_from_managed_object',
+             MagicMock(return_value=self.mock_si)),
+            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
+             MagicMock(return_value=self.mock_traversal_spec)))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_si', 'mock_dvs_ref', 'mock_traversal_spec',
+                     'mock_items', 'mock_get_mors'):
+            delattr(self, attr)
+
+    def test_get_managed_object_name_call(self):
+        mock_get_managed_object_name = MagicMock()
+        with patch('salt.utils.vmware.get_managed_object_name',
+                   mock_get_managed_object_name):
+            vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
+        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
+
+    def test_traversal_spec(self):
+        mock_traversal_spec = MagicMock(return_value='traversal_spec')
+        with patch(
+            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
+            mock_traversal_spec):
+
+            vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
+        mock_traversal_spec.assert_called_once_with(
+            path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
+
+    def test_get_mors_with_properties(self):
+        vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
+        self.mock_get_mors.assert_called_once_with(
+            self.mock_si, vim.DistributedVirtualPortgroup,
+            container_ref=self.mock_dvs_ref, property_list=['tag'],
+            traversal_spec=self.mock_traversal_spec)
+
+    def test_get_no_uplink_pg(self):
+        with patch('salt.utils.vmware.get_mors_with_properties',
+                   MagicMock(return_value=[])):
+            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
+                vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
+        self.assertEqual(excinfo.exception.strerror,
+                         'Uplink portgroup of DVS \'fake_dvs\' wasn\'t found')
+
+    def test_get_uplink_pg(self):
+        ret = vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
+        self.assertEqual(ret, self.mock_items[1]['object'])
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class CreateDvportgroupTestCase(TestCase):
+    def setUp(self):
+        self.mock_pg_spec = MagicMock()
+        self.mock_task = MagicMock(spec=FakeTaskClass)
+        self.mock_dvs_ref = \
+            MagicMock(CreateDVPortgroup_Task=MagicMock(
+                return_value=self.mock_task))
+        self.mock_wait_for_task = MagicMock()
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock(return_value='fake_dvs')),
+            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_pg_spec', 'mock_dvs_ref', 'mock_task',
+                     'mock_wait_for_task'):
+            delattr(self, attr)
+
+    def test_get_managed_object_name_call(self):
+        mock_get_managed_object_name = MagicMock()
+        with patch('salt.utils.vmware.get_managed_object_name',
+                   mock_get_managed_object_name):
+            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)
+
+    def test_create_dvportgroup_task(self):
+        vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        self.mock_dvs_ref.CreateDVPortgroup_Task.assert_called_once_with(
+            self.mock_pg_spec)
+
+    def test_create_dvportgroup_task_raises_no_permission(self):
+        exc = vim.fault.NoPermission()
+        exc.privilegeId = 'Fake privilege'
+        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror,
+                         'Not enough permissions. Required privilege: '
+                         'Fake privilege')
+
+    def test_create_dvportgroup_task_raises_vim_fault(self):
+        exc = vim.fault.VimFault()
+        exc.msg = 'VimFault msg'
+        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
+
+    def test_create_dvportgroup_task_raises_runtime_fault(self):
+        exc = vmodl.RuntimeFault()
+        exc.msg = 'RuntimeFault msg'
+        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareRuntimeError) as excinfo:
+            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
+
+    def test_wait_for_tasks(self):
+        vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
+        self.mock_wait_for_task.assert_called_once_with(
+            self.mock_task, 'fake_dvs',
+            '')
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class UpdateDvportgroupTestCase(TestCase):
+    def setUp(self):
+        self.mock_pg_spec = MagicMock()
+        self.mock_task = MagicMock(spec=FakeTaskClass)
+        self.mock_pg_ref = \
+            MagicMock(ReconfigureDVPortgroup_Task=MagicMock(
+                return_value=self.mock_task))
+        self.mock_wait_for_task = MagicMock()
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock(return_value='fake_pg')),
+            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_pg_spec', 'mock_pg_ref', 'mock_task',
+                     'mock_wait_for_task'):
+            delattr(self, attr)
+
+    def test_get_managed_object_name_call(self):
+        mock_get_managed_object_name = MagicMock()
+        with patch('salt.utils.vmware.get_managed_object_name',
+                   mock_get_managed_object_name):
+            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)
+
+    def test_reconfigure_dvportgroup_task(self):
+        vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        self.mock_pg_ref.ReconfigureDVPortgroup_Task.assert_called_once_with(
+            self.mock_pg_spec)
+
+    def test_reconfigure_dvportgroup_task_raises_no_permission(self):
+        exc = vim.fault.NoPermission()
+        exc.privilegeId = 'Fake privilege'
+        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
+            MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror,
+                         'Not enough permissions. Required privilege: '
+                         'Fake privilege')
+
+    def test_reconfigure_dvportgroup_task_raises_vim_fault(self):
+        exc = vim.fault.VimFault()
+        exc.msg = 'VimFault msg'
+        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
+            MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
+
+    def test_reconfigure_dvportgroup_task_raises_runtime_fault(self):
+        exc = vmodl.RuntimeFault()
+        exc.msg = 'RuntimeFault msg'
+        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
+            MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareRuntimeError) as excinfo:
+            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
+
+    def test_wait_for_tasks(self):
+        vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
+        self.mock_wait_for_task.assert_called_once_with(
+            self.mock_task, 'fake_pg',
+            '')
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
+class RemoveDvportgroupTestCase(TestCase):
+    def setUp(self):
+        self.mock_task = MagicMock(spec=FakeTaskClass)
+        self.mock_pg_ref = \
+            MagicMock(Destroy_Task=MagicMock(
+                return_value=self.mock_task))
+        self.mock_wait_for_task = MagicMock()
+
+        patches = (
+            ('salt.utils.vmware.get_managed_object_name',
+             MagicMock(return_value='fake_pg')),
+            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
+        for mod, mock in patches:
+            patcher = patch(mod, mock)
+            patcher.start()
+            self.addCleanup(patcher.stop)
+
+    def tearDown(self):
+        for attr in ('mock_pg_ref', 'mock_task', 'mock_wait_for_task'):
+            delattr(self, attr)
+
+    def test_get_managed_object_name_call(self):
+        mock_get_managed_object_name = MagicMock()
+        with patch('salt.utils.vmware.get_managed_object_name',
+                   mock_get_managed_object_name):
+            vmware.remove_dvportgroup(self.mock_pg_ref)
+        mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)
+
+    def test_destroy_task(self):
+        vmware.remove_dvportgroup(self.mock_pg_ref)
+        self.mock_pg_ref.Destroy_Task.assert_called_once_with()
+
+    def test_destroy_task_raises_no_permission(self):
+        exc = vim.fault.NoPermission()
+        exc.privilegeId = 'Fake privilege'
+        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.remove_dvportgroup(self.mock_pg_ref)
+        self.assertEqual(excinfo.exception.strerror,
+                         'Not enough permissions. Required privilege: '
+                         'Fake privilege')
+
+    def test_destroy_task_raises_vim_fault(self):
+        exc = vim.fault.VimFault()
+        exc.msg = 'VimFault msg'
+        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareApiError) as excinfo:
+            vmware.remove_dvportgroup(self.mock_pg_ref)
+        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')
+
+    def test_destroy_task_raises_runtime_fault(self):
+        exc = vmodl.RuntimeFault()
+        exc.msg = 'RuntimeFault msg'
+        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
+        with self.assertRaises(VMwareRuntimeError) as excinfo:
+            vmware.remove_dvportgroup(self.mock_pg_ref)
+        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
+
+    def test_wait_for_tasks(self):
+        vmware.remove_dvportgroup(self.mock_pg_ref)
+        self.mock_wait_for_task.assert_called_once_with(
+            self.mock_task, 'fake_pg',
+            '')
diff --git a/tests/unit/utils/vmware/test_host.py b/tests/unit/utils/vmware/test_host.py
index bd28c70f61c..0f6965fb7c2 100644
--- a/tests/unit/utils/vmware/test_host.py
+++ b/tests/unit/utils/vmware/test_host.py
@@ -14,6 +14,7 @@ from tests.support.unit import TestCase, skipIf
 from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
 
 # Import Salt libraries
+from salt.exceptions import ArgumentValueError
 import salt.utils.vmware
 # Import Third Party Libs
 try:
@@ -46,14 +47,22 @@ class GetHostsTestCase(TestCase):
         self.mock_host1, self.mock_host2, self.mock_host3 = MagicMock(), \
             MagicMock(), MagicMock()
         self.mock_prop_host1 = {'name': 'fake_hostname1',
-                               'object': self.mock_host1}
+                                'object': self.mock_host1}
         self.mock_prop_host2 = {'name': 'fake_hostname2',
-                               'object': self.mock_host2}
+                                'object': self.mock_host2}
         self.mock_prop_host3 = {'name': 'fake_hostname3',
-                               'object': self.mock_host3}
+                                'object': self.mock_host3}
         self.mock_prop_hosts = [self.mock_prop_host1, self.mock_prop_host2,
                                 self.mock_prop_host3]
 
+    def test_cluster_no_datacenter(self):
+        with self.assertRaises(ArgumentValueError) as excinfo:
+            salt.utils.vmware.get_hosts(self.mock_si,
+                                        cluster_name='fake_cluster')
+        self.assertEqual(excinfo.exception.strerror,
+                         'Must specify the datacenter when specifying the '
+                         'cluster')
+
     def test_get_si_no_datacenter_no_cluster(self):
         mock_get_mors = MagicMock()
         mock_get_root_folder = MagicMock(return_value=self.mock_root_folder)
@@ -124,23 +133,20 @@ class GetHostsTestCase(TestCase):
         self.assertEqual(res, [])
 
     def test_filter_cluster(self):
-        cluster1 = vim.ClusterComputeResource('fake_good_cluster')
-        cluster2 = vim.ClusterComputeResource('fake_bad_cluster')
-        # Mock cluster1.name and cluster2.name
-        cluster1._stub = MagicMock(InvokeAccessor=MagicMock(
-            return_value='fake_good_cluster'))
-        cluster2._stub = MagicMock(InvokeAccessor=MagicMock(
-            return_value='fake_bad_cluster'))
-        self.mock_prop_host1['parent'] = cluster2
-        self.mock_prop_host2['parent'] = cluster1
-        self.mock_prop_host3['parent'] = cluster1
+        self.mock_prop_host1['parent'] = vim.ClusterComputeResource('cluster')
+        self.mock_prop_host2['parent'] = vim.ClusterComputeResource('cluster')
+        self.mock_prop_host3['parent'] = vim.Datacenter('dc')
+        mock_get_cl_name = MagicMock(
+            side_effect=['fake_bad_cluster', 'fake_good_cluster'])
         with patch('salt.utils.vmware.get_mors_with_properties',
                    MagicMock(return_value=self.mock_prop_hosts)):
-            res = salt.utils.vmware.get_hosts(self.mock_si,
-                                              datacenter_name='fake_datacenter',
-                                              cluster_name='fake_good_cluster',
-                                              get_all_hosts=True)
-
self.assertEqual(res, [self.mock_host2, self.mock_host3]) + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_cl_name): + res = salt.utils.vmware.get_hosts( + self.mock_si, datacenter_name='fake_datacenter', + cluster_name='fake_good_cluster', get_all_hosts=True) + self.assertEqual(mock_get_cl_name.call_count, 2) + self.assertEqual(res, [self.mock_host2]) def test_no_hosts(self): with patch('salt.utils.vmware.get_mors_with_properties', diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 43434225ae3..8f9a069149b 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -264,14 +264,14 @@ class GetDatastoresTestCase(TestCase): mock_reference, get_all_datastores=True) - mock_traversal_spec_init.assert_called([ + mock_traversal_spec_init.assert_has_calls([ + call(path='datastore', + skip=False, + type=vim.Datacenter), call(path='childEntity', selectSet=['traversal'], skip=False, - type=vim.Folder), - call(path='datastore', - skip=False, - type=vim.Datacenter)]) + type=vim.Folder)]) def test_unsupported_reference_type(self): class FakeClass(object): @@ -379,7 +379,7 @@ class RenameDatastoreTestCase(TestCase): with self.assertRaises(VMwareApiError) as excinfo: salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') - self.assertEqual(excinfo.exception.message, 'vim_fault') + self.assertEqual(excinfo.exception.strerror, 'vim_fault') def test_rename_datastore_raise_runtime_fault(self): exc = vmodl.RuntimeFault() @@ -388,7 +388,7 @@ class RenameDatastoreTestCase(TestCase): with self.assertRaises(VMwareRuntimeError) as excinfo: salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') - self.assertEqual(excinfo.exception.message, 'runtime_fault') + self.assertEqual(excinfo.exception.strerror, 'runtime_fault') def test_rename_datastore(self): salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name')
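A note on the pattern the test cases above exercise: every ``salt.utils.vmware`` helper touched in these diffs is expected to translate pyVmomi faults into Salt exceptions — ``vim.fault.NoPermission`` and ``vim.fault.VimFault`` become ``VMwareApiError`` (the former with a ``Not enough permissions. Required privilege: ...`` message), while ``vmodl.RuntimeFault`` becomes ``VMwareRuntimeError``. The sketch below is a minimal reconstruction of that translation scheme, not the actual ``salt.utils.vmware`` source; the ``destroy_object`` helper name is hypothetical, and it assumes pyVmomi is installed.

.. code-block:: python

    import logging

    from pyVmomi import vim, vmodl

    from salt.exceptions import VMwareApiError, VMwareRuntimeError

    log = logging.getLogger(__name__)


    def destroy_object(mo_ref):
        # Hypothetical helper illustrating the fault-translation pattern the
        # assertions above encode (strerror is checked against these
        # messages); this is a sketch, not the salt.utils.vmware code.
        try:
            task = mo_ref.Destroy_Task()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise VMwareApiError('Not enough permissions. Required privilege: '
                                 '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise VMwareRuntimeError(exc.msg)
        return task

The same three ``except`` clauses, in this order, appear around each API call under test (``CreateDVS_Task``, ``ReconfigureDvs_Task``, ``CreateDVPortgroup_Task``, ``Destroy_Task``, and so on), which is why each test class carries the matching trio of ``*_raises_no_permission``, ``*_raises_vim_fault``, and ``*_raises_runtime_fault`` cases.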