From 068afc369d1d13ae306d0d4e1e145d268e9175e4 Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Sun, 26 Feb 2017 17:18:24 +0100 Subject: [PATCH 001/348] Added the saltmod.parallel_runners state. This new state is intended for use with the orchestrate runner. It is used in a way very similar to saltmod.runner, except that it executes multiple runners in parallel. --- salt/states/saltmod.py | 174 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 0b3735fb281..bc1186d1d0d 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -26,6 +26,8 @@ from __future__ import absolute_import # Import python libs import fnmatch import logging +import sys +import threading import time # Import salt libs @@ -60,6 +62,47 @@ def _fire_args(tag_data): ) +def _parallel_map(func, inputs): + ''' + Applies a function to each element of a list, returning the resulting list. + + A separate thread is created for each element in the input list and the + passed function is called for each of the elements. When all threads have + finished execution a list with the results corresponding to the inputs is + returned. + + If one of the threads fails (because the function throws an exception), + that exception is reraised. If more than one thread fails, the exception + from the first thread (according to the index of the input element) is + reraised. + + func: + function that is applied on each input element. + inputs: + list of elements that shall be processed. The length of this list also + defines the number of threads created. 
+ ''' + outputs = len(inputs) * [None] + errors = len(inputs) * [None] + def create_thread(index): + def run_thread(): + try: + outputs[index] = func(inputs[index]) + except: + errors[index] = sys.exc_info() + thread = threading.Thread(target=run_thread) + thread.start() + return thread + threads = list(six.moves.map(create_thread, six.moves.range(len(inputs)))) + for thread in threads: + thread.join() + for error in errors: + if error is not None: + exc_type, exc_value, exc_traceback = error + six.reraise(exc_type, exc_value, exc_traceback) + return outputs + + def state(name, tgt, ssh=False, @@ -689,6 +732,137 @@ def runner(name, **kwargs): return ret +def parallel_runners(name, runners): + ''' + Executes multiple runner modules on the master in parallel. + + .. versionadded:: 2017.x.0 (Nitrogen) + + A separate process is spawned for each runner. This state is intended to be + used with the orchestrate runner in place of the ``saltmod.runner`` state + when different tasks should be run in parallel. In general, Salt states are + not safe when used concurrently, so ensure that they are used in a safe way + (e.g. by only targeting separate minions in parallel tasks). + + name: + name identifying this state. The name is provided as part of the + output, but not used for anything else. + + runners: + list of runners that should be run in parallel. Each element of the + list has to be a dictionary. This dictionary's name entry stores the + name of the runner function that shall be invoked. The optional kwarg + entry stores a dictionary of named arguments that are passed to the + runner function. + + .. 
code-block:: yaml + + parallel-state: + saltext.parallel-runner: + - runners: + - name: state.orchestrate + kwarg: + mods: orchestrate_state_1 + - name: state.orcestrate + kwarg: + mods: orchestrate_state_2 + ''' + try: + jid = __orchestration_jid__ + except NameError: + log.debug( + 'Unable to fire args event due to missing __orchestration_jid__') + jid = None + + def call_runner(runner_config): + return __salt__['saltutil.runner'](runner_config['name'], + __orchestration_jid__=jid, + __env__=__env__, + full_return=True, + **(runner_config['kwarg'])) + + outputs = _parallel_map(call_runner, runners) + + success = six.moves.reduce( + lambda x, y: x and y, + [not ('success' in out and not out['success']) for out in outputs], + True) + + def find_new_and_old_in_changes(data, prefix): + if isinstance(data, dict) and data: + if 'new' in data and 'old' in data: + return [(prefix, {'new': data['new'], 'old': data['old']})] + else: + return [ + change_item + for key, value in six.iteritems(data) + for change_item in find_new_and_old_in_changes( + value, prefix + '[' + str(key) + ']') + ] + if isinstance(data, list) and list: + return [ + change_item + for index, value in six.moves.zip( + six.moves.range(len(data)), data) + for change_item in find_new_and_old_in_changes( + value, prefix + '[' + str(index) + ']') + ] + else: + return [] + def find_changes(data, prefix): + if isinstance(data, dict) and data: + if 'changes' in data: + return find_new_and_old_in_changes(data['changes'], prefix) + else: + return [ + change_item + for key, value in six.iteritems(data) + for change_item in find_changes( + value, prefix + '[' + str(key) + ']') + ] + else: + return [] + def find_changes_in_output(output, index): + try: + data = output['return']['data'] + except KeyError: + data = {} + return find_changes(data, '[' + str(index) + ']') + changes = dict([ + change_item + for change_items in six.moves.map( + find_changes_in_output, outputs, six.moves.range(len(outputs))) + for 
change_item in change_items + ]) + + def generate_comment(index, out): + runner_failed = 'success' in out and not out['success'] + runner_return = out.get('return') + comment = ( + 'Runner ' + str(index) + ' was ' + + ('not ' if runner_failed else '') + + 'successful and returned ' + + (str(runner_return) if runner_return else ' nothing') + '.') + return comment + comment = '\n'.join(six.moves.map(generate_comment, + six.moves.range(len(outputs)), + outputs)) + + ret = { + 'name': name, + 'result': success, + 'changes': changes, + 'comment': comment + } + + ret['__orchestration__'] = True + # The 'runner' function includes out['jid'] as '__jid__' in the returned + # dict, but we cannot do this here because we have more than one JID if + # we have more than one runner. + + return ret + + def wheel(name, **kwargs): ''' Execute a wheel module on the master From 1582e92e8eb0dee0e3502e9a134a448f47ec3353 Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Mon, 27 Feb 2017 21:29:22 +0100 Subject: [PATCH 002/348] Fixed the documentation (thread vs. process). The documentation erroneously used the word process in one place where thread would actually have been correct. This commit fixes this issue. --- salt/states/saltmod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index bc1186d1d0d..4dca00fb456 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -738,7 +738,7 @@ def parallel_runners(name, runners): .. versionadded:: 2017.x.0 (Nitrogen) - A separate process is spawned for each runner. This state is intended to be + A separate thread is spawned for each runner. This state is intended to be used with the orchestrate runner in place of the ``saltmod.runner`` state when different tasks should be run in parallel. 
In general, Salt states are not safe when used concurrently, so ensure that they are used in a safe way From c3b9035e41433fe212cbc6dfe781e07b75757823 Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Sun, 5 Mar 2017 21:11:34 +0100 Subject: [PATCH 003/348] Fix two typos in docs for salt.parallel_runners. --- salt/states/saltmod.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 4dca00fb456..ddbf8bcb24b 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -758,12 +758,12 @@ def parallel_runners(name, runners): .. code-block:: yaml parallel-state: - saltext.parallel-runner: + salt.parallel-runner: - runners: - name: state.orchestrate kwarg: mods: orchestrate_state_1 - - name: state.orcestrate + - name: state.orchestrate kwarg: mods: orchestrate_state_2 ''' From 7155bdf563a2d934c18803351f84cf0768fb5dbe Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Sun, 5 Mar 2017 22:30:10 +0100 Subject: [PATCH 004/348] Allow for a missing kwarg parameter. The code in saltmod.parallel_runners would fail if the (optional) kwarg argument was missing. This is fixed by using an empty dictionary for kwarg by default. --- salt/states/saltmod.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index ddbf8bcb24b..fffdd507d5b 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -779,7 +779,8 @@ def parallel_runners(name, runners): __orchestration_jid__=jid, __env__=__env__, full_return=True, - **(runner_config['kwarg'])) + **(runner_config.get(['kwarg'], + {}))) outputs = _parallel_map(call_runner, runners) From 536093b696fef1c9952268d78966202559235dac Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Thu, 9 Mar 2017 14:14:48 +0100 Subject: [PATCH 005/348] Fixed incorrect use of list as dict key. The name parameter in a call to dict.get(...) 
was accidentally wrapped in brackets, leading to a TypeError ("unhashable type: 'list'"). --- salt/states/saltmod.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index fffdd507d5b..2900382f83e 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -779,8 +779,7 @@ def parallel_runners(name, runners): __orchestration_jid__=jid, __env__=__env__, full_return=True, - **(runner_config.get(['kwarg'], - {}))) + **(runner_config.get('kwarg', {}))) outputs = _parallel_map(call_runner, runners) From 7804a95480384808a987af5bbb7f12ed782257c1 Mon Sep 17 00:00:00 2001 From: Sebastian Marsching Date: Wed, 15 Mar 2017 17:26:09 +0100 Subject: [PATCH 006/348] Improve configuration format and merging of outputs. The configuration format for specifying the list of runners has been changed so that it matches the format used in other places. The merging of outputs from the runners has been improved so that the outputs are correctly passed on regardless of the format used by the runner. --- salt/states/saltmod.py | 197 ++++++++++++++++++++++++++--------------- 1 file changed, 125 insertions(+), 72 deletions(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 2900382f83e..1f6794972bd 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -32,6 +32,8 @@ import time # Import salt libs import salt.syspaths +import salt.exceptions +import salt.output import salt.utils import salt.utils.event import salt.ext.six as six @@ -88,7 +90,7 @@ def _parallel_map(func, inputs): def run_thread(): try: outputs[index] = func(inputs[index]) - except: + except: # pylint: disable=bare-except errors[index] = sys.exc_info() thread = threading.Thread(target=run_thread) thread.start() @@ -758,15 +760,49 @@ def parallel_runners(name, runners): .. 
code-block:: yaml parallel-state: - salt.parallel-runner: + salt.parallel_runners: - runners: - - name: state.orchestrate - kwarg: - mods: orchestrate_state_1 - - name: state.orchestrate - kwarg: - mods: orchestrate_state_2 + my_runner_1: + - name: state.orchestrate + - kwarg: + mods: orchestrate_state_1 + my_runner_2: + - name: state.orchestrate + - kwarg: + mods: orchestrate_state_2 ''' + # For the sake of consistency, we treat a single string in the same way as + # a key without a value. This allows something like + # salt.parallel_runners: + # - runners: + # state.orchestrate + # Obviously, this will only work if the specified runner does not need any + # arguments. + if isinstance(runners, six.string_types): + runners = {runners: [{name: runners}]} + # If the runners argument is not a string, it must be a dict. Everything + # else is considered an error. + if not isinstance(runners, dict): + return { + 'name': name, + 'result': False, + 'changes': {}, + 'comment': 'The runners parameter must be a string or dict.' + } + # The configuration for each runner is given as a list of key-value pairs. + # This is not very useful for what we want to do, but it is the typical + # style used in Salt. For further processing, we convert each of these + # lists to a dict. This also makes it easier to check whether a name has + # been specified explicitly. 
+ for runner_id, runner_config in six.iteritems(runners): + if runner_config is None: + runner_config = {} + else: + runner_config = salt.utils.repack_dictlist(runner_config) + if 'name' not in runner_config: + runner_config['name'] = runner_id + runners[runner_id] = runner_config + try: jid = __orchestration_jid__ except NameError: @@ -781,81 +817,98 @@ def parallel_runners(name, runners): full_return=True, **(runner_config.get('kwarg', {}))) - outputs = _parallel_map(call_runner, runners) + try: + outputs = _parallel_map(call_runner, list(six.itervalues(runners))) + except salt.exceptions.SaltException as exc: + return { + 'name': name, + 'result': False, + 'success': False, + 'changes': {}, + 'comment': 'One of the runners raised an exception: {0}'.format( + exc) + } + # We bundle the results of the runners with the IDs of the runners so that + # we can easily identify which output belongs to which runner. At the same + # time we exctract the actual return value of the runner (saltutil.runner + # adds some extra information that is not interesting to us). + outputs = { + runner_id: out['return']for runner_id, out in + six.moves.zip(six.iterkeys(runners), outputs) + } - success = six.moves.reduce( - lambda x, y: x and y, - [not ('success' in out and not out['success']) for out in outputs], - True) - - def find_new_and_old_in_changes(data, prefix): - if isinstance(data, dict) and data: - if 'new' in data and 'old' in data: - return [(prefix, {'new': data['new'], 'old': data['old']})] + # If each of the runners returned its output in the format compatible with + # the 'highstate' outputter, we can leverage this fact when merging the + # outputs. + highstate_output = all( + [out.get('outputter', '') == 'highstate' and 'data' in out for out in + six.itervalues(outputs)] + ) + # The following helper function is used to extract changes from highstate + # output. 
+ def extract_changes(obj): + if not isinstance(obj, dict): + return {} + elif 'changes' in obj: + if (isinstance(obj['changes'], dict) + and obj['changes'].get('out', '') == 'highstate' + and 'ret' in obj['changes']): + return obj['changes']['ret'] else: - return [ - change_item - for key, value in six.iteritems(data) - for change_item in find_new_and_old_in_changes( - value, prefix + '[' + str(key) + ']') - ] - if isinstance(data, list) and list: - return [ - change_item - for index, value in six.moves.zip( - six.moves.range(len(data)), data) - for change_item in find_new_and_old_in_changes( - value, prefix + '[' + str(index) + ']') + return obj['changes'] + else: + found_changes = {} + for key, value in six.iteritems(obj): + change = extract_changes(value) + if change: + found_changes[key] = change + return found_changes + if highstate_output: + failed_runners = [runner_id for runner_id, out in + six.iteritems(outputs) if + out['data'].get('retcode', 0) != 0] + all_successful = not failed_runners + if all_successful: + comment = 'All runner functions executed successfully.' + else: + runner_comments = [ + 'Runner {0} failed with return value:\n{1}'.format( + runner_id, + salt.output.out_format(outputs[runner_id], + 'nested', + __opts__, + nested_indent=2) + ) for runner_id in failed_runners ] + comment = '\n'.join(runner_comments) + changes = {} + for runner_id, out in six.iteritems(outputs): + runner_changes = extract_changes(out['data']) + if runner_changes: + changes[runner_id] = runner_changes + else: + failed_runners = [runner_id for runner_id, out in + six.iteritems(outputs) if + out.get('exit_code', 0) != 0] + all_successful = not failed_runners + if all_successful: + comment = 'All runner functions executed successfully.' 
else: - return [] - def find_changes(data, prefix): - if isinstance(data, dict) and data: - if 'changes' in data: - return find_new_and_old_in_changes(data['changes'], prefix) + if len(failed_runners) == 1: + comment = 'Runner {0} failed.'.format(failed_runners[0]) else: - return [ - change_item - for key, value in six.iteritems(data) - for change_item in find_changes( - value, prefix + '[' + str(key) + ']') - ] - else: - return [] - def find_changes_in_output(output, index): - try: - data = output['return']['data'] - except KeyError: - data = {} - return find_changes(data, '[' + str(index) + ']') - changes = dict([ - change_item - for change_items in six.moves.map( - find_changes_in_output, outputs, six.moves.range(len(outputs))) - for change_item in change_items - ]) - - def generate_comment(index, out): - runner_failed = 'success' in out and not out['success'] - runner_return = out.get('return') - comment = ( - 'Runner ' + str(index) + ' was ' - + ('not ' if runner_failed else '') - + 'successful and returned ' - + (str(runner_return) if runner_return else ' nothing') + '.') - return comment - comment = '\n'.join(six.moves.map(generate_comment, - six.moves.range(len(outputs)), - outputs)) - + comment =\ + 'Runners {0} failed.'.format(', '.join(failed_runners)) + changes = {'ret': { + runner_id: out for runner_id, out in six.iteritems(outputs) + }} ret = { 'name': name, - 'result': success, + 'result': all_successful, 'changes': changes, 'comment': comment } - ret['__orchestration__'] = True # The 'runner' function includes out['jid'] as '__jid__' in the returned # dict, but we cannot do this here because we have more than one JID if # we have more than one runner. 
From 924d039ca48dfaadcfca012cee3452a89592ca74 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 21 Mar 2017 11:48:12 -0600 Subject: [PATCH 007/348] Newlines for lint compliance --- salt/states/saltmod.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 1f6794972bd..6ed9d2b6dca 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -86,6 +86,7 @@ def _parallel_map(func, inputs): ''' outputs = len(inputs) * [None] errors = len(inputs) * [None] + def create_thread(index): def run_thread(): try: @@ -844,8 +845,10 @@ def parallel_runners(name, runners): [out.get('outputter', '') == 'highstate' and 'data' in out for out in six.itervalues(outputs)] ) + # The following helper function is used to extract changes from highstate # output. + def extract_changes(obj): if not isinstance(obj, dict): return {} From 567e5ea2575710d2989362b89329d1b8bd21c15b Mon Sep 17 00:00:00 2001 From: "dnABic (Andreja Babic)" Date: Sun, 14 May 2017 15:10:45 +0200 Subject: [PATCH 008/348] added parameter apply_to for rabbitmq policy --- salt/modules/rabbitmq.py | 4 +++- salt/states/rabbitmq_policy.py | 7 +++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/modules/rabbitmq.py b/salt/modules/rabbitmq.py index b55f2dd1737..0a3d7985672 100644 --- a/salt/modules/rabbitmq.py +++ b/salt/modules/rabbitmq.py @@ -848,7 +848,7 @@ def list_policies(vhost="/", runas=None): return ret -def set_policy(vhost, name, pattern, definition, priority=None, runas=None): +def set_policy(vhost, name, pattern, definition, apply_to=None, priority=None, runas=None): ''' Set a policy based on rabbitmqctl set_policy. 
@@ -871,6 +871,8 @@ def set_policy(vhost, name, pattern, definition, priority=None, runas=None): cmd = [RABBITMQCTL, 'set_policy', '-p', vhost] if priority: cmd.extend(['--priority', priority]) + if apply_to: + cmd.extend(['--apply-to', apply_to]) cmd.extend([name, pattern, definition]) res = __salt__['cmd.run_all'](cmd, runas=runas, python_shell=False) log.debug('Set policy: {0}'.format(res['stdout'])) diff --git a/salt/states/rabbitmq_policy.py b/salt/states/rabbitmq_policy.py index 37c4c8ff778..16801abe253 100644 --- a/salt/states/rabbitmq_policy.py +++ b/salt/states/rabbitmq_policy.py @@ -36,6 +36,7 @@ def __virtual__(): def present(name, pattern, definition, + apply_to=None, priority=0, vhost='/', runas=None): @@ -52,6 +53,8 @@ def present(name, A json dict describing the policy priority Priority (defaults to 0) + apply_to + Apply policy to 'queues', 'exchanges' or 'all' (defailt to 'all') vhost Virtual host to apply to (defaults to '/') runas @@ -68,6 +71,8 @@ def present(name, updates.append('Pattern') if policy.get('definition') != definition: updates.append('Definition') + if apply_to and (policy.get('apply-to') != apply_to): + updates.append('Applyto') if int(policy.get('priority')) != priority: updates.append('Priority') @@ -85,6 +90,7 @@ def present(name, name, pattern, definition, + apply_to, priority=priority, runas=runas) elif updates: @@ -97,6 +103,7 @@ def present(name, name, pattern, definition, + apply_to, priority=priority, runas=runas) From eb73bac25fec711ae494cf855888d837fedc1a8e Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 22 Aug 2017 15:11:42 -0600 Subject: [PATCH 009/348] Switch order of apply_to and priority --- salt/modules/rabbitmq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/rabbitmq.py b/salt/modules/rabbitmq.py index 0a3d7985672..0a5cb82df89 100644 --- a/salt/modules/rabbitmq.py +++ b/salt/modules/rabbitmq.py @@ -848,7 +848,7 @@ def list_policies(vhost="/", runas=None): return ret -def 
set_policy(vhost, name, pattern, definition, apply_to=None, priority=None, runas=None): +def set_policy(vhost, name, pattern, definition, priority=None, apply_to=None, runas=None): ''' Set a policy based on rabbitmqctl set_policy. From 32d7d34fe599821297a0c5b9ef842b3dac9b808c Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 23 Aug 2017 21:31:28 +0200 Subject: [PATCH 010/348] First simple draft for the deletion verification --- salt/modules/kubernetes.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 2e17b114443..ba530db26b3 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -40,6 +40,7 @@ import base64 import logging import yaml import tempfile +from time import sleep from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems @@ -692,7 +693,12 @@ def delete_deployment(name, namespace='default', **kwargs): name=name, namespace=namespace, body=body) - return api_response.to_dict() + mutable_api_response = api_response.to_dict() + while show_deployment(name, namespace) is not None: + sleep(0.5) + else: + mutable_api_response['code'] = 200 + return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None From 767af9bb4fc2c1a5916612c75ff4abb274a9b268 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 28 Aug 2017 08:53:52 +0200 Subject: [PATCH 011/348] Added timeout for checking the deployment If the time limit is hit, the checking is aborted and we return with return-code None. 
--- salt/modules/kubernetes.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index ba530db26b3..a842c7ccf1c 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -40,7 +40,9 @@ import base64 import logging import yaml import tempfile +import signal from time import sleep +from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems @@ -68,6 +70,9 @@ log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' +_polling_time_limit = 20 + + def __virtual__(): ''' @@ -79,6 +84,21 @@ def __virtual__(): return False, 'python kubernetes library not found' +class TimeoutException(Exception): + pass + + +@contextmanager +def _time_limit(seconds): + def signal_handler(signum, frame): + raise TimeoutException, "Timed out!" + signal.signal(signal.SIGALRM, signal_handler) + signal.alarm(seconds) + try: + yield + finally: + signal.alarm(0) + # pylint: disable=no-member def _setup_conn(**kwargs): ''' @@ -694,10 +714,14 @@ def delete_deployment(name, namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - while show_deployment(name, namespace) is not None: - sleep(0.5) - else: - mutable_api_response['code'] = 200 + try: + with _time_limit(_polling_time_limit): + while show_deployment(name, namespace) is not None: + sleep(1) + else: + mutable_api_response['code'] = 200 + except TimeoutException: + pass return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: From 52b1cb814752e534c9faefbc3748484619f31a0f Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 28 Aug 2017 17:01:43 +0200 Subject: [PATCH 012/348] Compatibility with Python3.6 --- salt/modules/kubernetes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/kubernetes.py 
b/salt/modules/kubernetes.py index a842c7ccf1c..39a4da473ec 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -91,7 +91,7 @@ class TimeoutException(Exception): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): - raise TimeoutException, "Timed out!" + raise(TimeoutException, "Timed out!") signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: @@ -99,6 +99,7 @@ def _time_limit(seconds): finally: signal.alarm(0) + # pylint: disable=no-member def _setup_conn(**kwargs): ''' From 3fe623778e9f7cce5a020b43e111627cc8c42e55 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Tue, 29 Aug 2017 09:25:04 +0200 Subject: [PATCH 013/348] Added Windows fallback Linux uses signal.alarm to just terminate the polling when a time limit is hit.For Windows are are just counting the loop cycles. --- salt/modules/kubernetes.py | 51 +++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 39a4da473ec..c089bab05d2 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -41,6 +41,7 @@ import logging import yaml import tempfile import signal +from sys import platform from time import sleep from contextlib import contextmanager @@ -70,9 +71,6 @@ log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' -_polling_time_limit = 20 - - def __virtual__(): ''' @@ -88,16 +86,19 @@ class TimeoutException(Exception): pass -@contextmanager -def _time_limit(seconds): - def signal_handler(signum, frame): - raise(TimeoutException, "Timed out!") - signal.signal(signal.SIGALRM, signal_handler) - signal.alarm(seconds) - try: - yield - finally: - signal.alarm(0) +if not platform.startswith("win"): + @contextmanager + def _time_limit(seconds): + def signal_handler(signum, frame): + raise(TimeoutException, "Timed out!") + signal.signal(signal.SIGALRM, signal_handler) + signal.alarm(seconds) + try: + yield + 
finally: + signal.alarm(0) + + _polling_time_limit = 30 # pylint: disable=no-member @@ -715,14 +716,24 @@ def delete_deployment(name, namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - try: - with _time_limit(_polling_time_limit): - while show_deployment(name, namespace) is not None: - sleep(1) - else: + if not platform.startswith("win"): + try: + with _time_limit(_polling_time_limit): + while show_deployment(name, namespace) is not None: + sleep(1) + else: + mutable_api_response['code'] = 200 + except TimeoutException: + pass + else: + # Windows has not signal.alarm implementation, so we are just falling + # back to loop-counting. + for i in range(60): + if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 - except TimeoutException: - pass + break + else: + sleep(1) return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: From 702a058c38ea570d8fc323fd32132088f814d16f Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Tue, 29 Aug 2017 11:33:37 +0200 Subject: [PATCH 014/348] Fixed linting * Python3 error for exception raising and * Disabled linting for while-loop since the loop is broken via timeout. 
--- salt/modules/kubernetes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index c089bab05d2..7e01242e499 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -90,7 +90,7 @@ if not platform.startswith("win"): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): - raise(TimeoutException, "Timed out!") + raise TimeoutException signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: @@ -719,7 +719,7 @@ def delete_deployment(name, namespace='default', **kwargs): if not platform.startswith("win"): try: with _time_limit(_polling_time_limit): - while show_deployment(name, namespace) is not None: + while show_deployment(name, namespace) is not None: # pylint: disable=useless-else-on-loop sleep(1) else: mutable_api_response['code'] = 200 From 56938d5bf28fd829f0d5dd03ac20efd04790c619 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 31 Aug 2017 16:50:22 +0300 Subject: [PATCH 015/348] Fix ldap token groups auth. 
--- salt/auth/__init__.py | 34 +++++++++++++++++++++------------- salt/auth/ldap.py | 4 ++-- salt/daemons/masterapi.py | 21 +++------------------ salt/master.py | 21 +++------------------ 4 files changed, 29 insertions(+), 51 deletions(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index f90488e153c..e39ecf83733 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -200,7 +200,7 @@ class LoadAuth(object): ''' if not self.authenticate_eauth(load): return {} - fstr = '{0}.auth'.format(load['eauth']) + hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) tok = str(hash_type(os.urandom(512)).hexdigest()) t_path = os.path.join(self.opts['token_dir'], tok) @@ -224,8 +224,9 @@ class LoadAuth(object): acl_ret = self.__get_acl(load) tdata['auth_list'] = acl_ret - if 'groups' in load: - tdata['groups'] = load['groups'] + groups = self.get_groups(load) + if groups: + tdata['groups'] = groups try: with salt.utils.files.set_umask(0o177): @@ -345,7 +346,7 @@ class LoadAuth(object): return False return True - def get_auth_list(self, load): + def get_auth_list(self, load, token=None): ''' Retrieve access list for the user specified in load. The list is built by eauth module or from master eauth configuration. @@ -353,30 +354,37 @@ class LoadAuth(object): list if the user has no rights to execute anything on this master and returns non-empty list if user is allowed to execute particular functions. ''' + # Get auth list from token + if token and self.opts['keep_acl_in_token'] and 'auth_list' in token: + return token['auth_list'] # Get acl from eauth module. 
auth_list = self.__get_acl(load) if auth_list is not None: return auth_list - if load['eauth'] not in self.opts['external_auth']: + eauth = token['eauth'] if token else load['eauth'] + if eauth not in self.opts['external_auth']: # No matching module is allowed in config log.warning('Authorization failure occurred.') return None - name = self.load_name(load) # The username we are attempting to auth with - groups = self.get_groups(load) # The groups this user belongs to - eauth_config = self.opts['external_auth'][load['eauth']] - if groups is None or groups is False: + if token: + name = token['name'] + groups = token['groups'] + else: + name = self.load_name(load) # The username we are attempting to auth with + groups = self.get_groups(load) # The groups this user belongs to + eauth_config = self.opts['external_auth'][eauth] + if not groups: groups = [] group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups # First we need to know if the user is allowed to proceed via any of their group memberships. group_auth_match = False for group_config in group_perm_keys: - group_config = group_config.rstrip('%') - for group in groups: - if group == group_config: - group_auth_match = True + if group_config.rstrip('%') in groups: + group_auth_match = True + break # If a group_auth_match is set it means only that we have a # user which matches at least one or more of the groups defined # in the configuration file. 
diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py index 396c1d00a2e..30654298152 100644 --- a/salt/auth/ldap.py +++ b/salt/auth/ldap.py @@ -306,7 +306,7 @@ def groups(username, **kwargs): ''' group_list = [] - bind = _bind(username, kwargs['password'], + bind = _bind(username, kwargs.get('password'), anonymous=_config('anonymous', mandatory=False)) if bind: log.debug('ldap bind to determine group membership succeeded!') @@ -371,7 +371,7 @@ def groups(username, **kwargs): search_results = bind.search_s(search_base, ldap.SCOPE_SUBTREE, search_string, - [_config('accountattributename'), 'cn']) + [_config('accountattributename'), 'cn', _config('groupattribute')]) for _, entry in search_results: if username in entry[_config('accountattributename')]: group_list.append(entry['cn'][0]) diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 9ca6c582fb9..d47a5c3aa64 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -1055,12 +1055,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) else: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1102,12 +1097,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) elif 'eauth' in load: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1217,12 
+1207,7 @@ class LocalFuncs(object): return '' # Get acl from eauth module. - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( diff --git a/salt/master.py b/salt/master.py index 649a89a0722..b913aeb1e53 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1705,12 +1705,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.runner_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', @@ -1774,12 +1769,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.wheel_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', message=('Authentication failure of type "token" occurred for ' @@ -1900,12 +1890,7 @@ class ClearFuncs(object): return '' # Get acl - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = 
self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( From f29f5b0cce79f7ef00c52fc7474f8724606030a6 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 31 Aug 2017 20:39:35 +0300 Subject: [PATCH 016/348] Fix for tests: don't require 'groups' in the eauth token. --- salt/auth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index e39ecf83733..73e4c98f8ae 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -370,7 +370,7 @@ class LoadAuth(object): if token: name = token['name'] - groups = token['groups'] + groups = token.get('groups') else: name = self.load_name(load) # The username we are attempting to auth with groups = self.get_groups(load) # The groups this user belongs to From 227a753454875a48836b86a097305bb1a285055e Mon Sep 17 00:00:00 2001 From: Levi Dahl Michelsen Date: Tue, 5 Sep 2017 13:21:22 +0200 Subject: [PATCH 017/348] Added RethinkDB external pillar module --- salt/pillar/rethinkdb_pillar.py | 160 ++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 salt/pillar/rethinkdb_pillar.py diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py new file mode 100644 index 00000000000..377192a6de5 --- /dev/null +++ b/salt/pillar/rethinkdb_pillar.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +''' +Provide external pillar data from RethinkDB + +:depends: rethinkdb (on the salt-master) + + +salt master rethinkdb configuration +=================================== +These variables must be configured in your master configuration file. + * ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'`` + * ``rethinkdb.port`` - The port the RethinkDB server listens on. + Defaults to ``'28015'`` + * ``rethinkdb.database`` - The database to connect to. + Defaults to ``'salt'`` + * ``rethinkdb.username`` - The username for connecting to RethinkDB. 
+ Defaults to ``''`` + * ``rethinkdb.password`` - The password for connecting to RethinkDB. + Defaults to ``''`` + + +salt-master ext_pillar configuration +==================================== + +The ext_pillar function arguments are given in single line dictionary notation. + +.. code-block:: yaml + + ext_pillar: + - rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar} + +In the example above the following happens. + * The salt-master will look for external pillars in the 'ext_pillar' table + on the RethinkDB host + * The minion id will be matched against the 'minion_id' field + * Pillars will be retrieved from the nested field 'pillar_root' + * Found pillars will be merged inside a key called 'external_pillar' + + +Module Documentation +==================== +''' +from __future__ import absolute_import + +# Import python libraries +import logging + +# Import 3rd party libraries +try: + import rethinkdb as r + HAS_RETHINKDB = True +except ImportError: + HAS_RETHINKDB = False + +__virtualname__ = 'rethinkdb' + +__opts__ = { + 'rethinkdb.host': 'salt', + 'rethinkdb.port': '28015', + 'rethinkdb.database': 'salt', + 'rethinkdb.username': None, + 'rethinkdb.password': None +} + + +def __virtual__(): + if not HAS_RETHINKDB: + return False + return True + + +# Configure logging +log = logging.getLogger(__name__) + + +def ext_pillar(minion_id, + pillar, + table='pillar', + id_field=None, + field=None, + pillar_key=None): + ''' + Collect minion external pillars from a RethinkDB database + +Arguments: + * `table`: The RethinkDB table containing external pillar information. + Defaults to ``'pillar'`` + * `id_field`: Field in document containing the minion id. 
+ If blank then we assume the table index matches minion ids + * `field`: Specific field in the document used for pillar data, if blank + then the entire document will be used + * `pillar_key`: The salt-master will nest found external pillars under + this key before merging into the minion pillars. If blank, external + pillars will be merged at top level + ''' + host = __opts__['rethinkdb.host'] + port = __opts__['rethinkdb.port'] + database = __opts__['rethinkdb.database'] + username = __opts__['rethinkdb.username'] + password = __opts__['rethinkdb.password'] + + log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar' + .format(host, port, username)) + + # Connect to the database + conn = r.connect(host=host, + port=port, + db=database, + user=username, + password=password) + + data = None + + try: + + if id_field: + log.debug('ext_pillar.rethinkdb: looking up pillar. ' + 'table: {0}, field: {1}, minion: {2}'.format( + table, id_field, minion_id)) + + if field: + data = r.table(table).filter( + {id_field: minion_id}).pluck(field).run(conn) + else: + data = r.table(table).filter({id_field: minion_id}).run(conn) + + else: + log.debug('ext_pillar.rethinkdb: looking up pillar. 
' + 'table: {0}, field: id, minion: {1}'.format( + table, minion_id)) + + if field: + data = r.table(table).get(minion_id).pluck(field).run(conn) + else: + data = r.table(table).get(minion_id).run(conn) + + finally: + if conn.is_open(): + conn.close() + + if data.items: + + # Return nothing if multiple documents are found for a minion + if len(data.items) > 1: + log.error('ext_pillar.rethinkdb: ambiguous documents found for ' + 'minion {0}'.format(minion_id)) + return {} + + else: + for document in data: + result = document + + if pillar_key: + return {pillar_key: result} + return result + + else: + # No document found in the database + log.debug('ext_pillar.rethinkdb: no document found') + return {} From 09dfa1d8006f2e6cdedb0aebbd7d5abcaa6f9e96 Mon Sep 17 00:00:00 2001 From: Levi Dahl Michelsen Date: Tue, 5 Sep 2017 19:53:57 +0200 Subject: [PATCH 018/348] Replaced for loop on result cursor with data.items.pop() --- salt/pillar/rethinkdb_pillar.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index 377192a6de5..5156b23d500 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -147,8 +147,7 @@ Arguments: return {} else: - for document in data: - result = document + result = data.items.pop() if pillar_key: return {pillar_key: result} @@ -158,3 +157,4 @@ Arguments: # No document found in the database log.debug('ext_pillar.rethinkdb: no document found') return {} + From b3934c8431afec250ba502eb5eb1cad25fea09a0 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 5 Sep 2017 16:22:08 -0600 Subject: [PATCH 019/348] Remove trailing newlines --- salt/pillar/rethinkdb_pillar.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index 5156b23d500..0a4793205f6 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -157,4 +157,3 @@ Arguments: # No document found in 
the database log.debug('ext_pillar.rethinkdb: no document found') return {} - From 99fe1383254188417697fbea62648e1976edc488 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 6 Sep 2017 08:52:23 +0200 Subject: [PATCH 020/348] Code styling and added log message for timeout --- salt/modules/kubernetes.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 7e01242e499..dcc365a4713 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -98,7 +98,7 @@ if not platform.startswith("win"): finally: signal.alarm(0) - _polling_time_limit = 30 + POLLING_TIME_LIMIT = 30 # pylint: disable=no-member @@ -718,7 +718,7 @@ def delete_deployment(name, namespace='default', **kwargs): mutable_api_response = api_response.to_dict() if not platform.startswith("win"): try: - with _time_limit(_polling_time_limit): + with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: # pylint: disable=useless-else-on-loop sleep(1) else: @@ -734,6 +734,10 @@ def delete_deployment(name, namespace='default', **kwargs): break else: sleep(1) + if mutable_api_response['code'] != 200: + log.warning("Reached polling time limit. Deployment is not yet " + "deleted, but we are backing off. Sorry, but you'll " + "have to check manually.") return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: From daf4948b3d2fef2a9f58b1810464664aae3c9337 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 6 Sep 2017 10:16:51 +0200 Subject: [PATCH 021/348] Catching error when PIDfile cannot be deleted Usually the PIDfile is locate in /run. If Salt is not started with root permissions, it is not able to delete the PIDfile in /run. It should be safe to just log and ignore this error, since Salt overwrites the PIDfile on the next start. 
--- salt/utils/parsers.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index ac96bec4d6c..f05949f7d2b 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -882,7 +882,14 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): # We've loaded and merged options into the configuration, it's safe # to query about the pidfile if self.check_pidfile(): - os.unlink(self.config['pidfile']) + try: + os.unlink(self.config['pidfile']) + except OSError as err: + self.info( + 'PIDfile could not be deleted: {0}'.format( + self.config['pidfile'], traceback.format_exc(err) + ) + ) def set_pidfile(self): from salt.utils.process import set_pidfile From 6e3eb76c7953f252e2fdf0ab6105c564b79af0ef Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 6 Sep 2017 13:16:10 +0200 Subject: [PATCH 022/348] Removed unused format argument --- salt/utils/parsers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index f05949f7d2b..b89a0e45c7e 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -887,7 +887,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): except OSError as err: self.info( 'PIDfile could not be deleted: {0}'.format( - self.config['pidfile'], traceback.format_exc(err) + self.config['pidfile'] ) ) From 7b600e283297bd5fa473c79984ae3b97869fcd90 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 6 Sep 2017 17:03:29 +0200 Subject: [PATCH 023/348] Added pylint-disable statements and import for salt.ext.six.moves.range --- salt/modules/kubernetes.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index dcc365a4713..ab238edc59d 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -49,6 +49,7 @@ from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems import salt.utils import 
salt.utils.templates +from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self @@ -719,9 +720,9 @@ def delete_deployment(name, namespace='default', **kwargs): if not platform.startswith("win"): try: with _time_limit(POLLING_TIME_LIMIT): - while show_deployment(name, namespace) is not None: # pylint: disable=useless-else-on-loop + while show_deployment(name, namespace) is not None: sleep(1) - else: + else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutException: pass From 842b07fd257d53046a6d968a74f66c7f015550c4 Mon Sep 17 00:00:00 2001 From: Paul Miller Date: Sun, 20 Aug 2017 09:06:39 -0400 Subject: [PATCH 024/348] Prevent spurious "Template does not exist" error This was merged previously (though slightly differently) in #39516 Took me a second to track it down and then realized that I fixed this in 2016.x --- salt/pillar/__init__.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index a62e11dc771..8d5eb7e998b 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -405,20 +405,19 @@ class Pillar(object): self.opts['pillarenv'], ', '.join(self.opts['file_roots']) ) else: - tops[self.opts['pillarenv']] = [ - compile_template( - self.client.cache_file( - self.opts['state_top'], - self.opts['pillarenv'] - ), - self.rend, - self.opts['renderer'], - self.opts['renderer_blacklist'], - self.opts['renderer_whitelist'], - self.opts['pillarenv'], - _pillar_rend=True, - ) - ] + top = self.client.cache_file(self.opts['state_top'], self.opts['pillarenv']) + if top: + tops[self.opts['pillarenv']] = [ + compile_template( + top, + self.rend, + self.opts['renderer'], + self.opts['renderer_blacklist'], + self.opts['renderer_whitelist'], + self.opts['pillarenv'], + _pillar_rend=True, + ) + ] else: for saltenv in self._get_envs(): if 
self.opts.get('pillar_source_merging_strategy', None) == "none": From 20619b24c458f56b8d690e912feed6474d63db8a Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Thu, 7 Sep 2017 10:20:46 +0200 Subject: [PATCH 025/348] Fixed test for delete_deployment Due to implementation change, we need to mock the return value. --- tests/unit/modules/test_kubernetes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index 1de939f6b00..46ac7601581 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -104,9 +104,9 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( - **{"delete_namespaced_deployment.return_value.to_dict.return_value": {}} + **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': 200}} ) - self.assertEqual(kubernetes.delete_deployment("test"), {}) + self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200}) self.assertTrue( kubernetes.kubernetes.client.ExtensionsV1beta1Api(). 
delete_namespaced_deployment().to_dict.called) From ac3386fae6c070bb05f58df750f53343f4f0c316 Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Thu, 7 Sep 2017 14:11:32 +0300 Subject: [PATCH 026/348] handle cases where a vm doesn't have "base_template_name" attribute for example, when a VM was imported from another XEN cluster etc' modified: salt/cloud/clouds/xen.py --- salt/cloud/clouds/xen.py | 59 +++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index d1caaab282b..87f5175aa45 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -129,6 +129,9 @@ def get_configured_provider(): def _get_session(): ''' Get a connection to the XenServer host + note: a session can be opened only to the pool master + if the a connection attempmt is made to a non pool master machine + an exception will be raised ''' api_version = '1.0' originator = 'salt_cloud_{}_driver'.format(__virtualname__) @@ -157,13 +160,29 @@ def _get_session(): default=False, search_global=False ) - session = XenAPI.Session(url, ignore_ssl=ignore_ssl) - log.debug('url: {} user: {} password: {}, originator: {}'.format( - url, - user, - 'XXX-pw-redacted-XXX', - originator)) - session.xenapi.login_with_password(user, password, api_version, originator) + try: + session = XenAPI.Session(url, ignore_ssl=ignore_ssl) + log.debug('url: {} user: {} password: {}, originator: {}'.format( + url, + user, + 'XXX-pw-redacted-XXX', + originator)) + session.xenapi.login_with_password(user, password, api_version, originator) + except XenAPI.Failure as ex: + ''' + if the server on the url is not the pool master, + the pool master's address will be rturned in the exception message + ''' + pool_master_addr = str(ex.__dict__['details'][1]) + slash_parts = url.split('/') + new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr + session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl) + log.debug('url: {} 
user: {} password: {}, originator: {}'.format( + url, + user, + 'XXX-pw-redacted-XXX', + originator)) + session.xenapi.login_with_password(user, password, api_version, originator) return session @@ -182,9 +201,15 @@ def list_nodes(): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + try: + base_template_name = record['other_config']['base_template_name'] + except Exception as KeyError: + base_template_name = None + log.debug( + 'VM returned no base template name: {}'.format(name)) ret[record['name_label']] = { 'id': record['uuid'], - 'image': record['other_config']['base_template_name'], + 'image': base_template_name, 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], @@ -296,10 +321,17 @@ def list_nodes_full(session=None): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + # catch cases where vm doesn't have a base template value + try: + base_template_name = record['other_config']['base_template_name'] + except Exception as KeyError: + base_template_name = None + log.debug( + 'VM returned no base template name: {}'.format(name)) vm_cfg = session.xenapi.VM.get_record(vm) vm_cfg['id'] = record['uuid'] vm_cfg['name'] = record['name_label'] - vm_cfg['image'] = record['other_config']['base_template_name'] + vm_cfg['image'] = base_template_name vm_cfg['size'] = None vm_cfg['state'] = record['power_state'] vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session) @@ -455,8 +487,15 @@ def show_instance(name, session=None, call=None): vm = _get_vm(name, session=session) record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + # catch cases where the VM doesn't have 'base_template_name' attribute + try: + base_template_name = record['other_config']['base_template_name'] + log.debug( + 'VM returned no base template 
name: {}'.format(name)) + except Exception as KeyError: + base_template_name = None ret = {'id': record['uuid'], - 'image': record['other_config']['base_template_name'], + 'image': base_template_name, 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], From 0c71da95f67197bd339f5179e206813c49bef06a Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Thu, 7 Sep 2017 15:47:13 +0200 Subject: [PATCH 027/348] Using salt method to identify MS Windows, single instead of double quotes --- salt/modules/kubernetes.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index ab238edc59d..f26630edc7f 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -87,7 +87,7 @@ class TimeoutException(Exception): pass -if not platform.startswith("win"): +if salt.utils.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): @@ -717,7 +717,7 @@ def delete_deployment(name, namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - if not platform.startswith("win"): + if salt.utils.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: @@ -736,9 +736,9 @@ def delete_deployment(name, namespace='default', **kwargs): else: sleep(1) if mutable_api_response['code'] != 200: - log.warning("Reached polling time limit. Deployment is not yet " - "deleted, but we are backing off. Sorry, but you'll " - "have to check manually.") + log.warning('Reached polling time limit. Deployment is not yet ' + 'deleted, but we are backing off. 
Sorry, but you\'ll ' + 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: From 7431ec64e3d8dbc1dd524300c0f4bcaebd88fdf8 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Fri, 8 Sep 2017 08:20:14 +0200 Subject: [PATCH 028/348] Removed unused sys import --- salt/modules/kubernetes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index f26630edc7f..aa06645660d 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -41,7 +41,6 @@ import logging import yaml import tempfile import signal -from sys import platform from time import sleep from contextlib import contextmanager From 7ead7fc48abd7d32ad3591bc109050954460e022 Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Sun, 10 Sep 2017 13:10:37 +0300 Subject: [PATCH 029/348] * if trying to connect to a XEN server which is not the pool master, the module will now switch the connection to the pool master (pool master info is returned as part of the exception raised by the XENapi when trying to get a session from a pool member) * handle a case wares a VM doesn't have a base image attribute this is the case with default system templates and imported VMs --- salt/cloud/clouds/xen.py | 49 ++++++++-------------------------------- 1 file changed, 9 insertions(+), 40 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 87f5175aa45..49a0202c70d 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -129,9 +129,6 @@ def get_configured_provider(): def _get_session(): ''' Get a connection to the XenServer host - note: a session can be opened only to the pool master - if the a connection attempmt is made to a non pool master machine - an exception will be raised ''' api_version = '1.0' originator = 'salt_cloud_{}_driver'.format(__virtualname__) @@ -153,15 +150,8 @@ def _get_session(): __opts__, 
search_global=False ) - ignore_ssl = config.get_cloud_config_value( - 'ignore_ssl', - get_configured_provider(), - __opts__, - default=False, - search_global=False - ) try: - session = XenAPI.Session(url, ignore_ssl=ignore_ssl) + session = XenAPI.Session(url) log.debug('url: {} user: {} password: {}, originator: {}'.format( url, user, @@ -170,19 +160,18 @@ def _get_session(): session.xenapi.login_with_password(user, password, api_version, originator) except XenAPI.Failure as ex: ''' - if the server on the url is not the pool master, - the pool master's address will be rturned in the exception message + if the server on the url is not the pool master, the pool master's address will be rturned in the exception message ''' pool_master_addr = str(ex.__dict__['details'][1]) slash_parts = url.split('/') new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr - session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl) - log.debug('url: {} user: {} password: {}, originator: {}'.format( - url, + session = XenAPI.Session(new_url) + log.debug('session is -> url: {} user: {} password: {}, originator:{}'.format( + new_url, user, 'XXX-pw-redacted-XXX', originator)) - session.xenapi.login_with_password(user, password, api_version, originator) + session.xenapi.login_with_password(user,password,api_version,originator) return session @@ -201,15 +190,9 @@ def list_nodes(): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: - try: - base_template_name = record['other_config']['base_template_name'] - except Exception as KeyError: - base_template_name = None - log.debug( - 'VM returned no base template name: {}'.format(name)) ret[record['name_label']] = { 'id': record['uuid'], - 'image': base_template_name, + 'image': record['other_config']['base_template_name'], 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], @@ -321,17 +304,10 @@ def 
list_nodes_full(session=None): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: - # catch cases where vm doesn't have a base template value - try: - base_template_name = record['other_config']['base_template_name'] - except Exception as KeyError: - base_template_name = None - log.debug( - 'VM returned no base template name: {}'.format(name)) vm_cfg = session.xenapi.VM.get_record(vm) vm_cfg['id'] = record['uuid'] vm_cfg['name'] = record['name_label'] - vm_cfg['image'] = base_template_name + vm_cfg['image'] = record['other_config']['base_template_name'] vm_cfg['size'] = None vm_cfg['state'] = record['power_state'] vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session) @@ -487,15 +463,8 @@ def show_instance(name, session=None, call=None): vm = _get_vm(name, session=session) record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: - # catch cases where the VM doesn't have 'base_template_name' attribute - try: - base_template_name = record['other_config']['base_template_name'] - log.debug( - 'VM returned no base template name: {}'.format(name)) - except Exception as KeyError: - base_template_name = None ret = {'id': record['uuid'], - 'image': base_template_name, + 'image': record['other_config']['base_template_name'], 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], From c471a29527551e7b3fccabd47ce4e98f1e050e80 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 8 Sep 2017 14:10:46 -0600 Subject: [PATCH 030/348] make cache dirs when spm starts --- salt/cli/spm.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/cli/spm.py b/salt/cli/spm.py index 3d347c80a8d..303e5ce65f4 100644 --- a/salt/cli/spm.py +++ b/salt/cli/spm.py @@ -14,7 +14,7 @@ from __future__ import absolute_import # Import Salt libs import salt.spm import salt.utils.parsers as parsers -from 
salt.utils.verify import verify_log +from salt.utils.verify import verify_log, verify_env class SPM(parsers.SPMParser): @@ -29,6 +29,10 @@ class SPM(parsers.SPMParser): ui = salt.spm.SPMCmdlineInterface() self.parse_args() self.setup_logfile_logger() + v_dirs = [ + self.config['cachedir'], + ] + verify_env(v_dirs, self.config['user'],) verify_log(self.config) client = salt.spm.SPMClient(ui, self.config) client.run(self.args) From 68f529ee5ea891db9e8c7d32791710c9160a04aa Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 11 Sep 2017 10:58:16 -0400 Subject: [PATCH 031/348] Add 2016.11.8 release notes --- doc/topics/releases/2016.11.8.rst | 1719 +++++++++++++++++++++++++++++ 1 file changed, 1719 insertions(+) create mode 100644 doc/topics/releases/2016.11.8.rst diff --git a/doc/topics/releases/2016.11.8.rst b/doc/topics/releases/2016.11.8.rst new file mode 100644 index 00000000000..9f4eb68dab4 --- /dev/null +++ b/doc/topics/releases/2016.11.8.rst @@ -0,0 +1,1719 @@ +============================ +Salt 2016.11.8 Release Notes +============================ + +Version 2016.11.8 is a bugfix release for :ref:`2016.11.0 `.] 
+ +Changes for v2016.11.7..v2016.11.8 +---------------------------------- + +Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): + +*Generated at: 2017-09-11T14:52:27Z* + +Statistics: + +- Total Merges: **169** +- Total Issue references: **70** +- Total PR references: **206** + +Changes: + + +- **PR** `#43271`_: (*twangboy*) Fix minor formatting issue + @ *2017-08-30T18:35:12Z* + + * cf21f91 Merge pull request `#43271`_ from twangboy/win_fix_pkg.install + * 91b062f Fix formatting issue, spaces surrounding + + +- **PR** `#43228`_: (*twangboy*) Win fix pkg.install + @ *2017-08-30T14:26:21Z* + + * 3a0b02f Merge pull request `#43228`_ from twangboy/win_fix_pkg.install + * 13dfabb Fix regex statement, add `.` + + * 31ff69f Add underscore to regex search + + * 3cf2b65 Fix spelling + + * ed030a3 Use regex to detect salt-minion install + + * e5daff4 Fix pkg.install + +- **PR** `#43191`_: (*viktorkrivak*) Fix apache.config with multiple statement + @ *2017-08-28T18:13:44Z* + + * b4c689d Merge pull request `#43191`_ from viktorkrivak/fix-apache-config-multi-entity + * c15bcbe Merge remote-tracking branch 'upstream/2016.11' into fix-apache-config-multi-entity + + * 4164047 Fix apache.config with multiple statement At this moment when you post more than one statement in config only last is used. Also file is rewrited multiple times until last statement is written. Example: salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '8080'}, {'Proxy': "Something"}]" Ends only with Proxy Something and ignore Listen 8080, This patch fix this issue. 
+ +- **PR** `#43154`_: (*lomeroe*) Backport `#43116`_ to 2016.11 + @ *2017-08-28T16:40:41Z* + + - **ISSUE** `#42279`_: (*dafyddj*) win_lgpo matches multiple policies due to startswith() + | refs: `#43116`_ `#43116`_ `#43154`_ + - **PR** `#43116`_: (*lomeroe*) Fix 42279 in develop + | refs: `#43154`_ + * b90e59e Merge pull request `#43154`_ from lomeroe/`bp-43116`_-2016.11 + * 8f593b0 verify that files exist before trying to remove them, win_file.remove raises an exception if the file does not exist + + * 33a30ba correcting bad format statement in search for policy to be disabled + + * acc3d7a correct fopen calls from salt.utils for 2016.11's utils function + + * 2da1cdd lint fix + + * 61bd12c track xml namespace to ensure policies w/duplicate IDs or Names do not conflict + + * f232bed add additional checks for ADM policies that have the same ADMX policy ID (`#42279`_) + +- **PR** `#43202`_: (*garethgreenaway*) Reverting previous augeas module changes + @ *2017-08-28T13:14:27Z* + + - **ISSUE** `#42642`_: (*githubcdr*) state.augeas + | refs: `#42669`_ `#43202`_ + * 5308c27 Merge pull request `#43202`_ from garethgreenaway/42642_2016_11_augeas_module_revert_fix + * ef7e93e Reverting this change due to it breaking other uses. 
+ +- **PR** `#43103`_: (*aogier*) genesis.bootstrap deboostrap fix + @ *2017-08-25T20:48:23Z* + + - **ISSUE** `#43101`_: (*aogier*) genesis.bootstrap fails if no pkg AND exclude_pkgs (which can't be a string) + | refs: `#43103`_ + * f16b724 Merge pull request `#43103`_ from aogier/43101-genesis-bootstrap + * db94f3b better formatting + + * e5cc667 tests: fix a leftover and simplify some parts + + * 13e5997 lint + + * 216ced6 allow comma-separated pkgs lists, quote args, test deb behaviour + + * d8612ae fix debootstrap and enhance packages selection/deletion via cmdline + +- **PR** `#42663`_: (*jagguli*) Check remote tags before deciding to do a fetch `#42329`_ + @ *2017-08-25T20:14:32Z* + + - **ISSUE** `#42329`_: (*jagguli*) State git.latest does not pull latest tags + | refs: `#42663`_ + * 4863771 Merge pull request `#42663`_ from StreetHawkInc/fix_git_tag_check + * 2b5af5b Remove refs/tags prefix from remote tags + + * 3f2e96e Convert set to list for serializer + + * 2728e5d Only include new tags in changes + + * 4b1df2f Exclude annotated tags from checks + + * 389c037 Check remote tags before deciding to do a fetch `#42329`_ + +- **PR** `#43199`_: (*corywright*) Add `disk.format` alias for `disk.format_` + @ *2017-08-25T19:21:07Z* + + - **ISSUE** `#43198`_: (*corywright*) disk.format_ needs to be aliased to disk.format + | refs: `#43199`_ + * 4193e7f Merge pull request `#43199`_ from corywright/disk-format-alias + * f00d3a9 Add `disk.format` alias for `disk.format_` + +- **PR** `#43196`_: (*gtmanfred*) Pin request install to version for npm tests + @ *2017-08-25T18:43:06Z* + + - **ISSUE** `#495`_: (*syphernl*) mysql.* without having MySQL installed/configured gives traceback + | refs: `#43196`_ + * 5471f9f Merge pull request `#43196`_ from gtmanfred/2016.11 + * ccd2241 Pin request install to version + +- **PR** `#43178`_: (*terminalmage*) git.detached: Fix traceback when rev is a SHA and is not present locally + @ *2017-08-25T13:58:37Z* + + - **ISSUE** 
`#43143`_: (*abulford*) git.detached does not fetch if rev is missing from local + | refs: `#43178`_ + * ace2715 Merge pull request `#43178`_ from terminalmage/issue43143 + * 2640833 git.detached: Fix traceback when rev is a SHA and is not present locally + +- **PR** `#43179`_: (*terminalmage*) Fix missed deprecation + @ *2017-08-24T22:52:34Z* + + * 12e9507 Merge pull request `#43179`_ from terminalmage/old-deprecation + * 3adf8ad Fix missed deprecation + +- **PR** `#43171`_: (*terminalmage*) Add warning about adding new functions to salt/utils/__init__.py + @ *2017-08-24T19:10:23Z* + + * b595440 Merge pull request `#43171`_ from terminalmage/salt-utils-warning + * 7b5943a Add warning about adding new functions to salt/utils/__init__.py + +- **PR** `#43173`_: (*Ch3LL*) Add New Release Branch Strategy to Contribution Docs + @ *2017-08-24T19:04:56Z* + + * 4f273ca Merge pull request `#43173`_ from Ch3LL/add_branch_docs + * 1b24244 Add New Release Branch Strategy to Contribution Docs + +- **PR** `#43151`_: (*ushmodin*) state.sls hangs on file.recurse with clean: True on windows + @ *2017-08-23T17:25:33Z* + + - **PR** `#42969`_: (*ushmodin*) state.sls hangs on file.recurse with clean: True on windows + | refs: `#43151`_ + * 669b376 Merge pull request `#43151`_ from ushmodin/2016.11 + * c5841e2 state.sls hangs on file.recurse with clean: True on windows + +- **PR** `#42986`_: (*renner*) Notify systemd synchronously (via NOTIFY_SOCKET) + @ *2017-08-22T16:52:56Z* + + * ae9d2b7 Merge pull request `#42986`_ from renner/systemd-notify + * 79c53f3 Fallback to systemd_notify_call() in case of socket.error + + * f176547 Notify systemd synchronously (via NOTIFY_SOCKET) + +- **PR** `#43037`_: (*mcarlton00*) Issue `#43036`_ Bhyve virtual grain in Linux VMs + @ *2017-08-22T16:43:40Z* + + - **ISSUE** `#43036`_: (*mcarlton00*) Linux VMs in Bhyve aren't displayed properly in grains + | refs: `#43037`_ + * b420fbe Merge pull request `#43037`_ from mcarlton00/fix-bhyve-grains + * 73315f0 
Issue `#43036`_ Bhyve virtual grain in Linux VMs + +- **PR** `#43100`_: (*vutny*) [DOCS] Add missing `utils` sub-dir listed for `extension_modules` + @ *2017-08-22T15:40:09Z* + + * 0a86f2d Merge pull request `#43100`_ from vutny/doc-add-missing-utils-ext + * af743ff [DOCS] Add missing `utils` sub-dir listed for `extension_modules` + +- **PR** `#42985`_: (*DmitryKuzmenko*) Properly handle `prereq` having lost requisites. + @ *2017-08-21T22:49:39Z* + + - **ISSUE** `#15171`_: (*JensRantil*) Maximum recursion limit hit related to requisites + | refs: `#42985`_ + * e2bf2f4 Merge pull request `#42985`_ from DSRCorporation/bugs/15171_recursion_limit + * 651b1ba Properly handle `prereq` having lost requisites. + +- **PR** `#43092`_: (*blarghmatey*) Fixed issue with silently passing all tests in Testinfra module + @ *2017-08-21T20:22:08Z* + + * e513333 Merge pull request `#43092`_ from mitodl/2016.11 + * d4b113a Fixed issue with silently passing all tests in Testinfra module + +- **PR** `#43060`_: (*twangboy*) Osx update pkg scripts + @ *2017-08-21T20:06:12Z* + + * 77a443c Merge pull request `#43060`_ from twangboy/osx_update_pkg_scripts + * ef8a14c Remove /opt/salt instead of /opt/salt/bin + + * 2dd62aa Add more information to the description + + * f44f5b7 Only stop services if they are running + + * 3b62bf9 Remove salt from the path + + * ebdca3a Update pkg-scripts + +- **PR** `#43064`_: (*terminalmage*) Fix race condition in git.latest + @ *2017-08-21T14:29:52Z* + + - **ISSUE** `#42869`_: (*abednarik*) Git Module : Failed to update repository + | refs: `#43064`_ + * 1b1b6da Merge pull request `#43064`_ from terminalmage/issue42869 + * 093c0c2 Fix race condition in git.latest + +- **PR** `#43054`_: (*lorengordon*) Uses ConfigParser to read yum config files + @ *2017-08-18T20:49:44Z* + + - **ISSUE** `#42041`_: (*lorengordon*) pkg.list_repo_pkgs fails to find pkgs with spaces around yum repo enabled value + | refs: `#43054`_ + - **PR** `#42045`_: (*arount*) Fix: 
salt.modules.yumpkg: ConfigParser to read ini like files. + | refs: `#43054`_ + * 96e8e83 Merge pull request `#43054`_ from lorengordon/fix/yumpkg/config-parser + * 3b2cb81 fix typo in salt.modules.yumpkg + + * 38add0e break if leading comments are all fetched + + * d7f65dc fix configparser import & log if error was raised + + * ca1b1bb use configparser to parse yum repo file + +- **PR** `#43048`_: (*rallytime*) Back-port `#43031`_ to 2016.11 + @ *2017-08-18T12:56:04Z* + + - **PR** `#43031`_: (*gtmanfred*) use a ruby gem that doesn't have dependencies + | refs: `#43048`_ + * 43aa46f Merge pull request `#43048`_ from rallytime/`bp-43031`_ + * 35e4504 use a ruby gem that doesn't have dependencies + +- **PR** `#43023`_: (*terminalmage*) Fixes/improvements to Jenkins state/module + @ *2017-08-18T01:33:10Z* + + * ad89ff3 Merge pull request `#43023`_ from terminalmage/fix-jenkins-xml-caching + * 33fd8ff Update jenkins.py + + * fc306fc Add missing colon in `if` statement + + * 822eabc Catch exceptions raised when making changes to jenkins + + * 91b583b Improve and correct execption raising + + * f096917 Raise an exception if we fail to cache the config xml + +- **PR** `#43026`_: (*rallytime*) Back-port `#43020`_ to 2016.11 + @ *2017-08-17T23:19:46Z* + + - **PR** `#43020`_: (*gtmanfred*) test with gem that appears to be abandoned + | refs: `#43026`_ + * 2957467 Merge pull request `#43026`_ from rallytime/`bp-43020`_ + * 0eb15a1 test with gem that appears to be abandoned + +- **PR** `#43033`_: (*rallytime*) Back-port `#42760`_ to 2016.11 + @ *2017-08-17T22:24:43Z* + + - **ISSUE** `#40490`_: (*alxwr*) saltstack x509 incompatible to m2crypto 0.26.0 + | refs: `#42760`_ + - **PR** `#42760`_: (*AFriemann*) Catch TypeError thrown by m2crypto when parsing missing subjects in c… + | refs: `#43033`_ + * 4150b09 Merge pull request `#43033`_ from rallytime/`bp-42760`_ + * 3e3f7f5 Catch TypeError thrown by m2crypto when parsing missing subjects in certificate files. 
+ +- **PR** `#43032`_: (*rallytime*) Back-port `#42547`_ to 2016.11 + @ *2017-08-17T21:53:50Z* + + - **PR** `#42547`_: (*blarghmatey*) Updated testinfra modules to work with more recent versions + | refs: `#43032`_ + * b124d36 Merge pull request `#43032`_ from rallytime/`bp-42547`_ + * ea4d7f4 Updated testinfra modules to work with more recent versions + +- **PR** `#43027`_: (*pabloh007*) Fixes ignore push flag for docker.push module issue `#42992`_ + @ *2017-08-17T19:55:37Z* + + - **ISSUE** `#42992`_: (*pabloh007*) docker.save flag push does is ignored + * a88386a Merge pull request `#43027`_ from pabloh007/fix-docker-save-push-2016-11 + * d0fd949 Fixes ignore push flag for docker.push module issue `#42992`_ + +- **PR** `#42890`_: (*DmitryKuzmenko*) Make chunked mode in salt-cp optional + @ *2017-08-17T18:37:44Z* + + - **ISSUE** `#42627`_: (*taigrrr8*) salt-cp no longer works. Was working a few months back. + | refs: `#42890`_ + * 51d1684 Merge pull request `#42890`_ from DSRCorporation/bugs/42627_salt-cp + * cfddbf1 Apply code review: update the doc + + * afedd3b Typos and version fixes in the doc. + + * 9fedf60 Fixed 'test_valid_docs' test. + + * 9993886 Make chunked mode in salt-cp optional (disabled by default). 
+ +- **PR** `#43009`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-08-17T18:00:09Z* + + - **PR** `#42954`_: (*Ch3LL*) [2016.3] Bump latest and previous versions + - **PR** `#42949`_: (*Ch3LL*) Add Security Notice to 2016.3.7 Release Notes + - **PR** `#42942`_: (*Ch3LL*) [2016.3] Add clean_id function to salt.utils.verify.py + * b3c253c Merge pull request `#43009`_ from rallytime/merge-2016.11 + * 566ba4f Merge branch '2016.3' into '2016.11' + + * 13b8637 Merge pull request `#42942`_ from Ch3LL/2016.3.6_follow_up + + * f281e17 move additional minion config options to 2016.3.8 release notes + + * 168604b remove merge conflict + + * 8a07d95 update release notes with cve number + + * 149633f Add release notes for 2016.3.7 release + + * 7a4cddc Add clean_id function to salt.utils.verify.py + + * bbb1b29 Merge pull request `#42954`_ from Ch3LL/latest_2016.3 + + * b551e66 [2016.3] Bump latest and previous versions + + * 5d5edc5 Merge pull request `#42949`_ from Ch3LL/2016.3.7_docs + + * d75d374 Add Security Notice to 2016.3.7 Release Notes + +- **PR** `#43021`_: (*terminalmage*) Use socket.AF_INET6 to get the correct value instead of doing an OS check + @ *2017-08-17T17:57:09Z* + + - **PR** `#43014`_: (*Ch3LL*) Change AF_INET6 family for mac in test_host_to_ips + | refs: `#43021`_ + * 37c63e7 Merge pull request `#43021`_ from terminalmage/fix-network-test + * 4089b7b Use socket.AF_INET6 to get the correct value instead of doing an OS check + +- **PR** `#43019`_: (*rallytime*) Update bootstrap script to latest stable: v2017.08.17 + @ *2017-08-17T17:56:41Z* + + * 8f64232 Merge pull request `#43019`_ from rallytime/bootstrap_2017.08.17 + * 2f762b3 Update bootstrap script to latest stable: v2017.08.17 + +- **PR** `#43014`_: (*Ch3LL*) Change AF_INET6 family for mac in test_host_to_ips + | refs: `#43021`_ + @ *2017-08-17T16:17:51Z* + + * ff1caeee Merge pull request `#43014`_ from Ch3LL/fix_network_mac + * b8eee44 Change AF_INET6 family for mac in 
test_host_to_ips + +- **PR** `#42968`_: (*vutny*) [DOCS] Fix link to Salt Cloud Feature Matrix + @ *2017-08-16T13:16:16Z* + + * 1ee9499 Merge pull request `#42968`_ from vutny/doc-salt-cloud-ref + * 44ed53b [DOCS] Fix link to Salt Cloud Feature Matrix + +- **PR** `#42291`_: (*vutny*) Fix `#38839`_: remove `state` from Reactor runner kwags + @ *2017-08-15T23:01:08Z* + + - **ISSUE** `#38839`_: (*DaveOHenry*) Invoking runner.cloud.action via reactor sls fails + | refs: `#42291`_ + * 923f974 Merge pull request `#42291`_ from vutny/`fix-38839`_ + * 5f8f98a Fix `#38839`_: remove `state` from Reactor runner kwags + +- **PR** `#42940`_: (*gtmanfred*) create new ip address before checking list of allocated ips + @ *2017-08-15T21:47:18Z* + + - **ISSUE** `#42644`_: (*stamak*) nova salt-cloud -P Private IPs returned, but not public. Checking for misidentified IPs + | refs: `#42940`_ + * c20bc7d Merge pull request `#42940`_ from gtmanfred/2016.11 + * 253e216 fix IP address spelling + + * bd63074 create new ip address before checking list of allocated ips + +- **PR** `#42959`_: (*rallytime*) Back-port `#42883`_ to 2016.11 + @ *2017-08-15T21:25:48Z* + + - **PR** `#42883`_: (*rallytime*) Fix failing boto tests + | refs: `#42959`_ + * d6496ec Merge pull request `#42959`_ from rallytime/`bp-42883`_ + * c6b9ca4 Lint fix: add missing space + + * 5597b1a Skip 2 failing tests in Python 3 due to upstream bugs + + * a0b19bd Update account id value in boto_secgroup module unit test + + * 60b406e @mock_elb needs to be changed to @mock_elb_deprecated as well + + * 6ae1111 Replace @mock_ec2 calls with @mock_ec2_deprecated calls + +- **PR** `#42944`_: (*Ch3LL*) [2016.11] Add clean_id function to salt.utils.verify.py + @ *2017-08-15T18:06:12Z* + + * 6366e05 Merge pull request `#42944`_ from Ch3LL/2016.11.6_follow_up + * 7e0a20a Add release notes for 2016.11.7 release + + * 63823f8 Add clean_id function to salt.utils.verify.py + +- **PR** `#42952`_: (*Ch3LL*) [2016.11] Bump latest and previous 
versions + @ *2017-08-15T17:23:02Z* + + * 49d339c Merge pull request `#42952`_ from Ch3LL/latest_2016.11 + * 74e7055 [2016.11] Bump latest and previous versions + +- **PR** `#42950`_: (*Ch3LL*) Add Security Notice to 2016.11.7 Release Notes + @ *2017-08-15T16:50:23Z* + + * b0d2e05 Merge pull request `#42950`_ from Ch3LL/2016.11.7_docs + * a6f902d Add Security Notice to 2016.11.77 Release Notes + +- **PR** `#42836`_: (*aneeshusa*) Backport salt.utils.versions from develop to 2016.11 + @ *2017-08-14T20:56:54Z* + + - **PR** `#42835`_: (*aneeshusa*) Fix typo in utils/versions.py module + | refs: `#42836`_ + * c0ff69f Merge pull request `#42836`_ from lyft/backport-utils.versions-to-2016.11 + * 86ce700 Backport salt.utils.versions from develop to 2016.11 + +- **PR** `#42919`_: (*rallytime*) Back-port `#42871`_ to 2016.11 + @ *2017-08-14T20:44:00Z* + + - **PR** `#42871`_: (*amalleo25*) Update joyent.rst + | refs: `#42919`_ + * 64a79dd Merge pull request `#42919`_ from rallytime/`bp-42871`_ + * 4e46c96 Update joyent.rst + +- **PR** `#42918`_: (*rallytime*) Back-port `#42848`_ to 2016.11 + @ *2017-08-14T20:43:43Z* + + - **ISSUE** `#42803`_: (*gmcwhistler*) master_type: str, not working as expected, parent salt-minion process dies. + | refs: `#42848`_ + - **ISSUE** `#42753`_: (*grichmond-salt*) SaltReqTimeout Error on Some Minions when One Master in a Multi-Master Configuration is Unavailable + | refs: `#42848`_ + - **PR** `#42848`_: (*DmitryKuzmenko*) Execute fire_master asynchronously in the main minion thread. + | refs: `#42918`_ + * bea8ec1 Merge pull request `#42918`_ from rallytime/`bp-42848`_ + * cdb4812 Make lint happier. + + * 62eca9b Execute fire_master asynchronously in the main minion thread. 
+ +- **PR** `#42861`_: (*twangboy*) Fix pkg.install salt-minion using salt-call + @ *2017-08-14T19:07:22Z* + + * 52bce32 Merge pull request `#42861`_ from twangboy/win_pkg_install_salt + * 0d3789f Fix pkg.install salt-minion using salt-call + +- **PR** `#42798`_: (*s-sebastian*) Update return data before calling returners + @ *2017-08-14T15:51:30Z* + + * b9f4f87 Merge pull request `#42798`_ from s-sebastian/2016.11 + * 1cc8659 Update return data before calling returners + +- **PR** `#41977`_: (*abulford*) Fix dockerng.network_* ignoring of tests=True + @ *2017-08-11T18:37:20Z* + + - **ISSUE** `#41976`_: (*abulford*) dockerng network states do not respect test=True + | refs: `#41977`_ `#41977`_ + * c15d003 Merge pull request `#41977`_ from redmatter/fix-dockerng-network-ignores-test + * 1cc2aa5 Fix dockerng.network_* ignoring of tests=True + +- **PR** `#42886`_: (*sarcasticadmin*) Adding missing output flags to salt cli docs + @ *2017-08-11T18:35:19Z* + + * 3b9c3c5 Merge pull request `#42886`_ from sarcasticadmin/adding_docs_salt_outputs + * 744bf95 Adding missing output flags to salt cli + +- **PR** `#42882`_: (*gtmanfred*) make sure cmd is not run when npm isn't installed + @ *2017-08-11T17:53:14Z* + + * e5b98c8 Merge pull request `#42882`_ from gtmanfred/2016.11 + * da3402a make sure cmd is not run when npm isn't installed + +- **PR** `#42788`_: (*amendlik*) Remove waits and retries from Saltify deployment + @ *2017-08-11T15:38:05Z* + + * 5962c95 Merge pull request `#42788`_ from amendlik/saltify-timeout + * 928b523 Remove waits and retries from Saltify deployment + +- **PR** `#42877`_: (*terminalmage*) Add virtual func for cron state module + @ *2017-08-11T15:33:09Z* + + * 227ecdd Merge pull request `#42877`_ from terminalmage/add-cron-state-virtual + * f1de196 Add virtual func for cron state module + +- **PR** `#42859`_: (*terminalmage*) Add note about git CLI requirement for GitPython to GitFS tutorial + @ *2017-08-11T14:53:03Z* + + * ab9f6ce Merge pull 
request `#42859`_ from terminalmage/gitpython-git-cli-note + * 35e05c9 Add note about git CLI requirement for GitPython to GitFS tutorial + +- **PR** `#42856`_: (*gtmanfred*) skip cache_clean test if npm version is >= 5.0.0 + @ *2017-08-11T13:39:20Z* + + - **ISSUE** `#41770`_: (*Ch3LL*) NPM v5 incompatible with salt.modules.cache_list + | refs: `#42856`_ + - **ISSUE** `#475`_: (*thatch45*) Change yaml to use C bindings + | refs: `#42856`_ + * 682b4a8 Merge pull request `#42856`_ from gtmanfred/2016.11 + * b458b89 skip cache_clean test if npm version is >= 5.0.0 + +- **PR** `#42864`_: (*whiteinge*) Make syndic_log_file respect root_dir setting + @ *2017-08-11T13:28:21Z* + + * 01ea854 Merge pull request `#42864`_ from whiteinge/syndic-log-root_dir + * 4b1f55d Make syndic_log_file respect root_dir setting + +- **PR** `#42851`_: (*terminalmage*) Backport `#42651`_ to 2016.11 + @ *2017-08-10T18:02:39Z* + + - **PR** `#42651`_: (*gtmanfred*) python2- prefix for fedora 26 packages + * 2dde1f7 Merge pull request `#42851`_ from terminalmage/`bp-42651`_ + * a3da86e fix syntax + + * 6ecdbce make sure names are correct + + * f83b553 add py3 for versionlock + + * 21934f6 python2- prefix for fedora 26 packages + +- **PR** `#42806`_: (*rallytime*) Update doc references in glusterfs.volume_present + @ *2017-08-10T14:10:16Z* + + - **ISSUE** `#42683`_: (*rgcosma*) Gluster module broken in 2017.7 + | refs: `#42806`_ + * c746f79 Merge pull request `#42806`_ from rallytime/`fix-42683`_ + * 8c8640d Update doc references in glusterfs.volume_present + +- **PR** `#42829`_: (*twangboy*) Fix passing version in pkgs as shown in docs + @ *2017-08-10T14:07:24Z* + + * 27a8a26 Merge pull request `#42829`_ from twangboy/win_pkg_fix_install + * 83b9b23 Add winrepo to docs about supporting versions in pkgs + + * 81fefa6 Add ability to pass version in pkgs list + +- **PR** `#42838`_: (*twangboy*) Document requirements for win_pki + @ *2017-08-10T13:59:46Z* + + * 3c3ac6a Merge pull request `#42838`_ 
from twangboy/win_doc_pki + * f0a1d06 Standardize PKI Client + + * 7de687a Document requirements for win_pki + +- **PR** `#42805`_: (*rallytime*) Back-port `#42552`_ to 2016.11 + @ *2017-08-09T22:37:56Z* + + - **PR** `#42552`_: (*remijouannet*) update consul module following this documentation https://www.consul.… + | refs: `#42805`_ + * b3e2ae3 Merge pull request `#42805`_ from rallytime/`bp-42552`_ + * 5a91c1f update consul module following this documentation https://www.consul.io/api/acl.html + +- **PR** `#42804`_: (*rallytime*) Back-port `#42784`_ to 2016.11 + @ *2017-08-09T22:37:40Z* + + - **ISSUE** `#42731`_: (*infoveinx*) http.query template_data render exception + | refs: `#42804`_ + - **PR** `#42784`_: (*gtmanfred*) only read file if ret is not a string in http.query + | refs: `#42804`_ + * d2ee793 Merge pull request `#42804`_ from rallytime/`bp-42784`_ + * dbd29e4 only read file if it is not a string + +- **PR** `#42826`_: (*terminalmage*) Fix misspelling of "versions" + @ *2017-08-09T19:39:43Z* + + * 4cbf805 Merge pull request `#42826`_ from terminalmage/fix-spelling + * 00f9314 Fix misspelling of "versions" + +- **PR** `#42786`_: (*Ch3LL*) Fix typo for template_dict in http docs + @ *2017-08-08T18:14:50Z* + + * de997ed Merge pull request `#42786`_ from Ch3LL/fix_typo + * 90a2fb6 Fix typo for template_dict in http docs + +- **PR** `#42795`_: (*lomeroe*) backport `#42744`_ to 2016.11 + @ *2017-08-08T17:17:15Z* + + - **ISSUE** `#42600`_: (*twangboy*) Unable to set 'Not Configured' using win_lgpo execution module + | refs: `#42744`_ `#42795`_ + - **PR** `#42744`_: (*lomeroe*) fix `#42600`_ in develop + | refs: `#42795`_ + * bf6153e Merge pull request `#42795`_ from lomeroe/`bp-42744`__201611 + * 695f8c1 fix `#42600`_ in develop + +- **PR** `#42748`_: (*whiteinge*) Workaround Orchestrate problem that highstate outputter mutates data + @ *2017-08-07T21:11:33Z* + + - **ISSUE** `#42747`_: (*whiteinge*) Outputters mutate data which can be a problem for Runners 
and perhaps other things + | refs: `#42748`_ + * 61fad97 Merge pull request `#42748`_ from whiteinge/save-before-output + * de60b77 Workaround Orchestrate problem that highstate outputter mutates data + +- **PR** `#42764`_: (*amendlik*) Fix infinite loop with salt-cloud and Windows nodes + @ *2017-08-07T20:47:07Z* + + * a4e3e7e Merge pull request `#42764`_ from amendlik/cloud-win-loop + * f3dcfca Fix infinite loops on failed Windows deployments + +- **PR** `#42694`_: (*gtmanfred*) allow adding extra remotes to a repository + @ *2017-08-07T18:08:11Z* + + - **ISSUE** `#42690`_: (*ChristianBeer*) git.latest state with remote set fails on first try + | refs: `#42694`_ + * da85326 Merge pull request `#42694`_ from gtmanfred/2016.11 + * 1a0457a allow adding extra remotes to a repository + +- **PR** `#42669`_: (*garethgreenaway*) [2016.11] Fixes to augeas module + @ *2017-08-06T17:58:03Z* + + - **ISSUE** `#42642`_: (*githubcdr*) state.augeas + | refs: `#42669`_ `#43202`_ + * 7b2119f Merge pull request `#42669`_ from garethgreenaway/42642_2016_11_augeas_module_fix + * 2441308 Updating the call to shlex_split to pass the posix=False argument so that quotes are preserved. + +- **PR** `#42629`_: (*xiaoanyunfei*) tornado api + @ *2017-08-03T22:21:20Z* + + * 3072576 Merge pull request `#42629`_ from xiaoanyunfei/tornadoapi + * 1e13383 tornado api + +- **PR** `#42655`_: (*whiteinge*) Reenable cpstats for rest_cherrypy + @ *2017-08-03T20:44:10Z* + + - **PR** `#33806`_: (*cachedout*) Work around upstream cherrypy bug + | refs: `#42655`_ + * f0f00fc Merge pull request `#42655`_ from whiteinge/rest_cherrypy-reenable-stats + * deb6316 Fix lint errors + + * 6bd91c8 Reenable cpstats for rest_cherrypy + +- **PR** `#42693`_: (*gilbsgilbs*) Fix RabbitMQ tags not properly set. 
+ @ *2017-08-03T20:23:08Z* + + - **ISSUE** `#42686`_: (*gilbsgilbs*) Unable to set multiple RabbitMQ tags + | refs: `#42693`_ `#42693`_ + * 21cf15f Merge pull request `#42693`_ from gilbsgilbs/fix-rabbitmq-tags + * 78fccdc Cast to list in case tags is a tuple. + + * 287b57b Fix RabbitMQ tags not properly set. + +- **PR** `#42574`_: (*sbojarski*) Fixed error reporting in "boto_cfn.present" function. + @ *2017-08-01T17:55:29Z* + + - **ISSUE** `#41433`_: (*sbojarski*) boto_cfn.present fails when reporting error for failed state + | refs: `#42574`_ + * f2b0c9b Merge pull request `#42574`_ from sbojarski/boto-cfn-error-reporting + * 5c945f1 Fix debug message in "boto_cfn._validate" function. + + * 181a1be Fixed error reporting in "boto_cfn.present" function. + +- **PR** `#42623`_: (*terminalmage*) Fix unicode constructor in custom YAML loader + @ *2017-07-31T19:25:18Z* + + * bc1effc Merge pull request `#42623`_ from terminalmage/fix-unicode-constructor + * fcf4588 Fix unicode constructor in custom YAML loader + +- **PR** `#42515`_: (*gtmanfred*) Allow not interpreting backslashes in the repl + @ *2017-07-28T16:00:09Z* + + * cbf752c Merge pull request `#42515`_ from gtmanfred/backslash + * cc4e456 Allow not interpreting backslashes in the repl + +- **PR** `#42586`_: (*gdubroeucq*) [Fix] yumpkg.py: add option to the command "check-update" + @ *2017-07-27T23:52:00Z* + + - **ISSUE** `#42456`_: (*gdubroeucq*) Use yum lib + | refs: `#42586`_ + * 5494958 Merge pull request `#42586`_ from gdubroeucq/2016.11 + * 9c0b5cc Remove extra newline + + * d2ef448 yumpkg.py: clean + + * a96f7c0 yumpkg.py: add option to the command "check-update" + +- **PR** `#41988`_: (*abulford*) Fix dockerng.network_* name matching + @ *2017-07-27T21:25:06Z* + + - **ISSUE** `#41982`_: (*abulford*) dockerng.network_* matches too easily + | refs: `#41988`_ `#41988`_ + * 6b45deb Merge pull request `#41988`_ from redmatter/fix-dockerng-network-matching + * 9eea796 Add regression tests for `#41982`_ + + * 
3369f00 Fix broken unit test test_network_absent + + * 0ef6cf6 Add trace logging of dockerng.networks result + + * 515c612 Fix dockerng.network_* name matching + +- **PR** `#42339`_: (*isbm*) Bugfix: Jobs scheduled to run at a future time stay pending for Salt minions (bsc`#1036125`_) + @ *2017-07-27T19:05:51Z* + + - **ISSUE** `#1036125`_: (**) + * 4b16109 Merge pull request `#42339`_ from isbm/isbm-jobs-scheduled-in-a-future-bsc1036125 + * bbba84c Bugfix: Jobs scheduled to run at a future time stay pending for Salt minions (bsc`#1036125`_) + +- **PR** `#42077`_: (*vutny*) Fix scheduled job run on Master if `when` parameter is a list + @ *2017-07-27T19:04:23Z* + + - **ISSUE** `#23516`_: (*dkiser*) BUG: cron job scheduler sporadically works + | refs: `#42077`_ + - **PR** `#41973`_: (*vutny*) Fix Master/Minion scheduled jobs based on Cron expressions + | refs: `#42077`_ + * 6c5a7c6 Merge pull request `#42077`_ from vutny/fix-jobs-scheduled-with-whens + * b1960ce Fix scheduled job run on Master if `when` parameter is a list + +- **PR** `#42414`_: (*vutny*) DOCS: unify hash sum with hash type format + @ *2017-07-27T18:48:40Z* + + * f9cb536 Merge pull request `#42414`_ from vutny/unify-hash-params-format + * d1f2a93 DOCS: unify hash sum with hash type format + +- **PR** `#42523`_: (*rallytime*) Add a mention of the True/False returns with __virtual__() + @ *2017-07-27T18:13:07Z* + + - **ISSUE** `#42375`_: (*dragonpaw*) salt.modules.*.__virtualname__ doens't work as documented. 
+ | refs: `#42523`_ + * 535c922 Merge pull request `#42523`_ from rallytime/`fix-42375`_ + * 685c2cc Add information about returning a tuple with an error message + + * fa46651 Add a mention of the True/False returns with __virtual__() + +- **PR** `#42527`_: (*twangboy*) Document changes to Windows Update in Windows 10/Server 2016 + @ *2017-07-27T17:45:38Z* + + * 0df0e7e Merge pull request `#42527`_ from twangboy/win_wua + * 0373791 Correct capatlization + + * af3bcc9 Document changes to Windows Update in 10/2016 + +- **PR** `#42551`_: (*binocvlar*) Remove '-s' (--script) argument to parted within align_check function + @ *2017-07-27T17:35:31Z* + + * 69b0658 Merge pull request `#42551`_ from binocvlar/fix-lack-of-align-check-output + * c4fabaa Remove '-s' (--script) argument to parted within align_check function + +- **PR** `#42573`_: (*rallytime*) Back-port `#42433`_ to 2016.11 + @ *2017-07-27T13:51:21Z* + + - **ISSUE** `#42403`_: (*astronouth7303*) [2017.7] Pillar empty when state is applied from orchestrate + | refs: `#42433`_ + - **PR** `#42433`_: (*terminalmage*) Only force saltenv/pillarenv to be a string when not None + | refs: `#42573`_ + * 9e0b4e9 Merge pull request `#42573`_ from rallytime/`bp-42433`_ + * 0293429 Only force saltenv/pillarenv to be a string when not None + +- **PR** `#42571`_: (*twangboy*) Avoid loading system PYTHON* environment vars + @ *2017-07-26T22:48:55Z* + + * e931ed2 Merge pull request `#42571`_ from twangboy/win_add_pythonpath + * d55a44d Avoid loading user site packages + + * 9af1eb2 Ignore any PYTHON* environment vars already on the system + + * 4e2fb03 Add pythonpath to batch files and service + +- **PR** `#42387`_: (*DmitryKuzmenko*) Fix race condition in usage of weakvaluedict + @ *2017-07-25T20:57:42Z* + + - **ISSUE** `#42371`_: (*tsaridas*) Minion unresponsive after trying to failover + | refs: `#42387`_ + * de2f397 Merge pull request `#42387`_ from DSRCorporation/bugs/42371_KeyError_WeakValueDict + * e721c7e Don't use `key 
in weakvaluedict` because it could lie. + +- **PR** `#41968`_: (*root360-AndreasUlm*) Fix rabbitmqctl output sanitizer for version 3.6.10 + @ *2017-07-25T19:12:36Z* + + - **ISSUE** `#41955`_: (*root360-AndreasUlm*) rabbitmq 3.6.10 changed output => rabbitmq-module broken + | refs: `#41968`_ + * 641a9d7 Merge pull request `#41968`_ from root360-AndreasUlm/fix-rabbitmqctl-output-handler + * 76fd941 added tests for rabbitmq 3.6.10 output handler + + * 3602af1 Fix rabbitmqctl output handler for 3.6.10 + +- **PR** `#42479`_: (*gtmanfred*) validate ssh_interface for ec2 + @ *2017-07-25T18:37:18Z* + + - **ISSUE** `#42477`_: (*aikar*) Invalid ssh_interface value prevents salt-cloud provisioning without reason of why + | refs: `#42479`_ + * 66fede3 Merge pull request `#42479`_ from gtmanfred/interface + * c32c1b2 fix pylint + + * 99ec634 validate ssh_interface for ec2 + +- **PR** `#42516`_: (*rallytime*) Add info about top file to pillar walk-through example to include edit.vim + @ *2017-07-25T17:01:12Z* + + - **ISSUE** `#42405`_: (*felrivero*) The documentation is incorrectly compiled (PILLAR section) + | refs: `#42516`_ + * a925c70 Merge pull request `#42516`_ from rallytime/`fix-42405`_ + * e3a6717 Add info about top file to pillar walk-through example to include edit.vim + +- **PR** `#42509`_: (*clem-compilatio*) Fix _assign_floating_ips in openstack.py + @ *2017-07-24T17:14:13Z* + + - **ISSUE** `#42417`_: (*clem-compilatio*) salt-cloud - openstack - "no more floating IP addresses" error - but public_ip in node + | refs: `#42509`_ + * 1bd5bbc Merge pull request `#42509`_ from clem-compilatio/`fix-42417`_ + * 72924b0 Fix _assign_floating_ips in openstack.py + +- **PR** `#42464`_: (*garethgreenaway*) [2016.11] Small fix to modules/git.py + @ *2017-07-21T21:28:57Z* + + * 4bf35a7 Merge pull request `#42464`_ from garethgreenaway/2016_11_remove_tmp_identity_file + * ff24102 Uncomment the line that removes the temporary identity file. 
+ +- **PR** `#42443`_: (*garethgreenaway*) [2016.11] Fix to slack engine + @ *2017-07-21T15:48:57Z* + + - **ISSUE** `#42357`_: (*Giandom*) Salt pillarenv problem with slack engine + | refs: `#42443`_ + * e2120db Merge pull request `#42443`_ from garethgreenaway/42357_pass_args_kwargs_correctly + * 635810b Updating the slack engine in 2016.11 to pass the args and kwrags correctly to LocalClient + +- **PR** `#42200`_: (*shengis*) Fix `#42198`_ + @ *2017-07-21T14:47:29Z* + + - **ISSUE** `#42198`_: (*shengis*) state sqlite3.row_absent fail with "parameters are of unsupported type" + | refs: `#42200`_ + * 8262cc9 Merge pull request `#42200`_ from shengis/sqlite3_fix_row_absent_2016.11 + * 407b8f4 Fix `#42198`_ If where_args is not set, not using it in the delete request. + +- **PR** `#42424`_: (*goten4*) Fix error message when tornado or pycurl is not installed + @ *2017-07-20T21:53:40Z* + + - **ISSUE** `#42413`_: (*goten4*) Invalid error message when proxy_host is set and tornado not installed + | refs: `#42424`_ + * d9df97e Merge pull request `#42424`_ from goten4/2016.11 + * 1c0574d Fix error message when tornado or pycurl is not installed + +- **PR** `#42350`_: (*twangboy*) Fixes problem with Version and OS Release related grains on certain versions of Python (2016.11) + @ *2017-07-19T17:07:26Z* + + * 42bb1a6 Merge pull request `#42350`_ from twangboy/win_fix_ver_grains_2016.11 + * 8c04840 Detect Server OS with a desktop release name + +- **PR** `#42356`_: (*meaksh*) Allow to check whether a function is available on the AliasesLoader wrapper + @ *2017-07-19T16:56:41Z* + + * 0a72e56 Merge pull request `#42356`_ from meaksh/2016.11-AliasesLoader-wrapper-fix + * 915d942 Allow to check whether a function is available on the AliasesLoader wrapper + +- **PR** `#42368`_: (*twangboy*) Remove build and dist directories before install (2016.11) + @ *2017-07-19T16:47:28Z* + + * 10eb7b7 Merge pull request `#42368`_ from twangboy/win_fix_build_2016.11 + * a7c910c Remove build 
and dist directories before install + +- **PR** `#42370`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-07-18T22:39:41Z* + + - **PR** `#42359`_: (*Ch3LL*) [2016.3] Update version numbers in doc config for 2017.7.0 release + * 016189f Merge pull request `#42370`_ from rallytime/merge-2016.11 + * 0aa5dde Merge branch '2016.3' into '2016.11' + + * e9b0f20 Merge pull request `#42359`_ from Ch3LL/doc-update-2016.3 + + * dc85b5e [2016.3] Update version numbers in doc config for 2017.7.0 release + +- **PR** `#42360`_: (*Ch3LL*) [2016.11] Update version numbers in doc config for 2017.7.0 release + @ *2017-07-18T19:23:30Z* + + * f06a6f1 Merge pull request `#42360`_ from Ch3LL/doc-update-2016.11 + * b90b7a7 [2016.11] Update version numbers in doc config for 2017.7.0 release + +- **PR** `#42319`_: (*rallytime*) Add more documentation for config options that are missing from master/minion docs + @ *2017-07-18T18:02:32Z* + + - **ISSUE** `#32400`_: (*rallytime*) Document Default Config Values + | refs: `#42319`_ + * e0595b0 Merge pull request `#42319`_ from rallytime/config-docs + * b40f980 Add more documentation for config options that are missing from master/minion docs + +- **PR** `#42352`_: (*CorvinM*) Multiple documentation fixes + @ *2017-07-18T15:10:37Z* + + - **ISSUE** `#42333`_: (*b3hni4*) Getting "invalid type of dict, a list is required" when trying to configure engines in master config file + | refs: `#42352`_ + * 7894040 Merge pull request `#42352`_ from CorvinM/issue42333 + * 526b6ee Multiple documentation fixes + +- **PR** `#42353`_: (*terminalmage*) is_windows is a function, not a propery/attribute + @ *2017-07-18T14:38:51Z* + + * b256001 Merge pull request `#42353`_ from terminalmage/fix-git-test + * 14cf6ce is_windows is a function, not a propery/attribute + +- **PR** `#42264`_: (*rallytime*) Update minion restart section in FAQ doc for windows + @ *2017-07-17T17:40:40Z* + + - **ISSUE** `#41116`_: (*hrumph*) FAQ has wrong instructions 
for upgrading Windows minion. + | refs: `#42264`_ + * 866a1fe Merge pull request `#42264`_ from rallytime/`fix-41116`_ + * bd63888 Add mono-spacing to salt-minion reference for consistency + + * 30d62f4 Update minion restart section in FAQ doc for windows + +- **PR** `#42275`_: (*terminalmage*) pkg.installed: pack name/version into pkgs argument + @ *2017-07-17T17:38:39Z* + + - **ISSUE** `#42194`_: (*jryberg*) pkg version: latest are now broken, appending -latest to filename + | refs: `#42275`_ + * 9a70708 Merge pull request `#42275`_ from terminalmage/issue42194 + * 6638749 pkg.installed: pack name/version into pkgs argument + +- **PR** `#42269`_: (*rallytime*) Add some clarity to "multiple quotes" section of yaml docs + @ *2017-07-17T17:38:18Z* + + - **ISSUE** `#41721`_: (*sazaro*) state.sysrc broken when setting the value to YES or NO + | refs: `#42269`_ + * e588f23 Merge pull request `#42269`_ from rallytime/`fix-41721`_ + * f2250d4 Add a note about using different styles of quotes. + + * 38d9b3d Add some clarity to "multiple quotes" section of yaml docs + +- **PR** `#42282`_: (*rallytime*) Handle libcloud objects that throw RepresenterErrors with --out=yaml + @ *2017-07-17T17:36:35Z* + + - **ISSUE** `#42152`_: (*dubb-b*) salt-cloud errors on Rackspace driver using -out=yaml + | refs: `#42282`_ + * 5aaa214 Merge pull request `#42282`_ from rallytime/`fix-42152`_ + * f032223 Handle libcloud objects that throw RepresenterErrors with --out=yaml + +- **PR** `#42308`_: (*lubyou*) Force file removal on Windows. Fixes `#42295`_ + @ *2017-07-17T17:12:13Z* + + - **ISSUE** `#42295`_: (*lubyou*) file.absent fails on windows if the file to be removed has the "readonly" attribute set + | refs: `#42308`_ + * fb5697a Merge pull request `#42308`_ from lubyou/42295-fix-file-absent-windows + * 026ccf4 Force file removal on Windows. Fixes `#42295`_ + +- **PR** `#42314`_: (*rallytime*) Add clarification to salt ssh docs about key auto-generation. 
+ @ *2017-07-17T14:07:49Z* + + - **ISSUE** `#42267`_: (*gzcwnk*) salt-ssh not creating ssh keys automatically as per documentation + | refs: `#42314`_ + * da2a8a5 Merge pull request `#42314`_ from rallytime/`fix-42267`_ + * c406046 Add clarification to salt ssh docs about key auto-generation. + +- **PR** `#41945`_: (*garethgreenaway*) Fixes to modules/git.py + @ *2017-07-14T17:46:10Z* + + - **ISSUE** `#41936`_: (*michaelkarrer81*) git.latest identity does not set the correct user for the private key file on the minion + | refs: `#41945`_ + - **ISSUE** `#1`_: (*thatch45*) Enable regex on the salt cli + * acadd54 Merge pull request `#41945`_ from garethgreenaway/41936_allow_identity_files_with_user + * 44841e5 Moving the call to cp.get_file inside the with block to ensure the umask is preserved when we grab the file. + + * f9ba60e Merge pull request `#1`_ from terminalmage/pr-41945 + + * 1b60261 Restrict set_umask to mkstemp call only + + * 68549f3 Fixing umask to we can set files as executable. + + * 4949bf3 Updating to swap on the new salt.utils.files.set_umask context_manager + + * 8faa9f6 Updating PR with requested changes. 
+ + * 494765e Updating the git module to allow an identity file to be used when passing the user parameter + +- **PR** `#42289`_: (*CorvinM*) Multiple empty_password fixes for state.user + @ *2017-07-14T16:14:02Z* + + - **ISSUE** `#42240`_: (*casselt*) empty_password in user.present always changes password, even with test=True + | refs: `#42289`_ + - **PR** `#41543`_: (*cri-epita*) Fix user creation with empty password + | refs: `#42289`_ `#42289`_ + * f90e04a Merge pull request `#42289`_ from CorvinM/`bp-41543`_ + * 357dc22 Fix user creation with empty password + +- **PR** `#42123`_: (*vutny*) DOCS: describe importing custom util classes + @ *2017-07-12T15:53:24Z* + + * a91a3f8 Merge pull request `#42123`_ from vutny/fix-master-utils-import + * 6bb8b8f Add missing doc for ``utils_dirs`` Minion config option + + * f1bc58f Utils: add example of module import + +- **PR** `#42261`_: (*rallytime*) Some minor doc fixes for dnsutil module so they'll render correctly + @ *2017-07-11T23:14:53Z* + + * e2aa511 Merge pull request `#42261`_ from rallytime/minor-doc-fix + * 8c76bbb Some minor doc fixes for dnsutil module so they'll render correctly + +- **PR** `#42262`_: (*rallytime*) Back-port `#42224`_ to 2016.11 + @ *2017-07-11T23:14:25Z* + + - **PR** `#42224`_: (*tdutrion*) Remove duplicate instruction in Openstack Rackspace config example + | refs: `#42262`_ + * 3e9dfbc Merge pull request `#42262`_ from rallytime/`bp-42224`_ + * c31ded3 Remove duplicate instruction in Openstack Rackspace config example + +- **PR** `#42181`_: (*garethgreenaway*) fixes to state.py for names parameter + @ *2017-07-11T21:21:32Z* + + - **ISSUE** `#42137`_: (*kiemlicz*) cmd.run with multiple commands - random order of execution + | refs: `#42181`_ + * 7780579 Merge pull request `#42181`_ from garethgreenaway/42137_backport_fix_from_2017_7 + * a34970b Back porting the fix for 2017.7 that ensures the order of the names parameter. 
+ +- **PR** `#42253`_: (*gtmanfred*) Only use unassociated ips when unable to allocate + @ *2017-07-11T20:53:51Z* + + - **PR** `#38965`_: (*toanju*) salt-cloud will use list_floating_ips for OpenStack + | refs: `#42253`_ + - **PR** `#34280`_: (*kevinanderson1*) salt-cloud will use list_floating_ips for Openstack + | refs: `#38965`_ + * 7253786 Merge pull request `#42253`_ from gtmanfred/2016.11 + * 53e2576 Only use unassociated ips when unable to allocate + +- **PR** `#42252`_: (*UtahDave*) simple docstring updates + @ *2017-07-11T20:48:33Z* + + * b2a4698 Merge pull request `#42252`_ from UtahDave/2016.11local + * e6a9563 simple doc updates + +- **PR** `#42235`_: (*astronouth7303*) Abolish references to `dig` in examples. + @ *2017-07-10T20:06:11Z* + + - **ISSUE** `#42232`_: (*astronouth7303*) Half of dnsutil refers to dig + | refs: `#42235`_ + * 781fe13 Merge pull request `#42235`_ from astronouth7303/patch-1-2016.3 + * 4cb51bd Make note of dig partial requirement. + + * 08e7d83 Abolish references to `dig` in examples. 
+ +- **PR** `#42215`_: (*twangboy*) Add missing config to example + @ *2017-07-07T20:18:44Z* + + * 83cbd76 Merge pull request `#42215`_ from twangboy/win_iis_docs + * c07e220 Add missing config to example + +- **PR** `#42211`_: (*terminalmage*) Only pass a saltenv in orchestration if one was explicitly passed (2016.11) + @ *2017-07-07T20:16:35Z* + + * 274946a Merge pull request `#42211`_ from terminalmage/issue40928 + * 22a18fa Only pass a saltenv in orchestration if one was explicitly passed (2016.11) + +- **PR** `#42173`_: (*rallytime*) Back-port `#37424`_ to 2016.11 + @ *2017-07-07T16:39:59Z* + + - **PR** `#37424`_: (*kojiromike*) Avoid Early Convert ret['comment'] to String + | refs: `#42173`_ + * 89261cf Merge pull request `#42173`_ from rallytime/`bp-37424`_ + * 01addb6 Avoid Early Convert ret['comment'] to String + +- **PR** `#42175`_: (*rallytime*) Back-port `#39366`_ to 2016.11 + @ *2017-07-06T19:51:47Z* + + - **ISSUE** `#39365`_: (*dglloyd*) service.running fails if sysv script has no status command and enable: True + | refs: `#39366`_ + - **PR** `#39366`_: (*dglloyd*) Pass sig to service.status in after_toggle + | refs: `#42175`_ + * 3b17fb7 Merge pull request `#42175`_ from rallytime/`bp-39366`_ + * 53f7b98 Pass sig to service.status in after_toggle + +- **PR** `#42172`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-07-06T18:16:29Z* + + - **PR** `#42155`_: (*phsteve*) Fix docs for puppet.plugin_sync + * ea16f47 Merge pull request `#42172`_ from rallytime/merge-2016.11 + * b1fa332 Merge branch '2016.3' into '2016.11' + + * 8fa1fa5 Merge pull request `#42155`_ from phsteve/doc-fix-puppet + + * fb2cb78 Fix docs for puppet.plugin_sync so code-block renders properly and sync is spelled consistently + +- **PR** `#42176`_: (*rallytime*) Back-port `#42109`_ to 2016.11 + @ *2017-07-06T18:15:35Z* + + - **PR** `#42109`_: (*arthurlogilab*) [doc] Update aws.rst - add Debian default username + | refs: `#42176`_ + * 6307b98 Merge pull request 
`#42176`_ from rallytime/`bp-42109`_ + * 686926d Update aws.rst - add Debian default username + +- **PR** `#42095`_: (*terminalmage*) Add debug logging to dockerng.login + @ *2017-07-06T17:13:05Z* + + * 28c4e4c Merge pull request `#42095`_ from terminalmage/docker-login-debugging + * bd27870 Add debug logging to dockerng.login + +- **PR** `#42119`_: (*terminalmage*) Fix regression in CLI pillar override for salt-call + @ *2017-07-06T17:02:52Z* + + - **ISSUE** `#42116`_: (*terminalmage*) CLI pillar override regression in 2017.7.0rc1 + | refs: `#42119`_ + * 2b754bc Merge pull request `#42119`_ from terminalmage/issue42116 + * 9a26894 Add integration test for 42116 + + * 1bb42bb Fix regression when CLI pillar override is used with salt-call + +- **PR** `#42121`_: (*terminalmage*) Fix pillar.get when saltenv is passed + @ *2017-07-06T16:52:34Z* + + - **ISSUE** `#42114`_: (*clallen*) saltenv bug in pillar.get execution module function + | refs: `#42121`_ + * 8c0a83c Merge pull request `#42121`_ from terminalmage/issue42114 + * d142912 Fix pillar.get when saltenv is passed + +- **PR** `#42094`_: (*terminalmage*) Prevent command from showing in exception when output_loglevel=quiet + @ *2017-07-06T16:18:09Z* + + * 687992c Merge pull request `#42094`_ from terminalmage/quiet-exception + * 47d61f4 Prevent command from showing in exception when output_loglevel=quiet + +- **PR** `#42163`_: (*vutny*) Fix `#42115`_: parse libcloud "rc" version correctly + @ *2017-07-06T16:15:07Z* + + - **ISSUE** `#42115`_: (*nomeelnoj*) Installing EPEL repo breaks salt-cloud + | refs: `#42163`_ + * dad2551 Merge pull request `#42163`_ from vutny/`fix-42115`_ + * b27b1e3 Fix `#42115`_: parse libcloud "rc" version correctly + +- **PR** `#42164`_: (*Ch3LL*) Fix kerberos create_keytab doc + @ *2017-07-06T15:55:33Z* + + * 2a8ae2b Merge pull request `#42164`_ from Ch3LL/fix_kerb_doc + * 7c0fb24 Fix kerberos create_keytab doc + +- **PR** `#42141`_: (*rallytime*) Back-port `#42098`_ to 2016.11 + @ 
*2017-07-06T15:11:49Z* + + - **PR** `#42098`_: (*twangboy*) Change repo_ng to repo-ng + | refs: `#42141`_ + * 678d4d4 Merge pull request `#42141`_ from rallytime/`bp-42098`_ + * bd80243 Change repo_ng to repo-ng + +- **PR** `#42140`_: (*rallytime*) Back-port `#42097`_ to 2016.11 + @ *2017-07-06T15:11:29Z* + + - **PR** `#42097`_: (*gtmanfred*) require large timediff for ipv6 warning + | refs: `#42140`_ + * c8afd7a Merge pull request `#42140`_ from rallytime/`bp-42097`_ + * 9c4e132 Import datetime + + * 1435bf1 require large timediff for ipv6 warning + +- **PR** `#42142`_: (*Ch3LL*) Update builds available for rc1 + @ *2017-07-05T21:11:56Z* + + * c239664 Merge pull request `#42142`_ from Ch3LL/change_builds + * e1694af Update builds available for rc1 + +- **PR** `#42078`_: (*damon-atkins*) pkg.install and pkg.remove fix version number input. + @ *2017-07-05T06:04:57Z* + + * 4780d78 Merge pull request `#42078`_ from damon-atkins/fix_convert_flt_str_version_on_cmd_line + * 09d37dd Fix comment typo + + * 7167549 Handle version=None when converted to a string it becomes 'None' parm should default to empty string rather than None, it would fix better with existing code. + + * 4fb2bb1 Fix typo + + * cf55c33 pkg.install and pkg.remove on the command line take number version numbers, store them within a float. However version is a string, to support versions numbers like 1.3.4 + +- **PR** `#42105`_: (*Ch3LL*) Update releasecanddiate doc with new 2017.7.0rc1 Release + @ *2017-07-04T03:14:42Z* + + * 46d575a Merge pull request `#42105`_ from Ch3LL/update_rc + * d4e7b91 Update releasecanddiate doc with new 2017.7.0rc1 Release + +- **PR** `#42099`_: (*rallytime*) Remove references in docs to pip install salt-cloud + @ *2017-07-03T22:13:44Z* + + - **ISSUE** `#41885`_: (*astronouth7303*) Recommended pip installation outdated? 
+ | refs: `#42099`_ + * d38548b Merge pull request `#42099`_ from rallytime/`fix-41885`_ + * c2822e0 Remove references in docs to pip install salt-cloud + +- **PR** `#42086`_: (*abulford*) Make result=true if Docker volume already exists + @ *2017-07-03T15:48:33Z* + + - **ISSUE** `#42076`_: (*abulford*) dockerng.volume_present test looks as though it would cause a change + | refs: `#42086`_ `#42086`_ + * 81d606a Merge pull request `#42086`_ from redmatter/fix-dockerng-volume-present-result + * 8d54968 Make result=true if Docker volume already exists + +- **PR** `#42021`_: (*gtmanfred*) Set concurrent to True when running states with sudo + @ *2017-06-30T21:02:15Z* + + - **ISSUE** `#25842`_: (*shikhartanwar*) Running salt-minion as non-root user to execute sudo commands always returns an error + | refs: `#42021`_ + * 7160697 Merge pull request `#42021`_ from gtmanfred/2016.11 + * 26beb18 Set concurrent to True when running states with sudo + +- **PR** `#42029`_: (*terminalmage*) Mock socket.getaddrinfo in unit.utils.network_test.NetworkTestCase.test_host_to_ips + @ *2017-06-30T20:58:56Z* + + * b784fbb Merge pull request `#42029`_ from terminalmage/host_to_ips + * 26f848e Mock socket.getaddrinfo in unit.utils.network_test.NetworkTestCase.test_host_to_ips + +- **PR** `#42055`_: (*dmurphy18*) Upgrade support for gnupg v2.1 and higher + @ *2017-06-30T20:54:02Z* + + * e067020 Merge pull request `#42055`_ from dmurphy18/handle_gnupgv21 + * e20cea6 Upgrade support for gnupg v2.1 and higher + +- **PR** `#42048`_: (*Ch3LL*) Add initial 2016.11.7 Release Notes + @ *2017-06-30T16:00:05Z* + + * 74ba2ab Merge pull request `#42048`_ from Ch3LL/add_11.7 + * 1de5e00 Add initial 2016.11.7 Release Notes + +- **PR** `#42024`_: (*leeclemens*) doc: Specify versionadded for SELinux policy install/uninstall + @ *2017-06-29T23:29:50Z* + + * ca4e619 Merge pull request `#42024`_ from leeclemens/doc/selinux + * b63a3c0 doc: Specify versionadded for SELinux policy install/uninstall + +- **PR** 
`#42030`_: (*whiteinge*) Re-add msgpack to mocked imports + @ *2017-06-29T20:47:59Z* + + - **PR** `#42028`_: (*whiteinge*) Revert "Allow docs to be built under Python 3" + | refs: `#42030`_ + - **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + * 50856d0 Merge pull request `#42030`_ from whiteinge/revert-py3-doc-chagnes-pt-2 + * 18dfa98 Re-add msgpack to mocked imports + +- **PR** `#42028`_: (*whiteinge*) Revert "Allow docs to be built under Python 3" + | refs: `#42030`_ + @ *2017-06-29T19:47:46Z* + + - **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + * 53031d2 Merge pull request `#42028`_ from saltstack/revert-41961-py3_doc + * 5592e6e Revert "Allow docs to be built under Python 3" + +- **PR** `#42017`_: (*lorengordon*) Fixes typo "nozerconf" -> "nozeroconf" + @ *2017-06-29T17:30:48Z* + + - **ISSUE** `#42013`_: (*dusto*) Misspelled nozeroconf in salt/modules/rh_ip.py + | refs: `#42017`_ + * 1416bf7 Merge pull request `#42017`_ from lorengordon/issue-42013 + * b6cf5f2 Fixes typo nozerconf -> nozeroconf + +- **PR** `#41906`_: (*terminalmage*) Better support for numeric saltenvs + @ *2017-06-29T17:19:33Z* + + * 0ebb50b Merge pull request `#41906`_ from terminalmage/numeric-saltenv + * 2d798de Better support for numeric saltenvs + +- **PR** `#41995`_: (*terminalmage*) Temporarily set the umask before writing an auth token + @ *2017-06-29T01:09:48Z* + + * 6a3c03c Merge pull request `#41995`_ from terminalmage/token-umask + * 4f54b00 Temporarily set the umask before writing an auth token + +- **PR** `#41999`_: (*terminalmage*) Update IP address for unit.utils.network_test.NetworkTestCase.test_host_to_ips + @ *2017-06-29T01:01:31Z* + + * e3801b0 Merge pull request `#41999`_ from terminalmage/fix-network-test + * fb6a933 Update IP address for unit.utils.network_test.NetworkTestCase.test_host_to_ips + +- **PR** `#41991`_: (*Da-Juan*) Accept a list for state_aggregate global setting + @ 
*2017-06-29T00:58:59Z* + + - **ISSUE** `#18659`_: (*whiteinge*) mod_aggregate not working for list-form configuration + | refs: `#41991`_ + * a7f3892 Merge pull request `#41991`_ from Da-Juan/fix-state_aggregate-list + * c9075b8 Accept a list for state_aggregate setting + +- **PR** `#41993`_: (*UtahDave*) change out salt support link to SaltConf link + @ *2017-06-29T00:55:20Z* + + * 7424f87 Merge pull request `#41993`_ from UtahDave/2016.11local + * bff050a change out salt support link to SaltConf link + +- **PR** `#41987`_: (*rallytime*) [2016.11] Merge forward from 2016.3 to 2016.11 + @ *2017-06-28T20:19:11Z* + + - **PR** `#41981`_: (*Ch3LL*) [2016.3] Bump latest release version to 2016.11.6 + * 3b9ccf0 Merge pull request `#41987`_ from rallytime/merge-2016.11 + * 48867c4 Merge branch '2016.3' into '2016.11' + + * c589eae Merge pull request `#41981`_ from Ch3LL/11.6_3 + + * 2516ae1 [2016.3] Bump latest release version to 2016.11.6 + +- **PR** `#41985`_: (*rallytime*) Back-port `#41780`_ to 2016.11 + @ *2017-06-28T20:18:57Z* + + - **PR** `#41780`_: (*ferringb*) Fix salt.util.render_jinja_tmpl usage for when not used in an environmnet + | refs: `#41985`_ + * 768339d Merge pull request `#41985`_ from rallytime/`bp-41780`_ + * 8f8d3a4 Fix salt.util.render_jinja_tmpl usage for when not used in an environment. 
+ +- **PR** `#41986`_: (*rallytime*) Back-port `#41820`_ to 2016.11 + @ *2017-06-28T20:18:43Z* + + - **ISSUE** `#34963`_: (*craigafinch*) Incorrect behavior or documentation for comments in salt.states.pkgrepo.managed + | refs: `#41820`_ + - **PR** `#41820`_: (*nhavens*) Fix yum repo file comments to work as documented in pkgrepo.managed + | refs: `#41986`_ + * bd9090c Merge pull request `#41986`_ from rallytime/`bp-41820`_ + * 72320e3 Fix yum repo file comments to work as documented in pkgrepo.managed + +- **PR** `#41973`_: (*vutny*) Fix Master/Minion scheduled jobs based on Cron expressions + | refs: `#42077`_ + @ *2017-06-28T16:39:02Z* + + * a31da52 Merge pull request `#41973`_ from vutny/fix-croniter-scheduled-jobs + * 148788e Fix Master/Minion scheduled jobs based on Cron expressions + +- **PR** `#41980`_: (*Ch3LL*) [2016.11] Bump latest release version to 2016.11.6 + @ *2017-06-28T15:35:11Z* + + * 689ff93 Merge pull request `#41980`_ from Ch3LL/11.6_11 + * fe4f571 [2016.11] Bump latest release version to 2016.11.6 + +- **PR** `#41961`_: (*cachedout*) Allow docs to be built under Python 3 + | refs: `#42028`_ + @ *2017-06-27T21:11:54Z* + + * 82b1eb2 Merge pull request `#41961`_ from cachedout/py3_doc + * 7aacddf Allow docs to be built under Python 3 + +- **PR** `#41948`_: (*davidjb*) Fix Composer state's `name` docs; formatting + @ *2017-06-27T17:51:29Z* + + - **PR** `#41933`_: (*davidjb*) Fix Composer state's `name` docs and improve formatting + | refs: `#41948`_ + * f0eb51d Merge pull request `#41948`_ from davidjb/patch-9 + * 0e4b3d9 Fix Composer state's `name` docs; formatting + +- **PR** `#41914`_: (*vutny*) archive.extracted: fix hash sum verification for local archives + @ *2017-06-26T17:59:27Z* + + * e28e10d Merge pull request `#41914`_ from vutny/fix-archive-extracted-local-file-hash + * 54910fe archive.extracted: fix hash sum verification for local archives + +- **PR** `#41912`_: (*Ch3LL*) Allow pacman module to run on Manjaro + @ 
*2017-06-26T15:35:20Z* + + * 76ad6ff Merge pull request `#41912`_ from Ch3LL/fix_manjaro + * e4dd72a Update os_name_map in core grains for new manjaro systems + + * aa7c839 Allow pacman module to run on Manjaro + +- **PR** `#41516`_: (*kstreee*) Implements MessageClientPool to avoid blocking waiting for zeromq and tcp communications. + @ *2017-06-26T14:41:38Z* + + - **ISSUE** `#38093`_: (*DmitryKuzmenko*) Make threads avoid blocking waiting while communicating using TCP transport. + | refs: `#41516`_ `#41516`_ + - **PR** `#37878`_: (*kstreee*) Makes threads avoid blocking waiting while communicating using Zeromq. + | refs: `#41516`_ `#41516`_ + * ff67d47 Merge pull request `#41516`_ from kstreee/fix-blocking-waiting-tcp-connection + * df96969 Removes redundant closing statements. + + * 94b9ea5 Implements MessageClientPool to avoid blocking waiting for zeromq and tcp communications. + +- **PR** `#41888`_: (*Ch3LL*) Add additional commits to 2016.11.6 release notes + @ *2017-06-22T16:19:00Z* + + * c90cb67 Merge pull request `#41888`_ from Ch3LL/change_release + * 4e1239d Add additional commits to 2016.11.6 release notes + +- **PR** `#41882`_: (*Ch3LL*) Add pycryptodome to crypt_test + @ *2017-06-21T19:51:10Z* + + * 4a32644 Merge pull request `#41882`_ from Ch3LL/fix_crypt_test + * 6f70dbd Add pycryptodome to crypt_test + +- **PR** `#41877`_: (*Ch3LL*) Fix netstat and routes test + @ *2017-06-21T16:16:58Z* + + * 13df29e Merge pull request `#41877`_ from Ch3LL/fix_netstat_test + * d2076a6 Patch salt.utils.which for test_route test + + * 51f7e10 Patch salt.utils.which for test_netstat test + +- **PR** `#41566`_: (*morganwillcock*) win_certutil: workaround for reading serial numbers with non-English languages + @ *2017-06-21T15:40:29Z* + + - **ISSUE** `#41367`_: (*lubyou*) certutil.add_store does not work on non english windows versions or on Windows 10 (localised or English) + | refs: `#41566`_ + * 66f8c83 Merge pull request `#41566`_ from morganwillcock/certutil + * 
c337d52 Fix test data for test_get_serial, and a typo + + * 7f69613 test and lint fixes + + * 8ee4843 Suppress output of crypt context and be more specifc with whitespace vs. serial + + * 61f817d Match serials based on output position (fix for non-English languages) + +- **PR** `#41679`_: (*terminalmage*) Prevent unnecessary duplicate pillar compilation + @ *2017-06-21T15:32:42Z* + + * 4d0f5c4 Merge pull request `#41679`_ from terminalmage/get-top-file-envs + * a916e8d Improve normalization of saltenv/pillarenv usage for states + + * 02f293a Update state unit tests to reflect recent changes + + * b7e5c11 Don't compile pillar data when getting top file envs + + * 8d6fdb7 Don't compile pillar twice for salt-call + + * d2abfbf Add initial_pillar argument to salt.state + + * 70186de salt.pillar: rename the "pillar" argument to "pillar_override" + +- **PR** `#41853`_: (*vutny*) Fix master side scheduled jobs to return events + @ *2017-06-20T22:06:29Z* + + - **ISSUE** `#39668`_: (*mirceaulinic*) Master scheduled job not recorded on the event bus + | refs: `#41658`_ + - **ISSUE** `#12653`_: (*pengyao*) salt schedule doesn't return jobs result info to master + | refs: `#41853`_ + - **PR** `#41695`_: (*xiaoanyunfei*) fix max RecursionError, Ellipsis + | refs: `#41853`_ + - **PR** `#41658`_: (*garethgreenaway*) Fixes to the salt scheduler + | refs: `#41853`_ + * 29b0acc Merge pull request `#41853`_ from vutny/fix-master-schedule-event + * e206c38 Fix master side scheduled jobs to return events + + +.. _`#1`: https://github.com/saltstack/salt/issues/1 +.. _`#1036125`: https://github.com/saltstack/salt/issues/1036125 +.. _`#12653`: https://github.com/saltstack/salt/issues/12653 +.. _`#15171`: https://github.com/saltstack/salt/issues/15171 +.. _`#18659`: https://github.com/saltstack/salt/issues/18659 +.. _`#23516`: https://github.com/saltstack/salt/issues/23516 +.. _`#25842`: https://github.com/saltstack/salt/issues/25842 +.. 
_`#32400`: https://github.com/saltstack/salt/issues/32400 +.. _`#33806`: https://github.com/saltstack/salt/pull/33806 +.. _`#34280`: https://github.com/saltstack/salt/pull/34280 +.. _`#34963`: https://github.com/saltstack/salt/issues/34963 +.. _`#37424`: https://github.com/saltstack/salt/pull/37424 +.. _`#37878`: https://github.com/saltstack/salt/pull/37878 +.. _`#38093`: https://github.com/saltstack/salt/issues/38093 +.. _`#38839`: https://github.com/saltstack/salt/issues/38839 +.. _`#38965`: https://github.com/saltstack/salt/pull/38965 +.. _`#39365`: https://github.com/saltstack/salt/issues/39365 +.. _`#39366`: https://github.com/saltstack/salt/pull/39366 +.. _`#39668`: https://github.com/saltstack/salt/issues/39668 +.. _`#40490`: https://github.com/saltstack/salt/issues/40490 +.. _`#41116`: https://github.com/saltstack/salt/issues/41116 +.. _`#41367`: https://github.com/saltstack/salt/issues/41367 +.. _`#41433`: https://github.com/saltstack/salt/issues/41433 +.. _`#41516`: https://github.com/saltstack/salt/pull/41516 +.. _`#41543`: https://github.com/saltstack/salt/pull/41543 +.. _`#41566`: https://github.com/saltstack/salt/pull/41566 +.. _`#41658`: https://github.com/saltstack/salt/pull/41658 +.. _`#41679`: https://github.com/saltstack/salt/pull/41679 +.. _`#41695`: https://github.com/saltstack/salt/pull/41695 +.. _`#41721`: https://github.com/saltstack/salt/issues/41721 +.. _`#41770`: https://github.com/saltstack/salt/issues/41770 +.. _`#41780`: https://github.com/saltstack/salt/pull/41780 +.. _`#41820`: https://github.com/saltstack/salt/pull/41820 +.. _`#41853`: https://github.com/saltstack/salt/pull/41853 +.. _`#41877`: https://github.com/saltstack/salt/pull/41877 +.. _`#41882`: https://github.com/saltstack/salt/pull/41882 +.. _`#41885`: https://github.com/saltstack/salt/issues/41885 +.. _`#41888`: https://github.com/saltstack/salt/pull/41888 +.. _`#41906`: https://github.com/saltstack/salt/pull/41906 +.. 
_`#41912`: https://github.com/saltstack/salt/pull/41912 +.. _`#41914`: https://github.com/saltstack/salt/pull/41914 +.. _`#41933`: https://github.com/saltstack/salt/pull/41933 +.. _`#41936`: https://github.com/saltstack/salt/issues/41936 +.. _`#41945`: https://github.com/saltstack/salt/pull/41945 +.. _`#41948`: https://github.com/saltstack/salt/pull/41948 +.. _`#41955`: https://github.com/saltstack/salt/issues/41955 +.. _`#41961`: https://github.com/saltstack/salt/pull/41961 +.. _`#41968`: https://github.com/saltstack/salt/pull/41968 +.. _`#41973`: https://github.com/saltstack/salt/pull/41973 +.. _`#41976`: https://github.com/saltstack/salt/issues/41976 +.. _`#41977`: https://github.com/saltstack/salt/pull/41977 +.. _`#41980`: https://github.com/saltstack/salt/pull/41980 +.. _`#41981`: https://github.com/saltstack/salt/pull/41981 +.. _`#41982`: https://github.com/saltstack/salt/issues/41982 +.. _`#41985`: https://github.com/saltstack/salt/pull/41985 +.. _`#41986`: https://github.com/saltstack/salt/pull/41986 +.. _`#41987`: https://github.com/saltstack/salt/pull/41987 +.. _`#41988`: https://github.com/saltstack/salt/pull/41988 +.. _`#41991`: https://github.com/saltstack/salt/pull/41991 +.. _`#41993`: https://github.com/saltstack/salt/pull/41993 +.. _`#41995`: https://github.com/saltstack/salt/pull/41995 +.. _`#41999`: https://github.com/saltstack/salt/pull/41999 +.. _`#42013`: https://github.com/saltstack/salt/issues/42013 +.. _`#42017`: https://github.com/saltstack/salt/pull/42017 +.. _`#42021`: https://github.com/saltstack/salt/pull/42021 +.. _`#42024`: https://github.com/saltstack/salt/pull/42024 +.. _`#42028`: https://github.com/saltstack/salt/pull/42028 +.. _`#42029`: https://github.com/saltstack/salt/pull/42029 +.. _`#42030`: https://github.com/saltstack/salt/pull/42030 +.. _`#42041`: https://github.com/saltstack/salt/issues/42041 +.. _`#42045`: https://github.com/saltstack/salt/pull/42045 +.. _`#42048`: https://github.com/saltstack/salt/pull/42048 +.. 
_`#42055`: https://github.com/saltstack/salt/pull/42055 +.. _`#42076`: https://github.com/saltstack/salt/issues/42076 +.. _`#42077`: https://github.com/saltstack/salt/pull/42077 +.. _`#42078`: https://github.com/saltstack/salt/pull/42078 +.. _`#42086`: https://github.com/saltstack/salt/pull/42086 +.. _`#42094`: https://github.com/saltstack/salt/pull/42094 +.. _`#42095`: https://github.com/saltstack/salt/pull/42095 +.. _`#42097`: https://github.com/saltstack/salt/pull/42097 +.. _`#42098`: https://github.com/saltstack/salt/pull/42098 +.. _`#42099`: https://github.com/saltstack/salt/pull/42099 +.. _`#42105`: https://github.com/saltstack/salt/pull/42105 +.. _`#42109`: https://github.com/saltstack/salt/pull/42109 +.. _`#42114`: https://github.com/saltstack/salt/issues/42114 +.. _`#42115`: https://github.com/saltstack/salt/issues/42115 +.. _`#42116`: https://github.com/saltstack/salt/issues/42116 +.. _`#42119`: https://github.com/saltstack/salt/pull/42119 +.. _`#42121`: https://github.com/saltstack/salt/pull/42121 +.. _`#42123`: https://github.com/saltstack/salt/pull/42123 +.. _`#42137`: https://github.com/saltstack/salt/issues/42137 +.. _`#42140`: https://github.com/saltstack/salt/pull/42140 +.. _`#42141`: https://github.com/saltstack/salt/pull/42141 +.. _`#42142`: https://github.com/saltstack/salt/pull/42142 +.. _`#42152`: https://github.com/saltstack/salt/issues/42152 +.. _`#42155`: https://github.com/saltstack/salt/pull/42155 +.. _`#42163`: https://github.com/saltstack/salt/pull/42163 +.. _`#42164`: https://github.com/saltstack/salt/pull/42164 +.. _`#42172`: https://github.com/saltstack/salt/pull/42172 +.. _`#42173`: https://github.com/saltstack/salt/pull/42173 +.. _`#42175`: https://github.com/saltstack/salt/pull/42175 +.. _`#42176`: https://github.com/saltstack/salt/pull/42176 +.. _`#42181`: https://github.com/saltstack/salt/pull/42181 +.. _`#42194`: https://github.com/saltstack/salt/issues/42194 +.. _`#42198`: https://github.com/saltstack/salt/issues/42198 +.. 
_`#42200`: https://github.com/saltstack/salt/pull/42200 +.. _`#42211`: https://github.com/saltstack/salt/pull/42211 +.. _`#42215`: https://github.com/saltstack/salt/pull/42215 +.. _`#42224`: https://github.com/saltstack/salt/pull/42224 +.. _`#42232`: https://github.com/saltstack/salt/issues/42232 +.. _`#42235`: https://github.com/saltstack/salt/pull/42235 +.. _`#42240`: https://github.com/saltstack/salt/issues/42240 +.. _`#42252`: https://github.com/saltstack/salt/pull/42252 +.. _`#42253`: https://github.com/saltstack/salt/pull/42253 +.. _`#42261`: https://github.com/saltstack/salt/pull/42261 +.. _`#42262`: https://github.com/saltstack/salt/pull/42262 +.. _`#42264`: https://github.com/saltstack/salt/pull/42264 +.. _`#42267`: https://github.com/saltstack/salt/issues/42267 +.. _`#42269`: https://github.com/saltstack/salt/pull/42269 +.. _`#42275`: https://github.com/saltstack/salt/pull/42275 +.. _`#42279`: https://github.com/saltstack/salt/issues/42279 +.. _`#42282`: https://github.com/saltstack/salt/pull/42282 +.. _`#42289`: https://github.com/saltstack/salt/pull/42289 +.. _`#42291`: https://github.com/saltstack/salt/pull/42291 +.. _`#42295`: https://github.com/saltstack/salt/issues/42295 +.. _`#42308`: https://github.com/saltstack/salt/pull/42308 +.. _`#42314`: https://github.com/saltstack/salt/pull/42314 +.. _`#42319`: https://github.com/saltstack/salt/pull/42319 +.. _`#42329`: https://github.com/saltstack/salt/issues/42329 +.. _`#42333`: https://github.com/saltstack/salt/issues/42333 +.. _`#42339`: https://github.com/saltstack/salt/pull/42339 +.. _`#42350`: https://github.com/saltstack/salt/pull/42350 +.. _`#42352`: https://github.com/saltstack/salt/pull/42352 +.. _`#42353`: https://github.com/saltstack/salt/pull/42353 +.. _`#42356`: https://github.com/saltstack/salt/pull/42356 +.. _`#42357`: https://github.com/saltstack/salt/issues/42357 +.. _`#42359`: https://github.com/saltstack/salt/pull/42359 +.. _`#42360`: https://github.com/saltstack/salt/pull/42360 +.. 
_`#42368`: https://github.com/saltstack/salt/pull/42368 +.. _`#42370`: https://github.com/saltstack/salt/pull/42370 +.. _`#42371`: https://github.com/saltstack/salt/issues/42371 +.. _`#42375`: https://github.com/saltstack/salt/issues/42375 +.. _`#42387`: https://github.com/saltstack/salt/pull/42387 +.. _`#42403`: https://github.com/saltstack/salt/issues/42403 +.. _`#42405`: https://github.com/saltstack/salt/issues/42405 +.. _`#42413`: https://github.com/saltstack/salt/issues/42413 +.. _`#42414`: https://github.com/saltstack/salt/pull/42414 +.. _`#42417`: https://github.com/saltstack/salt/issues/42417 +.. _`#42424`: https://github.com/saltstack/salt/pull/42424 +.. _`#42433`: https://github.com/saltstack/salt/pull/42433 +.. _`#42443`: https://github.com/saltstack/salt/pull/42443 +.. _`#42456`: https://github.com/saltstack/salt/issues/42456 +.. _`#42464`: https://github.com/saltstack/salt/pull/42464 +.. _`#42477`: https://github.com/saltstack/salt/issues/42477 +.. _`#42479`: https://github.com/saltstack/salt/pull/42479 +.. _`#42509`: https://github.com/saltstack/salt/pull/42509 +.. _`#42515`: https://github.com/saltstack/salt/pull/42515 +.. _`#42516`: https://github.com/saltstack/salt/pull/42516 +.. _`#42523`: https://github.com/saltstack/salt/pull/42523 +.. _`#42527`: https://github.com/saltstack/salt/pull/42527 +.. _`#42547`: https://github.com/saltstack/salt/pull/42547 +.. _`#42551`: https://github.com/saltstack/salt/pull/42551 +.. _`#42552`: https://github.com/saltstack/salt/pull/42552 +.. _`#42571`: https://github.com/saltstack/salt/pull/42571 +.. _`#42573`: https://github.com/saltstack/salt/pull/42573 +.. _`#42574`: https://github.com/saltstack/salt/pull/42574 +.. _`#42586`: https://github.com/saltstack/salt/pull/42586 +.. _`#42600`: https://github.com/saltstack/salt/issues/42600 +.. _`#42623`: https://github.com/saltstack/salt/pull/42623 +.. _`#42627`: https://github.com/saltstack/salt/issues/42627 +.. _`#42629`: https://github.com/saltstack/salt/pull/42629 +.. 
_`#42642`: https://github.com/saltstack/salt/issues/42642 +.. _`#42644`: https://github.com/saltstack/salt/issues/42644 +.. _`#42651`: https://github.com/saltstack/salt/pull/42651 +.. _`#42655`: https://github.com/saltstack/salt/pull/42655 +.. _`#42663`: https://github.com/saltstack/salt/pull/42663 +.. _`#42669`: https://github.com/saltstack/salt/pull/42669 +.. _`#42683`: https://github.com/saltstack/salt/issues/42683 +.. _`#42686`: https://github.com/saltstack/salt/issues/42686 +.. _`#42690`: https://github.com/saltstack/salt/issues/42690 +.. _`#42693`: https://github.com/saltstack/salt/pull/42693 +.. _`#42694`: https://github.com/saltstack/salt/pull/42694 +.. _`#42731`: https://github.com/saltstack/salt/issues/42731 +.. _`#42744`: https://github.com/saltstack/salt/pull/42744 +.. _`#42747`: https://github.com/saltstack/salt/issues/42747 +.. _`#42748`: https://github.com/saltstack/salt/pull/42748 +.. _`#42753`: https://github.com/saltstack/salt/issues/42753 +.. _`#42760`: https://github.com/saltstack/salt/pull/42760 +.. _`#42764`: https://github.com/saltstack/salt/pull/42764 +.. _`#42784`: https://github.com/saltstack/salt/pull/42784 +.. _`#42786`: https://github.com/saltstack/salt/pull/42786 +.. _`#42788`: https://github.com/saltstack/salt/pull/42788 +.. _`#42795`: https://github.com/saltstack/salt/pull/42795 +.. _`#42798`: https://github.com/saltstack/salt/pull/42798 +.. _`#42803`: https://github.com/saltstack/salt/issues/42803 +.. _`#42804`: https://github.com/saltstack/salt/pull/42804 +.. _`#42805`: https://github.com/saltstack/salt/pull/42805 +.. _`#42806`: https://github.com/saltstack/salt/pull/42806 +.. _`#42826`: https://github.com/saltstack/salt/pull/42826 +.. _`#42829`: https://github.com/saltstack/salt/pull/42829 +.. _`#42835`: https://github.com/saltstack/salt/pull/42835 +.. _`#42836`: https://github.com/saltstack/salt/pull/42836 +.. _`#42838`: https://github.com/saltstack/salt/pull/42838 +.. _`#42848`: https://github.com/saltstack/salt/pull/42848 +.. 
_`#42851`: https://github.com/saltstack/salt/pull/42851 +.. _`#42856`: https://github.com/saltstack/salt/pull/42856 +.. _`#42859`: https://github.com/saltstack/salt/pull/42859 +.. _`#42861`: https://github.com/saltstack/salt/pull/42861 +.. _`#42864`: https://github.com/saltstack/salt/pull/42864 +.. _`#42869`: https://github.com/saltstack/salt/issues/42869 +.. _`#42871`: https://github.com/saltstack/salt/pull/42871 +.. _`#42877`: https://github.com/saltstack/salt/pull/42877 +.. _`#42882`: https://github.com/saltstack/salt/pull/42882 +.. _`#42883`: https://github.com/saltstack/salt/pull/42883 +.. _`#42886`: https://github.com/saltstack/salt/pull/42886 +.. _`#42890`: https://github.com/saltstack/salt/pull/42890 +.. _`#42918`: https://github.com/saltstack/salt/pull/42918 +.. _`#42919`: https://github.com/saltstack/salt/pull/42919 +.. _`#42940`: https://github.com/saltstack/salt/pull/42940 +.. _`#42942`: https://github.com/saltstack/salt/pull/42942 +.. _`#42944`: https://github.com/saltstack/salt/pull/42944 +.. _`#42949`: https://github.com/saltstack/salt/pull/42949 +.. _`#42950`: https://github.com/saltstack/salt/pull/42950 +.. _`#42952`: https://github.com/saltstack/salt/pull/42952 +.. _`#42954`: https://github.com/saltstack/salt/pull/42954 +.. _`#42959`: https://github.com/saltstack/salt/pull/42959 +.. _`#42968`: https://github.com/saltstack/salt/pull/42968 +.. _`#42969`: https://github.com/saltstack/salt/pull/42969 +.. _`#42985`: https://github.com/saltstack/salt/pull/42985 +.. _`#42986`: https://github.com/saltstack/salt/pull/42986 +.. _`#42992`: https://github.com/saltstack/salt/issues/42992 +.. _`#43009`: https://github.com/saltstack/salt/pull/43009 +.. _`#43014`: https://github.com/saltstack/salt/pull/43014 +.. _`#43019`: https://github.com/saltstack/salt/pull/43019 +.. _`#43020`: https://github.com/saltstack/salt/pull/43020 +.. _`#43021`: https://github.com/saltstack/salt/pull/43021 +.. _`#43023`: https://github.com/saltstack/salt/pull/43023 +.. 
_`#43026`: https://github.com/saltstack/salt/pull/43026 +.. _`#43027`: https://github.com/saltstack/salt/pull/43027 +.. _`#43031`: https://github.com/saltstack/salt/pull/43031 +.. _`#43032`: https://github.com/saltstack/salt/pull/43032 +.. _`#43033`: https://github.com/saltstack/salt/pull/43033 +.. _`#43036`: https://github.com/saltstack/salt/issues/43036 +.. _`#43037`: https://github.com/saltstack/salt/pull/43037 +.. _`#43048`: https://github.com/saltstack/salt/pull/43048 +.. _`#43054`: https://github.com/saltstack/salt/pull/43054 +.. _`#43060`: https://github.com/saltstack/salt/pull/43060 +.. _`#43064`: https://github.com/saltstack/salt/pull/43064 +.. _`#43092`: https://github.com/saltstack/salt/pull/43092 +.. _`#43100`: https://github.com/saltstack/salt/pull/43100 +.. _`#43101`: https://github.com/saltstack/salt/issues/43101 +.. _`#43103`: https://github.com/saltstack/salt/pull/43103 +.. _`#43116`: https://github.com/saltstack/salt/pull/43116 +.. _`#43143`: https://github.com/saltstack/salt/issues/43143 +.. _`#43151`: https://github.com/saltstack/salt/pull/43151 +.. _`#43154`: https://github.com/saltstack/salt/pull/43154 +.. _`#43171`: https://github.com/saltstack/salt/pull/43171 +.. _`#43173`: https://github.com/saltstack/salt/pull/43173 +.. _`#43178`: https://github.com/saltstack/salt/pull/43178 +.. _`#43179`: https://github.com/saltstack/salt/pull/43179 +.. _`#43191`: https://github.com/saltstack/salt/pull/43191 +.. _`#43196`: https://github.com/saltstack/salt/pull/43196 +.. _`#43198`: https://github.com/saltstack/salt/issues/43198 +.. _`#43199`: https://github.com/saltstack/salt/pull/43199 +.. _`#43202`: https://github.com/saltstack/salt/pull/43202 +.. _`#43228`: https://github.com/saltstack/salt/pull/43228 +.. _`#43271`: https://github.com/saltstack/salt/pull/43271 +.. _`#475`: https://github.com/saltstack/salt/issues/475 +.. _`#495`: https://github.com/saltstack/salt/issues/495 +.. _`bp-37424`: https://github.com/saltstack/salt/pull/37424 +.. 
_`bp-39366`: https://github.com/saltstack/salt/pull/39366 +.. _`bp-41543`: https://github.com/saltstack/salt/pull/41543 +.. _`bp-41780`: https://github.com/saltstack/salt/pull/41780 +.. _`bp-41820`: https://github.com/saltstack/salt/pull/41820 +.. _`bp-42097`: https://github.com/saltstack/salt/pull/42097 +.. _`bp-42098`: https://github.com/saltstack/salt/pull/42098 +.. _`bp-42109`: https://github.com/saltstack/salt/pull/42109 +.. _`bp-42224`: https://github.com/saltstack/salt/pull/42224 +.. _`bp-42433`: https://github.com/saltstack/salt/pull/42433 +.. _`bp-42547`: https://github.com/saltstack/salt/pull/42547 +.. _`bp-42552`: https://github.com/saltstack/salt/pull/42552 +.. _`bp-42651`: https://github.com/saltstack/salt/pull/42651 +.. _`bp-42744`: https://github.com/saltstack/salt/pull/42744 +.. _`bp-42760`: https://github.com/saltstack/salt/pull/42760 +.. _`bp-42784`: https://github.com/saltstack/salt/pull/42784 +.. _`bp-42848`: https://github.com/saltstack/salt/pull/42848 +.. _`bp-42871`: https://github.com/saltstack/salt/pull/42871 +.. _`bp-42883`: https://github.com/saltstack/salt/pull/42883 +.. _`bp-43020`: https://github.com/saltstack/salt/pull/43020 +.. _`bp-43031`: https://github.com/saltstack/salt/pull/43031 +.. _`bp-43116`: https://github.com/saltstack/salt/pull/43116 +.. _`fix-38839`: https://github.com/saltstack/salt/issues/38839 +.. _`fix-41116`: https://github.com/saltstack/salt/issues/41116 +.. _`fix-41721`: https://github.com/saltstack/salt/issues/41721 +.. _`fix-41885`: https://github.com/saltstack/salt/issues/41885 +.. _`fix-42115`: https://github.com/saltstack/salt/issues/42115 +.. _`fix-42152`: https://github.com/saltstack/salt/issues/42152 +.. _`fix-42267`: https://github.com/saltstack/salt/issues/42267 +.. _`fix-42375`: https://github.com/saltstack/salt/issues/42375 +.. _`fix-42405`: https://github.com/saltstack/salt/issues/42405 +.. _`fix-42417`: https://github.com/saltstack/salt/issues/42417 +.. 
_`fix-42683`: https://github.com/saltstack/salt/issues/42683 From ea6e66175552a65b9c3a13d78799fd78c784a760 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 8 Sep 2017 15:07:44 -0600 Subject: [PATCH 032/348] Revert "Reduce fileclient.get_file latency by merging _file_find and _file_hash" This reverts commit 94c62388e792884cebc11095db20e3db81fa1348. --- salt/fileclient.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/salt/fileclient.py b/salt/fileclient.py index 2b4484211c0..fc396fcc562 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -1270,10 +1270,10 @@ class RemoteClient(Client): hash_type = self.opts.get('hash_type', 'md5') ret['hsum'] = salt.utils.get_hash(path, form=hash_type) ret['hash_type'] = hash_type - return ret, list(os.stat(path)) + return ret load = {'path': path, 'saltenv': saltenv, - 'cmd': '_file_hash_and_stat'} + 'cmd': '_file_hash'} return self.channel.send(load) def hash_file(self, path, saltenv='base'): @@ -1282,14 +1282,33 @@ class RemoteClient(Client): master file server prepend the path with salt:// otherwise, prepend the file with / for a local file. ''' - return self.__hash_and_stat_file(path, saltenv)[0] + return self.__hash_and_stat_file(path, saltenv) def hash_and_stat_file(self, path, saltenv='base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. 
''' - return self.__hash_and_stat_file(path, saltenv) + hash_result = self.hash_file(path, saltenv) + try: + path = self._check_proto(path) + except MinionError as err: + if not os.path.isfile(path): + return hash_result, None + else: + try: + return hash_result, list(os.stat(path)) + except Exception: + return hash_result, None + load = {'path': path, + 'saltenv': saltenv, + 'cmd': '_file_find'} + fnd = self.channel.send(load) + try: + stat_result = fnd.get('stat') + except AttributeError: + stat_result = None + return hash_result, stat_result def list_env(self, saltenv='base'): ''' From 6114df8dc3cd44a10380e1868dcc72e083f320fb Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 11 Sep 2017 11:20:33 -0700 Subject: [PATCH 033/348] Adding a small check to ensure we do not continue to populate kwargs with __pub_ items from the kwargs item. --- salt/utils/schedule.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 2a818628aa3..31e3c1aaf1a 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -845,7 +845,8 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) + if key is not 'kwargs': + kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) From e496d28cbf7c0a7c8fe18a2797c83c58d90eca23 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 1 Sep 2017 11:18:20 -0600 Subject: [PATCH 034/348] Fix `unit.utils.test_verify` for Windows Use Windows api to get and set the maxstdio Change messages to work with Windows --- tests/unit/utils/test_verify.py | 42 ++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/tests/unit/utils/test_verify.py b/tests/unit/utils/test_verify.py index 795298877dd..4df9f2d8e59 100644 --- a/tests/unit/utils/test_verify.py 
+++ b/tests/unit/utils/test_verify.py @@ -10,10 +10,15 @@ import os import sys import stat import shutil -import resource import tempfile import socket +# Import third party libs +try: + import win32file +except ImportError: + import resource + # Import Salt Testing libs from tests.support.unit import skipIf, TestCase from tests.support.paths import TMP @@ -82,7 +87,10 @@ class TestVerify(TestCase): writer = FakeWriter() sys.stderr = writer # Now run the test - self.assertFalse(check_user('nouser')) + if salt.utils.is_windows(): + self.assertTrue(check_user('nouser')) + else: + self.assertFalse(check_user('nouser')) # Restore sys.stderr sys.stderr = stderr if writer.output != 'CRITICAL: User not found: "nouser"\n': @@ -118,7 +126,6 @@ class TestVerify(TestCase): # not support IPv6. pass - @skipIf(True, 'Skipping until we can find why Jenkins is bailing out') def test_max_open_files(self): with TestsLoggingHandler() as handler: logmsg_dbg = ( @@ -139,15 +146,31 @@ class TestVerify(TestCase): 'raise the salt\'s max_open_files setting. Please consider ' 'raising this value.' ) + if salt.utils.is_windows(): + logmsg_crash = ( + '{0}:The number of accepted minion keys({1}) should be lower ' + 'than 1/4 of the max open files soft setting({2}). ' + 'salt-master will crash pretty soon! Please consider ' + 'raising this value.' 
+ ) - mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) + if sys.platform.startswith('win'): + # Check the Windows API for more detail on this + # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx + # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html + mof_s = mof_h = win32file._getmaxstdio() + else: + mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) tempdir = tempfile.mkdtemp(prefix='fake-keys') keys_dir = os.path.join(tempdir, 'minions') os.makedirs(keys_dir) mof_test = 256 - resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h)) + if salt.utils.is_windows(): + win32file._setmaxstdio(mof_test) + else: + resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h)) try: prev = 0 @@ -181,7 +204,7 @@ class TestVerify(TestCase): level, newmax, mof_test, - mof_h - newmax, + mof_test - newmax if salt.utils.is_windows() else mof_h - newmax, ), handler.messages ) @@ -206,7 +229,7 @@ class TestVerify(TestCase): 'CRITICAL', newmax, mof_test, - mof_h - newmax, + mof_test - newmax if salt.utils.is_windows() else mof_h - newmax, ), handler.messages ) @@ -218,7 +241,10 @@ class TestVerify(TestCase): raise finally: shutil.rmtree(tempdir) - resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h)) + if salt.utils.is_windows(): + win32file._setmaxstdio(mof_h) + else: + resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h)) @skipIf(NO_MOCK, NO_MOCK_REASON) def test_verify_log(self): From c0dc3f73ef3540c93afc6030e28551289dd18598 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 11 Sep 2017 12:21:21 -0600 Subject: [PATCH 035/348] Use sys.platform instead of salt.utils to detect Windows --- tests/unit/utils/test_verify.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/unit/utils/test_verify.py b/tests/unit/utils/test_verify.py index 4df9f2d8e59..f0335718dde 100644 --- a/tests/unit/utils/test_verify.py +++ b/tests/unit/utils/test_verify.py @@ -14,9 +14,9 @@ import tempfile 
import socket # Import third party libs -try: +if sys.platform.startswith('win'): import win32file -except ImportError: +else: import resource # Import Salt Testing libs @@ -87,7 +87,7 @@ class TestVerify(TestCase): writer = FakeWriter() sys.stderr = writer # Now run the test - if salt.utils.is_windows(): + if sys.platform.startswith('win'): self.assertTrue(check_user('nouser')) else: self.assertFalse(check_user('nouser')) @@ -146,7 +146,7 @@ class TestVerify(TestCase): 'raise the salt\'s max_open_files setting. Please consider ' 'raising this value.' ) - if salt.utils.is_windows(): + if sys.platform.startswith('win'): logmsg_crash = ( '{0}:The number of accepted minion keys({1}) should be lower ' 'than 1/4 of the max open files soft setting({2}). ' @@ -167,7 +167,7 @@ class TestVerify(TestCase): mof_test = 256 - if salt.utils.is_windows(): + if sys.platform.startswith('win'): win32file._setmaxstdio(mof_test) else: resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h)) @@ -204,7 +204,7 @@ class TestVerify(TestCase): level, newmax, mof_test, - mof_test - newmax if salt.utils.is_windows() else mof_h - newmax, + mof_test - newmax if sys.platform.startswith('win') else mof_h - newmax, ), handler.messages ) @@ -229,7 +229,7 @@ class TestVerify(TestCase): 'CRITICAL', newmax, mof_test, - mof_test - newmax if salt.utils.is_windows() else mof_h - newmax, + mof_test - newmax if sys.platform.startswith('win') else mof_h - newmax, ), handler.messages ) @@ -241,7 +241,7 @@ class TestVerify(TestCase): raise finally: shutil.rmtree(tempdir) - if salt.utils.is_windows(): + if sys.platform.startswith('win'): win32file._setmaxstdio(mof_h) else: resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h)) From be4f26ab21b92dbf5ecb26963c0099a5e2dd28a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= Date: Mon, 11 Sep 2017 19:57:28 +0200 Subject: [PATCH 036/348] Use $HOME to get the user home directory instead using '~' char --- pkg/salt.bash | 3 ++- 
1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/salt.bash b/pkg/salt.bash index 480361fe23c..00174c072f1 100644 --- a/pkg/salt.bash +++ b/pkg/salt.bash @@ -35,7 +35,8 @@ _salt_get_keys(){ } _salt(){ - local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:='~/.cache/salt-comp-cache_functions'} + CACHE_DIR="$HOME/.cache/salt-comp-cache_functions" + local _salt_cache_functions=${SALT_COMP_CACHE_FUNCTIONS:=$CACHE_DIR} local _salt_cache_timeout=${SALT_COMP_CACHE_TIMEOUT:='last hour'} if [ ! -d "$(dirname ${_salt_cache_functions})" ]; then From c91cd1c6d92912c6d2d9eefecf19927b3e39725e Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 11 Sep 2017 16:21:09 -0400 Subject: [PATCH 037/348] Bump deprecation warning for boto_vpc.describe_route_table This deprecation warning needs to be bumped out to Neon instead of Oxygen. See Issue #43223 for more details. --- salt/modules/boto_vpc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py index a564b863d02..f18ae2d68a9 100644 --- a/salt/modules/boto_vpc.py +++ b/salt/modules/boto_vpc.py @@ -2456,7 +2456,8 @@ def describe_route_table(route_table_id=None, route_table_name=None, ''' - salt.utils.warn_until('Oxygen', + salt.utils.warn_until( + 'Neon', 'The \'describe_route_table\' method has been deprecated and ' 'replaced by \'describe_route_tables\'.' 
) From 7aab1a90e02e2a09a3f89edb978b53735fe1b1c9 Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Tue, 12 Sep 2017 15:07:25 +0300 Subject: [PATCH 038/348] added better debug info and comments --- salt/cloud/clouds/xen.py | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 49a0202c70d..dffff9aa4b2 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -160,7 +160,8 @@ def _get_session(): session.xenapi.login_with_password(user, password, api_version, originator) except XenAPI.Failure as ex: ''' - if the server on the url is not the pool master, the pool master's address will be rturned in the exception message + if the server on the url is not the pool master, the pool master's + address will be rturned in the exception message ''' pool_master_addr = str(ex.__dict__['details'][1]) slash_parts = url.split('/') @@ -189,10 +190,15 @@ def list_nodes(): ret = {} for vm in vms: record = session.xenapi.VM.get_record(vm) - if not record['is_a_template'] and not record['is_control_domain']: - ret[record['name_label']] = { - 'id': record['uuid'], - 'image': record['other_config']['base_template_name'], + if not(record['is_a_template']) and not(record['is_control_domain']): + try: + base_template_name = record['other_config']['base_template_name'] + except Exception as KeyError: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) + ret[record['name_label']] = {'id': record['uuid'], + 'image': base_template_name, 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], @@ -304,10 +310,17 @@ def list_nodes_full(session=None): for vm in vms: record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + # deal with cases where the VM doesn't have 'base_template_name' attribute + try: + 
base_template_name = record['other_config']['base_template_name'] + except Exception as KeyError: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) vm_cfg = session.xenapi.VM.get_record(vm) vm_cfg['id'] = record['uuid'] vm_cfg['name'] = record['name_label'] - vm_cfg['image'] = record['other_config']['base_template_name'] + vm_cfg['image'] = base_template_name vm_cfg['size'] = None vm_cfg['state'] = record['power_state'] vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session) @@ -463,8 +476,14 @@ def show_instance(name, session=None, call=None): vm = _get_vm(name, session=session) record = session.xenapi.VM.get_record(vm) if not record['is_a_template'] and not record['is_control_domain']: + try: + base_template_name = record['other_config']['base_template_name'] + except Exception as KeyError: + base_template_name = None + log.debug('VM {}, doesnt have base_template_name attribute'.format( + record['name_label'])) ret = {'id': record['uuid'], - 'image': record['other_config']['base_template_name'], + 'image': base_template_name, 'name': record['name_label'], 'size': record['memory_dynamic_max'], 'state': record['power_state'], @@ -724,7 +743,7 @@ def _copy_vm(template=None, name=None, session=None, sr=None): ''' Create VM by copy - This is faster and should be used if source and target are + This is slower and should be used if source and target are NOT in the same storage repository template = object reference From 35c1d8898deb3db520eca389d96553562210b269 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 12 Sep 2017 09:36:34 -0400 Subject: [PATCH 039/348] Add Neon to version list Follow up to #43445 --- salt/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/version.py b/salt/version.py index 0c7695568a8..052ccdf677d 100644 --- a/salt/version.py +++ b/salt/version.py @@ -90,8 +90,8 @@ class SaltStackVersion(object): 'Nitrogen' : (MAX_SIZE - 102, 0), 
'Oxygen' : (MAX_SIZE - 101, 0), 'Fluorine' : (MAX_SIZE - 100, 0), + 'Neon' : (MAX_SIZE - 99, 0), # pylint: disable=E8265 - #'Neon' : (MAX_SIZE - 99 , 0), #'Sodium' : (MAX_SIZE - 98 , 0), #'Magnesium' : (MAX_SIZE - 97 , 0), #'Aluminium' : (MAX_SIZE - 96 , 0), From 139e065ce9825ddba52997eac9fd35779c3fef52 Mon Sep 17 00:00:00 2001 From: Olivier Mauras Date: Wed, 12 Jul 2017 17:29:22 +0200 Subject: [PATCH 040/348] New pillar/master_tops saltclass module --- doc/topics/releases/oxygen.rst | 188 +++++++++++ salt/pillar/saltclass.py | 62 ++++ salt/tops/saltclass.py | 69 ++++ salt/utils/saltclass.py | 296 ++++++++++++++++++ .../examples/classes/app/borgbackup.yml | 6 + .../examples/classes/app/ssh/server.yml | 4 + .../examples/classes/default/init.yml | 17 + .../examples/classes/default/motd.yml | 3 + .../examples/classes/default/users.yml | 16 + .../saltclass/examples/classes/roles/app.yml | 21 ++ .../examples/classes/roles/nginx/init.yml | 7 + .../examples/classes/roles/nginx/server.yml | 7 + .../examples/classes/subsidiaries/gnv.yml | 20 ++ .../examples/classes/subsidiaries/qls.yml | 17 + .../examples/classes/subsidiaries/zrh.yml | 24 ++ .../saltclass/examples/nodes/fake_id.yml | 6 + tests/unit/pillar/test_saltclass.py | 43 +++ 17 files changed, 806 insertions(+) create mode 100644 salt/pillar/saltclass.py create mode 100644 salt/tops/saltclass.py create mode 100644 salt/utils/saltclass.py create mode 100644 tests/integration/files/saltclass/examples/classes/app/borgbackup.yml create mode 100644 tests/integration/files/saltclass/examples/classes/app/ssh/server.yml create mode 100644 tests/integration/files/saltclass/examples/classes/default/init.yml create mode 100644 tests/integration/files/saltclass/examples/classes/default/motd.yml create mode 100644 tests/integration/files/saltclass/examples/classes/default/users.yml create mode 100644 tests/integration/files/saltclass/examples/classes/roles/app.yml create mode 100644 
tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml create mode 100644 tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml create mode 100644 tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml create mode 100644 tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml create mode 100644 tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml create mode 100644 tests/integration/files/saltclass/examples/nodes/fake_id.yml create mode 100644 tests/unit/pillar/test_saltclass.py diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index d3cd440d456..ec6a79195ed 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -97,6 +97,194 @@ file. For example: These commands will run in sequence **before** the bootstrap script is executed. +New pillar/master_tops module called saltclass +---------------------------------------------- + +This module clones the behaviour of reclass (http://reclass.pantsfullofunix.net/), without the need of an external app, and add several features to improve flexibility. +Saltclass lets you define your nodes from simple ``yaml`` files (``.yml``) through hierarchical class inheritance with the possibility to override pillars down the tree. 
+ +**Features** + +- Define your nodes through hierarchical class inheritance +- Reuse your reclass datas with minimal modifications + - applications => states + - parameters => pillars +- Use Jinja templating in your yaml definitions +- Access to the following Salt objects in Jinja + - ``__opts__`` + - ``__salt__`` + - ``__grains__`` + - ``__pillars__`` + - ``minion_id`` +- Chose how to merge or override your lists using ^ character (see examples) +- Expand variables ${} with possibility to escape them if needed \${} (see examples) +- Ignores missing node/class and will simply return empty without breaking the pillar module completely - will be logged + +An example subset of datas is available here: http://git.mauras.ch/salt/saltclass/src/master/examples + +========================== =========== +Terms usable in yaml files Description +========================== =========== +classes A list of classes that will be processed in order +states A list of states that will be returned by master_tops function +pillars A yaml dictionnary that will be returned by the ext_pillar function +environment Node saltenv that will be used by master_tops +========================== =========== + +A class consists of: + +- zero or more parent classes +- zero or more states +- any number of pillars + +A child class can override pillars from a parent class. +A node definition is a class in itself with an added ``environment`` parameter for ``saltenv`` definition. + +**class names** + +Class names mimic salt way of defining states and pillar files. +This means that ``default.users`` class name will correspond to one of these: + +- ``/classes/default/users.yml`` +- ``/classes/default/users/init.yml`` + +**Saltclass tree** + +A saltclass tree would look like this: + +.. 
code-block:: text + + + ├── classes + │ ├── app + │ │ ├── borgbackup.yml + │ │ └── ssh + │ │ └── server.yml + │ ├── default + │ │ ├── init.yml + │ │ ├── motd.yml + │ │ └── users.yml + │ ├── roles + │ │ ├── app.yml + │ │ └── nginx + │ │ ├── init.yml + │ │ └── server.yml + │ └── subsidiaries + │ ├── gnv.yml + │ ├── qls.yml + │ └── zrh.yml + └── nodes + ├── geneva + │ └── gnv.node1.yml + ├── lausanne + │ ├── qls.node1.yml + │ └── qls.node2.yml + ├── node127.yml + └── zurich + ├── zrh.node1.yml + ├── zrh.node2.yml + └── zrh.node3.yml + +**Examples** + +``/nodes/lausanne/qls.node1.yml`` + +.. code-block:: yaml + + environment: base + + classes: + {% for class in ['default'] %} + - {{ class }} + {% endfor %} + - subsidiaries.{{ __grains__['id'].split('.')[0] }} + +``/classes/default/init.yml`` + +.. code-block:: yaml + + classes: + - default.users + - default.motd + + states: + - openssh + + pillars: + default: + network: + dns: + srv1: 192.168.0.1 + srv2: 192.168.0.2 + domain: example.com + ntp: + srv1: 192.168.10.10 + srv2: 192.168.10.20 + +``/classes/subsidiaries/gnv.yml`` + +.. code-block:: yaml + + pillars: + default: + network: + sub: Geneva + dns: + srv1: 10.20.0.1 + srv2: 10.20.0.2 + srv3: 192.168.1.1 + domain: gnv.example.com + users: + adm1: + uid: 1210 + gid: 1210 + gecos: 'Super user admin1' + homedir: /srv/app/adm1 + adm3: + uid: 1203 + gid: 1203 + gecos: 'Super user adm + +Variable expansions: + +Escaped variables are rendered as is - ``${test}`` + +Missing variables are rendered as is - ``${net:dns:srv2}`` + +.. code-block:: yaml + + pillars: + app: + config: + dns: + srv1: ${default:network:dns:srv1} + srv2: ${net:dns:srv2} + uri: https://application.domain/call?\${test} + prod_parameters: + - p1 + - p2 + - p3 + pkg: + - app-core + - app-backend + +List override: + +Not using ``^`` as the first entry will simply merge the lists + +.. 
code-block:: yaml + + pillars: + app: + pkg: + - ^ + - app-frontend + + +**Known limitation** + +Currently you can't have both a variable and an escaped variable in the same string as the escaped one will not be correctly rendered - '\${xx}' will stay as is instead of being rendered as '${xx}' + Newer PyWinRM Versions ---------------------- diff --git a/salt/pillar/saltclass.py b/salt/pillar/saltclass.py new file mode 100644 index 00000000000..41732bffd07 --- /dev/null +++ b/salt/pillar/saltclass.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +''' +SaltClass Pillar Module + +.. code-block:: yaml + + ext_pillar: + - saltclass: + - path: /srv/saltclass + +''' + +# import python libs +from __future__ import absolute_import +import salt.utils.saltclass as sc +import logging + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + This module has no external dependencies + ''' + return True + + +def ext_pillar(minion_id, pillar, *args, **kwargs): + ''' + Node definitions path will be retrieved from args - or set to default - + then added to 'salt_data' dict that is passed to the 'get_pillars' function. 
+ 'salt_data' dict is a convenient way to pass all the required datas to the function + It contains: + - __opts__ + - __salt__ + - __grains__ + - __pillar__ + - minion_id + - path + + If successfull the function will return a pillar dict for minion_id + ''' + # If path has not been set, make a default + for i in args: + if 'path' not in i: + path = '/srv/saltclass' + args[i]['path'] = path + log.warning('path variable unset, using default: {0}'.format(path)) + else: + path = i['path'] + + # Create a dict that will contain our salt dicts to pass it to reclass + salt_data = { + '__opts__': __opts__, + '__salt__': __salt__, + '__grains__': __grains__, + '__pillar__': pillar, + 'minion_id': minion_id, + 'path': path + } + + return sc.get_pillars(minion_id, salt_data) diff --git a/salt/tops/saltclass.py b/salt/tops/saltclass.py new file mode 100644 index 00000000000..585641a0245 --- /dev/null +++ b/salt/tops/saltclass.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +''' +SaltClass master_tops Module + +.. code-block:: yaml + master_tops: + saltclass: + path: /srv/saltclass +''' + +# import python libs +from __future__ import absolute_import +import logging + +import salt.utils.saltclass as sc + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only run if properly configured + ''' + if __opts__['master_tops'].get('saltclass'): + return True + return False + + +def top(**kwargs): + ''' + Node definitions path will be retrieved from __opts__ - or set to default - + then added to 'salt_data' dict that is passed to the 'get_tops' function. 
+ 'salt_data' dict is a convenient way to pass all the required datas to the function + It contains: + - __opts__ + - empty __salt__ + - __grains__ + - empty __pillar__ + - minion_id + - path + + If successfull the function will return a top dict for minion_id + ''' + # If path has not been set, make a default + _opts = __opts__['master_tops']['saltclass'] + if 'path' not in _opts: + path = '/srv/saltclass' + log.warning('path variable unset, using default: {0}'.format(path)) + else: + path = _opts['path'] + + # Create a dict that will contain our salt objects + # to send to get_tops function + if 'id' not in kwargs['opts']: + log.warning('Minion id not found - Returning empty dict') + return {} + else: + minion_id = kwargs['opts']['id'] + + salt_data = { + '__opts__': kwargs['opts'], + '__salt__': {}, + '__grains__': kwargs['grains'], + '__pillar__': {}, + 'minion_id': minion_id, + 'path': path + } + + return sc.get_tops(minion_id, salt_data) diff --git a/salt/utils/saltclass.py b/salt/utils/saltclass.py new file mode 100644 index 00000000000..3df204d5dc1 --- /dev/null +++ b/salt/utils/saltclass.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +import os +import re +import logging +from salt.ext.six import iteritems +import yaml +from jinja2 import FileSystemLoader, Environment + +log = logging.getLogger(__name__) + + +# Renders jinja from a template file +def render_jinja(_file, salt_data): + j_env = Environment(loader=FileSystemLoader(os.path.dirname(_file))) + j_env.globals.update({ + '__opts__': salt_data['__opts__'], + '__salt__': salt_data['__salt__'], + '__grains__': salt_data['__grains__'], + '__pillar__': salt_data['__pillar__'], + 'minion_id': salt_data['minion_id'], + }) + j_render = j_env.get_template(os.path.basename(_file)).render() + return j_render + + +# Renders yaml from rendered jinja +def render_yaml(_file, salt_data): + return yaml.safe_load(render_jinja(_file, salt_data)) + + +# Returns a dict from a class 
yaml definition +def get_class(_class, salt_data): + l_files = [] + saltclass_path = salt_data['path'] + + straight = '{0}/classes/{1}.yml'.format(saltclass_path, _class) + sub_straight = '{0}/classes/{1}.yml'.format(saltclass_path, + _class.replace('.', '/')) + sub_init = '{0}/classes/{1}/init.yml'.format(saltclass_path, + _class.replace('.', '/')) + + for root, dirs, files in os.walk('{0}/classes'.format(saltclass_path)): + for l_file in files: + l_files.append('{0}/{1}'.format(root, l_file)) + + if straight in l_files: + return render_yaml(straight, salt_data) + + if sub_straight in l_files: + return render_yaml(sub_straight, salt_data) + + if sub_init in l_files: + return render_yaml(sub_init, salt_data) + + log.warning('{0}: Class definition not found'.format(_class)) + return {} + + +# Return environment +def get_env_from_dict(exp_dict_list): + environment = '' + for s_class in exp_dict_list: + if 'environment' in s_class: + environment = s_class['environment'] + return environment + + +# Merge dict b into a +def dict_merge(a, b, path=None): + if path is None: + path = [] + + for key in b: + if key in a: + if isinstance(a[key], list) and isinstance(b[key], list): + if b[key][0] == '^': + b[key].pop(0) + a[key] = b[key] + else: + a[key].extend(b[key]) + elif isinstance(a[key], dict) and isinstance(b[key], dict): + dict_merge(a[key], b[key], path + [str(key)]) + elif a[key] == b[key]: + pass + else: + a[key] = b[key] + else: + a[key] = b[key] + return a + + +# Recursive search and replace in a dict +def dict_search_and_replace(d, old, new, expanded): + for (k, v) in iteritems(d): + if isinstance(v, dict): + dict_search_and_replace(d[k], old, new, expanded) + if v == old: + d[k] = new + return d + + +# Retrieve original value from ${xx:yy:zz} to be expanded +def find_value_to_expand(x, v): + a = x + for i in v[2:-1].split(':'): + if i in a: + a = a.get(i) + else: + a = v + return a + return a + + +# Return a dict that contains expanded variables if found +def 
expand_variables(a, b, expanded, path=None): + if path is None: + b = a.copy() + path = [] + + for (k, v) in iteritems(a): + if isinstance(v, dict): + expand_variables(v, b, expanded, path + [str(k)]) + else: + if isinstance(v, str): + vre = re.search(r'(^|.)\$\{.*?\}', v) + if vre: + re_v = vre.group(0) + if re_v.startswith('\\'): + v_new = v.replace(re_v, re_v.lstrip('\\')) + b = dict_search_and_replace(b, v, v_new, expanded) + expanded.append(k) + elif not re_v.startswith('$'): + v_expanded = find_value_to_expand(b, re_v[1:]) + v_new = v.replace(re_v[1:], v_expanded) + b = dict_search_and_replace(b, v, v_new, expanded) + expanded.append(k) + else: + v_expanded = find_value_to_expand(b, re_v) + b = dict_search_and_replace(b, v, v_expanded, expanded) + expanded.append(k) + return b + + +def expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand): + # Get classes to expand from minion dictionnary + if not classes_to_expand and 'classes' in minion_dict: + classes_to_expand = minion_dict['classes'] + + # Now loop on list to recursively expand them + for klass in classes_to_expand: + if klass not in seen_classes: + seen_classes.append(klass) + expanded_classes[klass] = get_class(klass, salt_data) + # Fix corner case where class is loaded but doesn't contain anything + if expanded_classes[klass] is None: + expanded_classes[klass] = {} + # Now replace class element in classes_to_expand by expansion + if 'classes' in expanded_classes[klass]: + l_id = classes_to_expand.index(klass) + classes_to_expand[l_id:l_id] = expanded_classes[klass]['classes'] + expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand) + else: + expand_classes_in_order(minion_dict, + salt_data, + seen_classes, + expanded_classes, + classes_to_expand) + + # We may have duplicates here and we want to remove them + tmp = [] + for t_element in classes_to_expand: + if t_element not in tmp: + 
tmp.append(t_element) + + classes_to_expand = tmp + + # Now that we've retrieved every class in order, + # let's return an ordered list of dicts + ord_expanded_classes = [] + ord_expanded_states = [] + for ord_klass in classes_to_expand: + ord_expanded_classes.append(expanded_classes[ord_klass]) + # And be smart and sort out states list + # Address the corner case where states is empty in a class definition + if 'states' in expanded_classes[ord_klass] and expanded_classes[ord_klass]['states'] is None: + expanded_classes[ord_klass]['states'] = {} + + if 'states' in expanded_classes[ord_klass]: + ord_expanded_states.extend(expanded_classes[ord_klass]['states']) + + # Add our minion dict as final element but check if we have states to process + if 'states' in minion_dict and minion_dict['states'] is None: + minion_dict['states'] = [] + + if 'states' in minion_dict: + ord_expanded_states.extend(minion_dict['states']) + + ord_expanded_classes.append(minion_dict) + + return ord_expanded_classes, classes_to_expand, ord_expanded_states + + +def expanded_dict_from_minion(minion_id, salt_data): + _file = '' + saltclass_path = salt_data['path'] + # Start + for root, dirs, files in os.walk('{0}/nodes'.format(saltclass_path)): + for minion_file in files: + if minion_file == '{0}.yml'.format(minion_id): + _file = os.path.join(root, minion_file) + + # Load the minion_id definition if existing, else an exmpty dict + node_dict = {} + if _file: + node_dict[minion_id] = render_yaml(_file, salt_data) + else: + log.warning('{0}: Node definition not found'.format(minion_id)) + node_dict[minion_id] = {} + + # Get 2 ordered lists: + # expanded_classes: A list of all the dicts + # classes_list: List of all the classes + expanded_classes, classes_list, states_list = expand_classes_in_order( + node_dict[minion_id], + salt_data, [], {}, []) + + # Here merge the pillars together + pillars_dict = {} + for exp_dict in expanded_classes: + if 'pillars' in exp_dict: + dict_merge(pillars_dict, 
exp_dict) + + return expanded_classes, pillars_dict, classes_list, states_list + + +def get_pillars(minion_id, salt_data): + # Get 2 dicts and 2 lists + # expanded_classes: Full list of expanded dicts + # pillars_dict: dict containing merged pillars in order + # classes_list: All classes processed in order + # states_list: All states listed in order + (expanded_classes, + pillars_dict, + classes_list, + states_list) = expanded_dict_from_minion(minion_id, salt_data) + + # Retrieve environment + environment = get_env_from_dict(expanded_classes) + + # Expand ${} variables in merged dict + # pillars key shouldn't exist if we haven't found any minion_id ref + if 'pillars' in pillars_dict: + pillars_dict_expanded = expand_variables(pillars_dict['pillars'], {}, []) + else: + pillars_dict_expanded = expand_variables({}, {}, []) + + # Build the final pillars dict + pillars_dict = {} + pillars_dict['__saltclass__'] = {} + pillars_dict['__saltclass__']['states'] = states_list + pillars_dict['__saltclass__']['classes'] = classes_list + pillars_dict['__saltclass__']['environment'] = environment + pillars_dict['__saltclass__']['nodename'] = minion_id + pillars_dict.update(pillars_dict_expanded) + + return pillars_dict + + +def get_tops(minion_id, salt_data): + # Get 2 dicts and 2 lists + # expanded_classes: Full list of expanded dicts + # pillars_dict: dict containing merged pillars in order + # classes_list: All classes processed in order + # states_list: All states listed in order + (expanded_classes, + pillars_dict, + classes_list, + states_list) = expanded_dict_from_minion(minion_id, salt_data) + + # Retrieve environment + environment = get_env_from_dict(expanded_classes) + + # Build final top dict + tops_dict = {} + tops_dict[environment] = states_list + + return tops_dict diff --git a/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml b/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml new file mode 100644 index 
00000000000..10f2865df73 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/app/borgbackup.yml @@ -0,0 +1,6 @@ +classes: + - app.ssh.server + +pillars: + sshd: + root_access: yes diff --git a/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml b/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml new file mode 100644 index 00000000000..9ebd94322f2 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/app/ssh/server.yml @@ -0,0 +1,4 @@ +pillars: + sshd: + root_access: no + ssh_port: 22 diff --git a/tests/integration/files/saltclass/examples/classes/default/init.yml b/tests/integration/files/saltclass/examples/classes/default/init.yml new file mode 100644 index 00000000000..20a5e450883 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/init.yml @@ -0,0 +1,17 @@ +classes: + - default.users + - default.motd + +states: + - openssh + +pillars: + default: + network: + dns: + srv1: 192.168.0.1 + srv2: 192.168.0.2 + domain: example.com + ntp: + srv1: 192.168.10.10 + srv2: 192.168.10.20 diff --git a/tests/integration/files/saltclass/examples/classes/default/motd.yml b/tests/integration/files/saltclass/examples/classes/default/motd.yml new file mode 100644 index 00000000000..18938d7b1af --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/motd.yml @@ -0,0 +1,3 @@ +pillars: + motd: + text: "Welcome to {{ __grains__['id'] }} system located in ${default:network:sub}" diff --git a/tests/integration/files/saltclass/examples/classes/default/users.yml b/tests/integration/files/saltclass/examples/classes/default/users.yml new file mode 100644 index 00000000000..8bfba671091 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/default/users.yml @@ -0,0 +1,16 @@ +states: + - user_mgt + +pillars: + default: + users: + adm1: + uid: 1201 + gid: 1201 + gecos: 'Super user admin1' + homedir: /home/adm1 + adm2: + uid: 1202 + gid: 1202 + gecos: 'Super 
user admin2' + homedir: /home/adm2 diff --git a/tests/integration/files/saltclass/examples/classes/roles/app.yml b/tests/integration/files/saltclass/examples/classes/roles/app.yml new file mode 100644 index 00000000000..af244e402ce --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/app.yml @@ -0,0 +1,21 @@ +states: + - app + +pillars: + app: + config: + dns: + srv1: ${default:network:dns:srv1} + srv2: ${default:network:dns:srv2} + uri: https://application.domain/call?\${test} + prod_parameters: + - p1 + - p2 + - p3 + pkg: + - app-core + - app-backend +# Safe minion_id matching +{% if minion_id == 'zrh.node3' %} + safe_pillar: '_only_ zrh.node3 will see this pillar and this cannot be overriden like grains' +{% endif %} diff --git a/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml b/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml new file mode 100644 index 00000000000..996ded51fa5 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/nginx/init.yml @@ -0,0 +1,7 @@ +states: + - nginx_deployment + +pillars: + nginx: + pkg: + - nginx diff --git a/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml b/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml new file mode 100644 index 00000000000..bc290997a6e --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/roles/nginx/server.yml @@ -0,0 +1,7 @@ +classes: + - roles.nginx + +pillars: + nginx: + pkg: + - nginx-module diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml new file mode 100644 index 00000000000..7e7c39c60cd --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/gnv.yml @@ -0,0 +1,20 @@ +pillars: + default: + network: + sub: Geneva + dns: + srv1: 10.20.0.1 + srv2: 10.20.0.2 + srv3: 192.168.1.1 + domain: gnv.example.com + 
users: + adm1: + uid: 1210 + gid: 1210 + gecos: 'Super user admin1' + homedir: /srv/app/adm1 + adm3: + uid: 1203 + gid: 1203 + gecos: 'Super user admin3' + homedir: /home/adm3 diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml new file mode 100644 index 00000000000..22895482768 --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/qls.yml @@ -0,0 +1,17 @@ +classes: + - app.ssh.server + - roles.nginx.server + +pillars: + default: + network: + sub: Lausanne + dns: + srv1: 10.10.0.1 + domain: qls.example.com + users: + nginx_adm: + uid: 250 + gid: 200 + gecos: 'Nginx admin user' + homedir: /srv/www diff --git a/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml b/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml new file mode 100644 index 00000000000..ac30dc73b9a --- /dev/null +++ b/tests/integration/files/saltclass/examples/classes/subsidiaries/zrh.yml @@ -0,0 +1,24 @@ +classes: + - roles.app + # This should validate that we process a class only once + - app.borgbackup + # As this one should not be processed + # and would override in turn overrides from app.borgbackup + - app.ssh.server + +pillars: + default: + network: + sub: Zurich + dns: + srv1: 10.30.0.1 + srv2: 10.30.0.2 + domain: zrh.example.com + ntp: + srv1: 10.0.0.127 + users: + adm1: + uid: 250 + gid: 250 + gecos: 'Super user admin1' + homedir: /srv/app/1 diff --git a/tests/integration/files/saltclass/examples/nodes/fake_id.yml b/tests/integration/files/saltclass/examples/nodes/fake_id.yml new file mode 100644 index 00000000000..a87137e6fbe --- /dev/null +++ b/tests/integration/files/saltclass/examples/nodes/fake_id.yml @@ -0,0 +1,6 @@ +environment: base + +classes: +{% for class in ['default'] %} + - {{ class }} +{% endfor %} diff --git a/tests/unit/pillar/test_saltclass.py b/tests/unit/pillar/test_saltclass.py new file mode 
100644 index 00000000000..30b63f8c548 --- /dev/null +++ b/tests/unit/pillar/test_saltclass.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import +import os + +# Import Salt Testing libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON + +# Import Salt Libs +import salt.pillar.saltclass as saltclass + + +base_path = os.path.dirname(os.path.realpath(__file__)) +fake_minion_id = 'fake_id' +fake_pillar = {} +fake_args = ({'path': '{0}/../../integration/files/saltclass/examples'.format(base_path)}) +fake_opts = {} +fake_salt = {} +fake_grains = {} + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltclassPillarTestCase(TestCase, LoaderModuleMockMixin): + ''' + Tests for salt.pillar.saltclass + ''' + def setup_loader_modules(self): + return {saltclass: {'__opts__': fake_opts, + '__salt__': fake_salt, + '__grains__': fake_grains + }} + + def _runner(self, expected_ret): + full_ret = saltclass.ext_pillar(fake_minion_id, fake_pillar, fake_args) + parsed_ret = full_ret['__saltclass__']['classes'] + self.assertListEqual(parsed_ret, expected_ret) + + def test_succeeds(self): + ret = ['default.users', 'default.motd', 'default'] + self._runner(ret) From fb31e9a530e3efbfc9177f3daddd2ca1ad206d6f Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 12 Sep 2017 10:05:36 -0600 Subject: [PATCH 041/348] Add /norestart switch to vcredist install --- pkg/windows/installer/Salt-Minion-Setup.nsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index ab890529d51..83010476a04 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -204,7 +204,7 @@ Section -Prerequisites ; The Correct version of VCRedist is copied over by "build_pkg.bat" SetOutPath "$INSTDIR\" File 
"..\prereqs\vcredist.exe" - ExecWait "$INSTDIR\vcredist.exe /qb!" + ExecWait "$INSTDIR\vcredist.exe /qb! /norestart" IfErrors 0 endVcRedist MessageBox MB_OK \ "VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \ From d80aea16cb8a6bc3075ac051e22623d8feeea7bb Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 12 Sep 2017 11:56:17 -0600 Subject: [PATCH 042/348] Handle ErrorCodes returned by VCRedist installer --- pkg/windows/installer/Salt-Minion-Setup.nsi | 28 ++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index 83010476a04..9108fb3e5f2 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -200,17 +200,38 @@ Section -Prerequisites "VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \ /SD IDYES IDNO endVcRedist - ClearErrors ; The Correct version of VCRedist is copied over by "build_pkg.bat" SetOutPath "$INSTDIR\" File "..\prereqs\vcredist.exe" - ExecWait "$INSTDIR\vcredist.exe /qb! /norestart" - IfErrors 0 endVcRedist + # If an output variable is specified ($0 in the case below), + # ExecWait sets the variable with the exit code (and only sets the + # error flag if an error occurs; if an error occurs, the contents + # of the user variable are undefined). + # http://nsis.sourceforge.net/Reference/ExecWait + ClearErrors + ExecWait '"$INSTDIR\vcredist.exe" /qb! /norestart' $0 + IfErrors 0 CheckVcRedistErrorCode: MessageBox MB_OK \ "VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \ /SD IDOK + Goto endVcRedist + + checkVcRedistErrorCode: + # Check for Reboot Error Code (3010) + ${If} $0 == 3010 + MessageBox MB_OK \ + "VC Redist 2008 SP1 MFC installed but requires a restart to complete." 
\ + /SD IDOK + + # Check for any other errors + ${ElseIfNot} $0 == 0 + MessageBox MB_OK \ + "VC Redist 2008 SP1 MFC failed with ErrorCode: $0. Try installing the package manually." \ + /SD IDOK + ${EndIf} endVcRedist: + ${EndIf} ${EndIf} @@ -715,6 +736,7 @@ Function getMinionConfig confFound: FileOpen $0 "$INSTDIR\conf\minion" r + ClearErrors confLoop: FileRead $0 $1 IfErrors EndOfFile From 2d269d1a763dc3b6de2bd438c036aa75a642cc0e Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 12 Sep 2017 12:59:57 -0600 Subject: [PATCH 043/348] Change all comment markers to '#' --- pkg/windows/installer/Salt-Minion-Setup.nsi | 380 ++++++++++---------- 1 file changed, 190 insertions(+), 190 deletions(-) diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index 9108fb3e5f2..094e3e70300 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -38,7 +38,7 @@ ${StrStrAdv} !define CPUARCH "x86" !endif -; Part of the Trim function for Strings +# Part of the Trim function for Strings !define Trim "!insertmacro Trim" !macro Trim ResultVar String Push "${String}" @@ -55,27 +55,27 @@ ${StrStrAdv} !define MUI_UNICON "salt.ico" !define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp" -; Welcome page +# Welcome page !insertmacro MUI_PAGE_WELCOME -; License page +# License page !insertmacro MUI_PAGE_LICENSE "LICENSE.txt" -; Configure Minion page +# Configure Minion page Page custom pageMinionConfig pageMinionConfig_Leave -; Instfiles page +# Instfiles page !insertmacro MUI_PAGE_INSTFILES -; Finish page (Customized) +# Finish page (Customized) !define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show !define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave !insertmacro MUI_PAGE_FINISH -; Uninstaller pages +# Uninstaller pages !insertmacro MUI_UNPAGE_INSTFILES -; Language files +# Language files !insertmacro MUI_LANGUAGE "English" @@ -175,11 +175,11 @@ ShowInstDetails show ShowUnInstDetails show -; Check 
and install Visual C++ 2008 SP1 MFC Security Update redist packages -; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info +# Check and install Visual C++ 2008 SP1 MFC Security Update redist packages +# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info Section -Prerequisites - ; VCRedist only needed on Windows Server 2008R2/Windows 7 and below + # VCRedist only needed on Windows Server 2008R2/Windows 7 and below ${If} ${AtMostWin2008R2} !define VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}" @@ -200,7 +200,7 @@ Section -Prerequisites "VC Redist 2008 SP1 MFC is currently not installed. Would you like to install?" \ /SD IDYES IDNO endVcRedist - ; The Correct version of VCRedist is copied over by "build_pkg.bat" + # The Correct version of VCRedist is copied over by "build_pkg.bat" SetOutPath "$INSTDIR\" File "..\prereqs\vcredist.exe" # If an output variable is specified ($0 in the case below), @@ -210,7 +210,7 @@ Section -Prerequisites # http://nsis.sourceforge.net/Reference/ExecWait ClearErrors ExecWait '"$INSTDIR\vcredist.exe" /qb! /norestart' $0 - IfErrors 0 CheckVcRedistErrorCode: + IfErrors 0 CheckVcRedistErrorCode MessageBox MB_OK \ "VC Redist 2008 SP1 MFC failed to install. Try installing the package manually." \ /SD IDOK @@ -257,12 +257,12 @@ Function .onInit Call parseCommandLineSwitches - ; Check for existing installation + # Check for existing installation ReadRegStr $R0 HKLM \ "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \ "UninstallString" StrCmp $R0 "" checkOther - ; Found existing installation, prompt to uninstall + # Found existing installation, prompt to uninstall MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \ "${PRODUCT_NAME} is already installed.$\n$\n\ Click `OK` to remove the existing installation." 
\ @@ -270,12 +270,12 @@ Function .onInit Abort checkOther: - ; Check for existing installation of full salt + # Check for existing installation of full salt ReadRegStr $R0 HKLM \ "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \ "UninstallString" StrCmp $R0 "" skipUninstall - ; Found existing installation, prompt to uninstall + # Found existing installation, prompt to uninstall MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \ "${PRODUCT_NAME_OTHER} is already installed.$\n$\n\ Click `OK` to remove the existing installation." \ @@ -283,27 +283,27 @@ Function .onInit Abort uninst: - ; Make sure we're in the right directory + # Make sure we're in the right directory ${If} $INSTDIR == "c:\salt\bin\Scripts" StrCpy $INSTDIR "C:\salt" ${EndIf} - ; Stop and remove the salt-minion service + # Stop and remove the salt-minion service nsExec::Exec 'net stop salt-minion' nsExec::Exec 'sc delete salt-minion' - ; Stop and remove the salt-master service + # Stop and remove the salt-master service nsExec::Exec 'net stop salt-master' nsExec::Exec 'sc delete salt-master' - ; Remove salt binaries and batch files + # Remove salt binaries and batch files Delete "$INSTDIR\uninst.exe" Delete "$INSTDIR\nssm.exe" Delete "$INSTDIR\salt*" Delete "$INSTDIR\vcredist.exe" RMDir /r "$INSTDIR\bin" - ; Remove registry entries + # Remove registry entries DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}" @@ -313,7 +313,7 @@ Function .onInit DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}" - ; Remove C:\salt from the Path + # Remove C:\salt from the Path Push "C:\salt" Call RemoveFromPath @@ -326,7 +326,7 @@ Section -Post WriteUninstaller "$INSTDIR\uninst.exe" - ; Uninstall Registry Entries + # Uninstall Registry Entries WriteRegStr 
${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ "DisplayName" "$(^Name)" WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ @@ -342,24 +342,24 @@ Section -Post WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \ "DependOnService" "nsi" - ; Set the estimated size + # Set the estimated size ${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2 IntFmt $0 "0x%08X" $0 WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ "EstimatedSize" "$0" - ; Commandline Registry Entries + # Commandline Registry Entries WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat" WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\" WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat" WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\" - ; Register the Salt-Minion Service + # Register the Salt-Minion Service nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com" nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1" - RMDir /R "$INSTDIR\var\cache\salt" ; removing cache from old version + RMDir /R "$INSTDIR\var\cache\salt" # removing cache from old version Call updateMinionConfig @@ -373,7 +373,7 @@ SectionEnd Function .onInstSuccess - ; If start-minion is 1, then start the service + # If start-minion is 1, then start the service ${If} $StartMinion == 1 nsExec::Exec 'net start salt-minion' ${EndIf} @@ -391,35 +391,35 @@ FunctionEnd Section Uninstall - ; Stop and Remove salt-minion service + # Stop and Remove salt-minion service nsExec::Exec 'net stop salt-minion' nsExec::Exec 'sc delete salt-minion' - ; Remove files + # Remove files Delete "$INSTDIR\uninst.exe" Delete "$INSTDIR\nssm.exe" Delete "$INSTDIR\salt*" Delete "$INSTDIR\vcredist.exe" - ; Remove salt directory, you must check to make sure you're not removing - ; the Program Files 
directory + # Remove salt directory, you must check to make sure you're not removing + # the Program Files directory ${If} $INSTDIR != 'Program Files' ${AndIf} $INSTDIR != 'Program Files (x86)' RMDir /r "$INSTDIR" ${EndIf} - ; Remove Uninstall Entries + # Remove Uninstall Entries DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" - ; Remove Commandline Entries + # Remove Commandline Entries DeleteRegKey HKLM "${PRODUCT_CALL_REGKEY}" DeleteRegKey HKLM "${PRODUCT_MINION_REGKEY}" - ; Remove C:\salt from the Path + # Remove C:\salt from the Path Push "C:\salt" Call un.RemoveFromPath - ; Automatically close when finished + # Automatically close when finished SetAutoClose true SectionEnd @@ -450,7 +450,7 @@ FunctionEnd Function Trim - Exch $R1 ; Original string + Exch $R1 # Original string Push $R2 Loop: @@ -482,36 +482,36 @@ Function Trim FunctionEnd -;------------------------------------------------------------------------------ -; StrStr Function -; - find substring in a string -; -; Usage: -; Push "this is some string" -; Push "some" -; Call StrStr -; Pop $0 ; "some string" -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# StrStr Function +# - find substring in a string +# +# Usage: +# Push "this is some string" +# Push "some" +# Call StrStr +# Pop $0 ; "some string" +#------------------------------------------------------------------------------ !macro StrStr un Function ${un}StrStr - Exch $R1 ; $R1=substring, stack=[old$R1,string,...] - Exch ; stack=[string,old$R1,...] - Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...] - Push $R3 ; $R3=strlen(substring) - Push $R4 ; $R4=count - Push $R5 ; $R5=tmp - StrLen $R3 $R1 ; Get the length of the Search String - StrCpy $R4 0 ; Set the counter to 0 + Exch $R1 # $R1=substring, stack=[old$R1,string,...] + Exch # stack=[string,old$R1,...] + Exch $R2 # $R2=string, stack=[old$R2,old$R1,...] 
+ Push $R3 # $R3=strlen(substring) + Push $R4 # $R4=count + Push $R5 # $R5=tmp + StrLen $R3 $R1 # Get the length of the Search String + StrCpy $R4 0 # Set the counter to 0 loop: - StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is - ; the size of the length of the search string - StrCmp $R5 $R1 done ; Is the contents of the window the same as - ; search string, then done - StrCmp $R5 "" done ; Is the window empty, then done - IntOp $R4 $R4 + 1 ; Shift the windows one character - Goto loop ; Repeat + StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is + # the size of the length of the search string + StrCmp $R5 $R1 done # Is the contents of the window the same as + # search string, then done + StrCmp $R5 "" done # Is the window empty, then done + IntOp $R4 $R4 + 1 # Shift the windows one character + Goto loop # Repeat done: StrCpy $R1 $R2 "" $R4 @@ -519,7 +519,7 @@ Function ${un}StrStr Pop $R4 Pop $R3 Pop $R2 - Exch $R1 ; $R1=old$R1, stack=[result,...] + Exch $R1 # $R1=old$R1, stack=[result,...] FunctionEnd !macroend @@ -527,74 +527,74 @@ FunctionEnd !insertmacro StrStr "un." 
-;------------------------------------------------------------------------------ -; AddToPath Function -; - Adds item to Path for All Users -; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native -; Windows Commands -; -; Usage: -; Push "C:\path\to\add" -; Call AddToPath -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# AddToPath Function +# - Adds item to Path for All Users +# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native +# Windows Commands +# +# Usage: +# Push "C:\path\to\add" +# Call AddToPath +#------------------------------------------------------------------------------ !define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' Function AddToPath - Exch $0 ; Path to add - Push $1 ; Current Path - Push $2 ; Results of StrStr / Length of Path + Path to Add - Push $3 ; Handle to Reg / Length of Path - Push $4 ; Result of Registry Call + Exch $0 # Path to add + Push $1 # Current Path + Push $2 # Results of StrStr / Length of Path + Path to Add + Push $3 # Handle to Reg / Length of Path + Push $4 # Result of Registry Call - ; Open a handle to the key in the registry, handle in $3, Error in $4 + # Open a handle to the key in the registry, handle in $3, Error in $4 System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4" - ; Make sure registry handle opened successfully (returned 0) + # Make sure registry handle opened successfully (returned 0) IntCmp $4 0 0 done done - ; Load the contents of path into $1, Error Code into $4, Path length into $2 + # Load the contents of path into $1, Error Code into $4, Path length into $2 System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4" - ; Close the handle to the registry ($3) + # Close the handle to the registry ($3) 
System::Call "advapi32::RegCloseKey(i $3)" - ; Check for Error Code 234, Path too long for the variable - IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA + # Check for Error Code 234, Path too long for the variable + IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}" MessageBox MB_OK \ "You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \ /SD IDOK Goto done - ; If no error, continue - IntCmp $4 0 +5 ; $4 != NO_ERROR - ; Error 2 means the Key was not found - IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND + # If no error, continue + IntCmp $4 0 +5 # $4 != NO_ERROR + # Error 2 means the Key was not found + IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND DetailPrint "AddToPath: unexpected error code $4" Goto done StrCpy $1 "" - ; Check if already in PATH - Push "$1;" ; The string to search - Push "$0;" ; The string to find + # Check if already in PATH + Push "$1;" # The string to search + Push "$0;" # The string to find Call StrStr - Pop $2 ; The result of the search - StrCmp $2 "" 0 done ; String not found, try again with ';' at the end - ; Otherwise, it's already in the path - Push "$1;" ; The string to search - Push "$0\;" ; The string to find + Pop $2 # The result of the search + StrCmp $2 "" 0 done # String not found, try again with ';' at the end + # Otherwise, it's already in the path + Push "$1;" # The string to search + Push "$0\;" # The string to find Call StrStr - Pop $2 ; The result - StrCmp $2 "" 0 done ; String not found, continue (add) - ; Otherwise, it's already in the path + Pop $2 # The result + StrCmp $2 "" 0 done # String not found, continue (add) + # Otherwise, it's already in the path - ; Prevent NSIS string overflow - StrLen $2 $0 ; Length of path to add ($2) - StrLen $3 $1 ; Length of current path ($3) - IntOp $2 $2 + $3 ; Length of current path + path to add ($2) - IntOp $2 $2 + 2 ; Account for the additional ';' - ; $2 = 
strlen(dir) + strlen(PATH) + sizeof(";") + # Prevent NSIS string overflow + StrLen $2 $0 # Length of path to add ($2) + StrLen $3 $1 # Length of current path ($3) + IntOp $2 $2 + $3 # Length of current path + path to add ($2) + IntOp $2 $2 + 2 # Account for the additional ';' + # $2 = strlen(dir) + strlen(PATH) + sizeof(";") - ; Make sure the new length isn't over the NSIS_MAX_STRLEN + # Make sure the new length isn't over the NSIS_MAX_STRLEN IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0 DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}" MessageBox MB_OK \ @@ -602,18 +602,18 @@ Function AddToPath /SD IDOK Goto done - ; Append dir to PATH + # Append dir to PATH DetailPrint "Add to PATH: $0" - StrCpy $2 $1 1 -1 ; Copy the last character of the existing path - StrCmp $2 ";" 0 +2 ; Check for trailing ';' - StrCpy $1 $1 -1 ; remove trailing ';' - StrCmp $1 "" +2 ; Make sure Path is not empty - StrCpy $0 "$1;$0" ; Append new path at the end ($0) + StrCpy $2 $1 1 -1 # Copy the last character of the existing path + StrCmp $2 ";" 0 +2 # Check for trailing ';' + StrCpy $1 $1 -1 # remove trailing ';' + StrCmp $1 "" +2 # Make sure Path is not empty + StrCpy $0 "$1;$0" # Append new path at the end ($0) - ; We can use the NSIS command here. Only 'ReadRegStr' is affected + # We can use the NSIS command here. 
Only 'ReadRegStr' is affected WriteRegExpandStr ${Environ} "PATH" $0 - ; Broadcast registry change to open programs + # Broadcast registry change to open programs SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 done: @@ -626,16 +626,16 @@ Function AddToPath FunctionEnd -;------------------------------------------------------------------------------ -; RemoveFromPath Function -; - Removes item from Path for All Users -; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native -; Windows Commands -; -; Usage: -; Push "C:\path\to\add" -; Call RemoveFromPath -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# RemoveFromPath Function +# - Removes item from Path for All Users +# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native +# Windows Commands +# +# Usage: +# Push "C:\path\to\add" +# Call RemoveFromPath +#------------------------------------------------------------------------------ !macro RemoveFromPath un Function ${un}RemoveFromPath @@ -647,59 +647,59 @@ Function ${un}RemoveFromPath Push $5 Push $6 - ; Open a handle to the key in the registry, handle in $3, Error in $4 + # Open a handle to the key in the registry, handle in $3, Error in $4 System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4" - ; Make sure registry handle opened successfully (returned 0) + # Make sure registry handle opened successfully (returned 0) IntCmp $4 0 0 done done - ; Load the contents of path into $1, Error Code into $4, Path length into $2 + # Load the contents of path into $1, Error Code into $4, Path length into $2 System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4" - ; Close the handle to the registry ($3) + # Close the handle to the registry ($3) System::Call 
"advapi32::RegCloseKey(i $3)" - ; Check for Error Code 234, Path too long for the variable - IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA + # Check for Error Code 234, Path too long for the variable + IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}" Goto done - ; If no error, continue - IntCmp $4 0 +5 ; $4 != NO_ERROR - ; Error 2 means the Key was not found - IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND + # If no error, continue + IntCmp $4 0 +5 # $4 != NO_ERROR + # Error 2 means the Key was not found + IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND DetailPrint "AddToPath: unexpected error code $4" Goto done StrCpy $1 "" - ; Ensure there's a trailing ';' - StrCpy $5 $1 1 -1 ; Copy the last character of the path - StrCmp $5 ";" +2 ; Check for trailing ';', if found continue - StrCpy $1 "$1;" ; ensure trailing ';' + # Ensure there's a trailing ';' + StrCpy $5 $1 1 -1 # Copy the last character of the path + StrCmp $5 ";" +2 # Check for trailing ';', if found continue + StrCpy $1 "$1;" # ensure trailing ';' - ; Check for our directory inside the path - Push $1 ; String to Search - Push "$0;" ; Dir to Find + # Check for our directory inside the path + Push $1 # String to Search + Push "$0;" # Dir to Find Call ${un}StrStr - Pop $2 ; The results of the search - StrCmp $2 "" done ; If results are empty, we're done, otherwise continue + Pop $2 # The results of the search + StrCmp $2 "" done # If results are empty, we're done, otherwise continue - ; Remove our Directory from the Path + # Remove our Directory from the Path DetailPrint "Remove from PATH: $0" - StrLen $3 "$0;" ; Get the length of our dir ($3) - StrLen $4 $2 ; Get the length of the return from StrStr ($4) - StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove - StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove - StrCpy $3 "$5$6" ; Combine $5 and $6 + StrLen $3 "$0;" # Get the length of our dir ($3) + StrLen $4 $2 # Get the 
length of the return from StrStr ($4) + StrCpy $5 $1 -$4 # $5 is now the part before the path to remove + StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove + StrCpy $3 "$5$6" # Combine $5 and $6 - ; Check for Trailing ';' - StrCpy $5 $3 1 -1 ; Load the last character of the string - StrCmp $5 ";" 0 +2 ; Check for ';' - StrCpy $3 $3 -1 ; remove trailing ';' + # Check for Trailing ';' + StrCpy $5 $3 1 -1 # Load the last character of the string + StrCmp $5 ";" 0 +2 # Check for ';' + StrCpy $3 $3 -1 # remove trailing ';' - ; Write the new path to the registry + # Write the new path to the registry WriteRegExpandStr ${Environ} "PATH" $3 - ; Broadcast the change to all open applications + # Broadcast the change to all open applications SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 done: @@ -767,64 +767,64 @@ FunctionEnd Function updateMinionConfig ClearErrors - FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading - GetTempFileName $R0 ; get new temp file name - FileOpen $1 $R0 "w" ; open temp file for writing + FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading + GetTempFileName $R0 # get new temp file name + FileOpen $1 $R0 "w" # open temp file for writing - loop: ; loop through each line - FileRead $0 $2 ; read line from target file - IfErrors done ; end if errors are encountered (end of line) + loop: # loop through each line + FileRead $0 $2 # read line from target file + IfErrors done # end if errors are encountered (end of line) - ${If} $MasterHost_State != "" ; if master is empty - ${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt' - ${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line - ${If} $3 == 0 ; is it in the first... 
- ${OrIf} $3 == 1 ; or second position (account for comments) - StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master - ${EndIf} ; close if statement - ${EndIf} ; close if statement + ${If} $MasterHost_State != "" # if master is empty + ${AndIf} $MasterHost_State != "salt" # and if master is not 'salt' + ${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line + ${If} $3 == 0 # is it in the first... + ${OrIf} $3 == 1 # or second position (account for comments) + StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master + ${EndIf} # close if statement + ${EndIf} # close if statement - ${If} $MinionName_State != "" ; if minion is empty - ${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname' - ${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line - ${If} $3 == 0 ; is it in the first... - ${OrIf} $3 == 1 ; or the second position (account for comments) - StrCpy $2 "id: $MinionName_State$\r$\n" ; change line - ${EndIf} ; close if statement - ${EndIf} ; close if statement + ${If} $MinionName_State != "" # if minion is empty + ${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname' + ${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line + ${If} $3 == 0 # is it in the first... 
+ ${OrIf} $3 == 1 # or the second position (account for comments) + StrCpy $2 "id: $MinionName_State$\r$\n" # change line + ${EndIf} # close if statement + ${EndIf} # close if statement - FileWrite $1 $2 ; write changed or unchanged line to temp file + FileWrite $1 $2 # write changed or unchanged line to temp file Goto loop done: - FileClose $0 ; close target file - FileClose $1 ; close temp file - Delete "$INSTDIR\conf\minion" ; delete target file - CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file - Delete $R0 ; delete temp file + FileClose $0 # close target file + FileClose $1 # close temp file + Delete "$INSTDIR\conf\minion" # delete target file + CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file + Delete $R0 # delete temp file FunctionEnd Function parseCommandLineSwitches - ; Load the parameters + # Load the parameters ${GetParameters} $R0 - ; Check for start-minion switches - ; /start-service is to be deprecated, so we must check for both + # Check for start-minion switches + # /start-service is to be deprecated, so we must check for both ${GetOptions} $R0 "/start-service=" $R1 ${GetOptions} $R0 "/start-minion=" $R2 # Service: Start Salt Minion ${IfNot} $R2 == "" - ; If start-minion was passed something, then set it + # If start-minion was passed something, then set it StrCpy $StartMinion $R2 ${ElseIfNot} $R1 == "" - ; If start-service was passed something, then set it + # If start-service was passed something, then set it StrCpy $StartMinion $R1 ${Else} - ; Otherwise default to 1 + # Otherwise default to 1 StrCpy $StartMinion 1 ${EndIf} From 914c9f4a16690e394ed9b299eac34ea542b178c8 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Thu, 10 Aug 2017 11:19:40 +0300 Subject: [PATCH 044/348] Yield timed out minions from LocalClient.cmd_iter Fixes #42711 --- salt/client/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index b047e599369..f11ccfd5fa1 
100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -883,6 +883,7 @@ class LocalClient(object): else: if kwargs.get(u'yield_pub_data'): yield pub_data + kwargs.setdefault('expect_minions', True) for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=self._get_timeout(timeout), From 21c11d07aa581f87bceb6c8fb1676ee2ac12267a Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Wed, 6 Sep 2017 15:16:36 +0300 Subject: [PATCH 045/348] Add yield_all_minions flag to cmd_iter and cmd_iter_no_block --- salt/client/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index f11ccfd5fa1..5d83db76090 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -837,6 +837,7 @@ class LocalClient(object): tgt_type=u'glob', ret=u'', kwarg=None, + yield_all_minions=True, **kwargs): ''' Yields the individual minion returns as they come in @@ -883,7 +884,8 @@ class LocalClient(object): else: if kwargs.get(u'yield_pub_data'): yield pub_data - kwargs.setdefault('expect_minions', True) + if yield_all_minions: + kwargs['expect_minions'] = True for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=self._get_timeout(timeout), @@ -909,6 +911,7 @@ class LocalClient(object): kwarg=None, show_jid=False, verbose=False, + yield_all_minions=True, **kwargs): ''' Yields the individual minion returns as they come in, or None @@ -958,6 +961,8 @@ class LocalClient(object): if not pub_data: yield pub_data else: + if yield_all_minions: + kwargs['expect_minions'] = True for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=timeout, From 85e13b0004217e6ff2352da84e3b8122f3bb5406 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Wed, 13 Sep 2017 16:05:22 +0300 Subject: [PATCH 046/348] Revert "Add yield_all_minions flag to cmd_iter and cmd_iter_no_block" This reverts commit 21c11d07aa581f87bceb6c8fb1676ee2ac12267a. 
--- salt/client/__init__.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 5d83db76090..f11ccfd5fa1 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -837,7 +837,6 @@ class LocalClient(object): tgt_type=u'glob', ret=u'', kwarg=None, - yield_all_minions=True, **kwargs): ''' Yields the individual minion returns as they come in @@ -884,8 +883,7 @@ class LocalClient(object): else: if kwargs.get(u'yield_pub_data'): yield pub_data - if yield_all_minions: - kwargs['expect_minions'] = True + kwargs.setdefault('expect_minions', True) for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=self._get_timeout(timeout), @@ -911,7 +909,6 @@ class LocalClient(object): kwarg=None, show_jid=False, verbose=False, - yield_all_minions=True, **kwargs): ''' Yields the individual minion returns as they come in, or None @@ -961,8 +958,6 @@ class LocalClient(object): if not pub_data: yield pub_data else: - if yield_all_minions: - kwargs['expect_minions'] = True for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=timeout, From 8fa33a06de9f1275ea34e45452cb0bce25ad46b9 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Wed, 13 Sep 2017 16:05:28 +0300 Subject: [PATCH 047/348] Revert "Yield timed out minions from LocalClient.cmd_iter" This reverts commit 914c9f4a16690e394ed9b299eac34ea542b178c8. 
--- salt/client/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index f11ccfd5fa1..b047e599369 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -883,7 +883,6 @@ class LocalClient(object): else: if kwargs.get(u'yield_pub_data'): yield pub_data - kwargs.setdefault('expect_minions', True) for fn_ret in self.get_iter_returns(pub_data[u'jid'], pub_data[u'minions'], timeout=self._get_timeout(timeout), From a320f2f154062360c4a93c15096ba79dcb9ef80d Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Wed, 13 Sep 2017 16:09:52 +0300 Subject: [PATCH 048/348] Clarify cmd_iter behavior in docs. --- salt/client/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index b047e599369..da32b4181df 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -844,6 +844,10 @@ class LocalClient(object): The function signature is the same as :py:meth:`cmd` with the following exceptions. + Normally :py:meth:`cmd_iter` does not yield results for minions that + are not connected. If you want it to return results for disconnected + minions set `expect_minions=True` in `kwargs`. + :return: A generator yielding the individual minion returns .. code-block:: python From 34b6c3b65fc448d22270f46a214f3636a24608f5 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 28 Aug 2017 19:33:06 -0500 Subject: [PATCH 049/348] Un-deprecate passing kwargs outside of 'kwarg' param --- salt/client/mixins.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index f5a29a9cbf3..bd69d269bf9 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -359,29 +359,20 @@ class SyncClientMixin(object): # packed into the top level object. 
The plan is to move away from # that since the caller knows what is an arg vs a kwarg, but while # we make the transition we will load "kwargs" using format_call if - # there are no kwargs in the low object passed in - f_call = None - if 'arg' not in low: - f_call = salt.utils.format_call( + # there are no kwargs in the low object passed in. + f_call = {} if 'arg' in low and 'kwarg' in low \ + else salt.utils.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - args = f_call.get('args', ()) - else: - args = low['arg'] - if 'kwarg' not in low: - log.critical( - 'kwargs must be passed inside the low data within the ' - '\'kwarg\' key. See usage of ' - 'salt.utils.args.parse_input() and ' - 'salt.minion.load_args_and_kwargs() elsewhere in the ' - 'codebase.' - ) - kwargs = {} - else: - kwargs = low['kwarg'] + args = f_call.get('args', ()) \ + if 'arg' not in low \ + else low['arg'] + kwargs = f_call.get('kwargs', {}) \ + if 'kwarg' not in low \ + else low['kwarg'] # Update the event data with loaded args and kwargs data['fun_args'] = list(args) + ([kwargs] if kwargs else []) From 9db3f5ae6dbbf3c616875e8bc16ece6557de99de Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 31 Aug 2017 00:24:11 -0500 Subject: [PATCH 050/348] Unify reactor configuration, fix caller reactors There are 4 types of reactor jobs, and 3 different config schemas for passing arguments: 1. local - positional and keyword args passed in arg/kwarg params, respectively. 2. runner/wheel - passed as individual params directly under the function name. 3. caller - only positional args supported, passed under an "args" param. In addition to being wildly inconsistent, there are several problems with each of the above approaches: - For local jobs, having to know which are positional and keyword arguments is not user-friendly. 
- For runner/wheel jobs, the fact that the arguments are all passed in the level directly below the function name means that they are dumped directly into the low chunk. This means that if any arguments are passed which conflict with the reserved keywords in the low chunk (name, order, etc.), they will override their counterparts in the low chunk, which may make the Reactor behave unpredictably. To solve these issues, this commit makes the following changes: 1. A new, unified configuration schema has been added, so that arguments are passed identically across all types of reactions. In this new schema, all arguments are passed as named arguments underneath an "args" parameter. Those named arguments are then passed as keyword arguments to the desired function. This works even for positional arguments because Python will automagically pass a keyword argument as its positional counterpart when the name of a positional argument is found in the kwargs. 2. The caller jobs now support both positional and keyword arguments. Backward-compatibility with the old configuration schema has been preserved, so old Reactor SLS files do not break. In addition, you've probably already said to yourself "Hey, caller jobs were _already_ passing their arguments under an "args" param. What gives?" Well, using the old config schema, only positional arguments were supported. So if we detect a list of positional arguments, we treat the input as positional arguments (i.e. old schema), while if the input is a dictionary (or "dictlist"), we treat the input as kwargs (i.e. new schema). 
--- salt/utils/reactor.py | 231 +++++++++++++++++++++++++++++------------- 1 file changed, 159 insertions(+), 72 deletions(-) diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py index 57c4fd0863d..36971f5c36c 100644 --- a/salt/utils/reactor.py +++ b/salt/utils/reactor.py @@ -7,12 +7,14 @@ import glob import logging # Import salt libs +import salt.client import salt.runner import salt.state import salt.utils import salt.utils.cache import salt.utils.event import salt.utils.process +import salt.wheel import salt.defaults.exitcodes # Import 3rd-party libs @@ -21,6 +23,15 @@ import salt.ext.six as six log = logging.getLogger(__name__) +REACTOR_INTERNAL_KEYWORDS = frozenset([ + '__id__', + '__sls__', + 'name', + 'order', + 'fun', + 'state', +]) + class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.state.Compiler): ''' @@ -29,6 +40,10 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat The reactor has the capability to execute pre-programmed executions as reactions to events ''' + aliases = { + 'cmd': 'local', + } + def __init__(self, opts, log_queue=None): super(Reactor, self).__init__(log_queue=log_queue) local_minion_opts = opts.copy() @@ -171,6 +186,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat return {'status': False, 'comment': 'Reactor does not exists.'} + def resolve_aliases(self, chunks): + ''' + Preserve backward compatibility by rewriting the 'state' key in the low + chunks if it is using a legacy type. 
+ ''' + for idx, _ in enumerate(chunks): + new_state = self.aliases.get(chunks[idx]['state']) + if new_state is not None: + chunks[idx]['state'] = new_state + def reactions(self, tag, data, reactors): ''' Render a list of reactor files and returns a reaction struct @@ -191,6 +216,7 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat except Exception as exc: log.error('Exception trying to compile reactions: {0}'.format(exc), exc_info=True) + self.resolve_aliases(chunks) return chunks def call_reactions(self, chunks): @@ -248,12 +274,19 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat class ReactWrap(object): ''' - Create a wrapper that executes low data for the reaction system + Wrapper that executes low data for the Reactor System ''' # class-wide cache of clients client_cache = None event_user = 'Reactor' + reaction_class = { + 'local': salt.client.LocalClient, + 'runner': salt.runner.RunnerClient, + 'wheel': salt.wheel.Wheel, + 'caller': salt.client.Caller, + } + def __init__(self, opts): self.opts = opts if ReactWrap.client_cache is None: @@ -264,21 +297,49 @@ class ReactWrap(object): queue_size=self.opts['reactor_worker_hwm'] # queue size for those workers ) + def populate_client_cache(self, low): + ''' + Populate the client cache with an instance of the specified type + ''' + reaction_type = low['state'] + if reaction_type not in self.client_cache: + log.debug('Reactor is populating %s client cache', reaction_type) + if reaction_type in ('runner', 'wheel'): + # Reaction types that run locally on the master want the full + # opts passed. + self.client_cache[reaction_type] = \ + self.reaction_class[reaction_type](self.opts) + # The len() function will cause the module functions to load if + # they aren't already loaded. We want to load them so that the + # spawned threads don't need to load them. 
Loading in the + # spawned threads creates race conditions such as sometimes not + # finding the required function because another thread is in + # the middle of loading the functions. + len(self.client_cache[reaction_type].functions) + else: + # Reactions which use remote pubs only need the conf file when + # instantiating a client instance. + self.client_cache[reaction_type] = \ + self.reaction_class[reaction_type](self.opts['conf_file']) + def run(self, low): ''' - Execute the specified function in the specified state by passing the - low data + Execute a reaction by invoking the proper wrapper func ''' - l_fun = getattr(self, low['state']) + self.populate_client_cache(low) try: - f_call = salt.utils.format_call(l_fun, low) - kwargs = f_call.get('kwargs', {}) - if 'arg' not in kwargs: - kwargs['arg'] = [] - if 'kwarg' not in kwargs: - kwargs['kwarg'] = {} + l_fun = getattr(self, low['state']) + except AttributeError: + log.error( + 'ReactWrap is missing a wrapper function for \'%s\'', + low['state'] + ) - # TODO: Setting the user doesn't seem to work for actual remote publishes + try: + wrap_call = salt.utils.format_call(l_fun, low) + args = wrap_call.get('args', ()) + kwargs = wrap_call.get('kwargs', {}) + # TODO: Setting user doesn't seem to work for actual remote pubs if low['state'] in ('runner', 'wheel'): # Update called function's low data with event user to # segregate events fired by reactor and avoid reaction loops @@ -286,80 +347,106 @@ class ReactWrap(object): # Replace ``state`` kwarg which comes from high data compiler. # It breaks some runner functions and seems unnecessary. 
kwargs['__state__'] = kwargs.pop('state') + # NOTE: if any additional keys are added here, they will also + # need to be added to filter_kwargs() - l_fun(*f_call.get('args', ()), **kwargs) + if 'args' in kwargs: + # New configuration + reactor_args = kwargs.pop('args') + for item in ('arg', 'kwarg'): + if item in low: + log.warning( + 'Reactor \'%s\' is ignoring \'%s\' param %s due to ' + 'presence of \'args\' param. Check the Reactor System ' + 'documentation for the correct argument format.', + low['__id__'], item, low[item] + ) + if low['state'] == 'caller' \ + and isinstance(reactor_args, list) \ + and not salt.utils.is_dictlist(reactor_args): + # Legacy 'caller' reactors were already using the 'args' + # param, but only supported a list of positional arguments. + # If low['args'] is a list but is *not* a dictlist, then + # this is actually using the legacy configuration. So, put + # the reactor args into kwarg['arg'] so that the wrapper + # interprets them as positional args. + kwargs['arg'] = reactor_args + kwargs['kwarg'] = {} + else: + kwargs['arg'] = () + kwargs['kwarg'] = reactor_args + if not isinstance(kwargs['kwarg'], dict): + kwargs['kwarg'] = salt.utils.repack_dictlist(kwargs['kwarg']) + if not kwargs['kwarg']: + log.error( + 'Reactor \'%s\' failed to execute %s \'%s\': ' + 'Incorrect argument format, check the Reactor System ' + 'documentation for the correct format.', + low['__id__'], low['state'], low['fun'] + ) + return + else: + # Legacy configuration + react_call = {} + if low['state'] in ('runner', 'wheel'): + if 'arg' not in kwargs or 'kwarg' not in kwargs: + # Runner/wheel execute on the master, so we can use + # format_call to get the functions args/kwargs + react_fun = self.client_cache[low['state']].functions.get(low['fun']) + if react_fun is None: + log.error( + 'Reactor \'%s\' failed to execute %s \'%s\': ' + 'function not available', + low['__id__'], low['state'], low['fun'] + ) + return + + react_call = salt.utils.format_call( + 
react_fun, + low, + expected_extra_kws=REACTOR_INTERNAL_KEYWORDS + ) + + if 'arg' not in kwargs: + kwargs['arg'] = react_call.get('args', ()) + if 'kwarg' not in kwargs: + kwargs['kwarg'] = react_call.get('kwargs', {}) + + # Execute the wrapper with the proper args/kwargs. kwargs['arg'] + # and kwargs['kwarg'] contain the positional and keyword arguments + # that will be passed to the client interface to execute the + # desired runner/wheel/remote-exec/etc. function. + l_fun(*args, **kwargs) + except SystemExit: + log.warning( + 'Reactor \'%s\' attempted to exit. Ignored.', low['__id__'] + ) except Exception: log.error( - 'Failed to execute {0}: {1}\n'.format(low['state'], l_fun), - exc_info=True - ) - - def local(self, *args, **kwargs): - ''' - Wrap LocalClient for running :ref:`execution modules ` - ''' - if 'local' not in self.client_cache: - self.client_cache['local'] = salt.client.LocalClient(self.opts['conf_file']) - try: - self.client_cache['local'].cmd_async(*args, **kwargs) - except SystemExit: - log.warning('Attempt to exit reactor. Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) - - cmd = local + 'Reactor \'%s\' failed to execute %s \'%s\'', + low['__id__'], low['state'], low['fun'], exc_info=True + ) def runner(self, fun, **kwargs): ''' Wrap RunnerClient for executing :ref:`runner modules ` ''' - if 'runner' not in self.client_cache: - self.client_cache['runner'] = salt.runner.RunnerClient(self.opts) - # The len() function will cause the module functions to load if - # they aren't already loaded. We want to load them so that the - # spawned threads don't need to load them. Loading in the spawned - # threads creates race conditions such as sometimes not finding - # the required function because another thread is in the middle - # of loading the functions. 
- len(self.client_cache['runner'].functions) - try: - self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) - except SystemExit: - log.warning('Attempt to exit in reactor by runner. Ignored') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) def wheel(self, fun, **kwargs): ''' Wrap Wheel to enable executing :ref:`wheel modules ` ''' - if 'wheel' not in self.client_cache: - self.client_cache['wheel'] = salt.wheel.Wheel(self.opts) - # The len() function will cause the module functions to load if - # they aren't already loaded. We want to load them so that the - # spawned threads don't need to load them. Loading in the spawned - # threads creates race conditions such as sometimes not finding - # the required function because another thread is in the middle - # of loading the functions. - len(self.client_cache['wheel'].functions) - try: - self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs)) - except SystemExit: - log.warning('Attempt to in reactor by whell. Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs)) - def caller(self, fun, *args, **kwargs): + def local(self, fun, tgt, **kwargs): ''' - Wrap Caller to enable executing :ref:`caller modules ` + Wrap LocalClient for running :ref:`execution modules ` ''' - log.debug("in caller with fun {0} args {1} kwargs {2}".format(fun, args, kwargs)) - args = kwargs.get('args', []) - if 'caller' not in self.client_cache: - self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file']) - try: - self.client_cache['caller'].function(fun, *args) - except SystemExit: - log.warning('Attempt to exit reactor. 
Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.client_cache['local'].cmd_async(tgt, fun, **kwargs) + + def caller(self, fun, **kwargs): + ''' + Wrap LocalCaller to execute remote exec functions locally on the Minion + ''' + self.client_cache['caller'].cmd(fun, *kwargs['arg'], **kwargs['kwarg']) From 4243a2211d1c2350e72382ee5fb823a4b441ef9f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 31 Aug 2017 23:23:41 -0500 Subject: [PATCH 051/348] Rewrite the reactor unit tests These have been skipped for a while now because they didn't work correctly. The old tests have been scrapped in favor of new ones that test both the old and new config schema. --- tests/unit/utils/test_reactor.py | 602 ++++++++++++++++++++++++++++--- 1 file changed, 542 insertions(+), 60 deletions(-) diff --git a/tests/unit/utils/test_reactor.py b/tests/unit/utils/test_reactor.py index 7a969009771..5c86f766b4f 100644 --- a/tests/unit/utils/test_reactor.py +++ b/tests/unit/utils/test_reactor.py @@ -1,74 +1,556 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import -import time -import shutil -import tempfile +import codecs +import glob +import logging import os - -from contextlib import contextmanager +import textwrap +import yaml import salt.utils -from salt.utils.process import clean_proc +import salt.loader import salt.utils.reactor as reactor -from tests.integration import AdaptedConfigurationTestCaseMixin -from tests.support.paths import TMP from tests.support.unit import TestCase, skipIf -from tests.support.mock import patch, MagicMock +from tests.support.mixins import AdaptedConfigurationTestCaseMixin +from tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON, + patch, + MagicMock, + Mock, + mock_open, +) + +REACTOR_CONFIG = '''\ +reactor: + - old_runner: + - /srv/reactor/old_runner.sls + - old_wheel: + - /srv/reactor/old_wheel.sls + - old_local: + - /srv/reactor/old_local.sls + - old_cmd: + - 
/srv/reactor/old_cmd.sls + - old_caller: + - /srv/reactor/old_caller.sls + - new_runner: + - /srv/reactor/new_runner.sls + - new_wheel: + - /srv/reactor/new_wheel.sls + - new_local: + - /srv/reactor/new_local.sls + - new_cmd: + - /srv/reactor/new_cmd.sls + - new_caller: + - /srv/reactor/new_caller.sls +''' + +REACTOR_DATA = { + 'runner': {'data': {'message': 'This is an error'}}, + 'wheel': {'data': {'id': 'foo'}}, + 'local': {'data': {'pkg': 'zsh', 'repo': 'updates'}}, + 'cmd': {'data': {'pkg': 'zsh', 'repo': 'updates'}}, + 'caller': {'data': {'path': '/tmp/foo'}}, +} + +SLS = { + '/srv/reactor/old_runner.sls': textwrap.dedent('''\ + raise_error: + runner.error.error: + - name: Exception + - message: {{ data['data']['message'] }} + '''), + '/srv/reactor/old_wheel.sls': textwrap.dedent('''\ + remove_key: + wheel.key.delete: + - match: {{ data['data']['id'] }} + '''), + '/srv/reactor/old_local.sls': textwrap.dedent('''\ + install_zsh: + local.state.single: + - tgt: test + - arg: + - pkg.installed + - {{ data['data']['pkg'] }} + - kwarg: + fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/old_cmd.sls': textwrap.dedent('''\ + install_zsh: + cmd.state.single: + - tgt: test + - arg: + - pkg.installed + - {{ data['data']['pkg'] }} + - kwarg: + fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/old_caller.sls': textwrap.dedent('''\ + touch_file: + caller.file.touch: + - args: + - {{ data['data']['path'] }} + '''), + '/srv/reactor/new_runner.sls': textwrap.dedent('''\ + raise_error: + runner.error.error: + - args: + - name: Exception + - message: {{ data['data']['message'] }} + '''), + '/srv/reactor/new_wheel.sls': textwrap.dedent('''\ + remove_key: + wheel.key.delete: + - args: + - match: {{ data['data']['id'] }} + '''), + '/srv/reactor/new_local.sls': textwrap.dedent('''\ + install_zsh: + local.state.single: + - tgt: test + - args: + - fun: pkg.installed + - name: {{ data['data']['pkg'] }} + - fromrepo: {{ data['data']['repo'] }} + '''), + 
'/srv/reactor/new_cmd.sls': textwrap.dedent('''\ + install_zsh: + cmd.state.single: + - tgt: test + - args: + - fun: pkg.installed + - name: {{ data['data']['pkg'] }} + - fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/new_caller.sls': textwrap.dedent('''\ + touch_file: + caller.file.touch: + - args: + - name: {{ data['data']['path'] }} + '''), +} + +LOW_CHUNKS = { + # Note that the "name" value in the chunk has been overwritten by the + # "name" argument in the SLS. This is one reason why the new schema was + # needed. + 'old_runner': [{ + 'state': 'runner', + '__id__': 'raise_error', + '__sls__': '/srv/reactor/old_runner.sls', + 'order': 1, + 'fun': 'error.error', + 'name': 'Exception', + 'message': 'This is an error', + }], + 'old_wheel': [{ + 'state': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/old_wheel.sls', + 'order': 1, + 'fun': 'key.delete', + 'match': 'foo', + }], + 'old_local': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_local.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }], + 'old_cmd': [{ + 'state': 'local', # 'cmd' should be aliased to 'local' + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_cmd.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }], + 'old_caller': [{ + 'state': 'caller', + '__id__': 'touch_file', + 'name': 'touch_file', + '__sls__': '/srv/reactor/old_caller.sls', + 'order': 1, + 'fun': 'file.touch', + 'args': ['/tmp/foo'], + }], + 'new_runner': [{ + 'state': 'runner', + '__id__': 'raise_error', + 'name': 'raise_error', + '__sls__': '/srv/reactor/new_runner.sls', + 'order': 1, + 'fun': 'error.error', + 'args': [ + {'name': 'Exception'}, + {'message': 'This is an error'}, + ], + }], + 'new_wheel': [{ + 'state': 
'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/new_wheel.sls', + 'order': 1, + 'fun': 'key.delete', + 'args': [ + {'match': 'foo'}, + ], + }], + 'new_local': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_local.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'args': [ + {'fun': 'pkg.installed'}, + {'name': 'zsh'}, + {'fromrepo': 'updates'}, + ], + }], + 'new_cmd': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_cmd.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'args': [ + {'fun': 'pkg.installed'}, + {'name': 'zsh'}, + {'fromrepo': 'updates'}, + ], + }], + 'new_caller': [{ + 'state': 'caller', + '__id__': 'touch_file', + 'name': 'touch_file', + '__sls__': '/srv/reactor/new_caller.sls', + 'order': 1, + 'fun': 'file.touch', + 'args': [ + {'name': '/tmp/foo'}, + ], + }], +} + +WRAPPER_CALLS = { + 'old_runner': ( + 'error.error', + { + '__state__': 'runner', + '__id__': 'raise_error', + '__sls__': '/srv/reactor/old_runner.sls', + '__user__': 'Reactor', + 'order': 1, + 'arg': [], + 'kwarg': { + 'name': 'Exception', + 'message': 'This is an error', + }, + 'name': 'Exception', + 'message': 'This is an error', + }, + ), + 'old_wheel': ( + 'key.delete', + { + '__state__': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/old_wheel.sls', + 'order': 1, + '__user__': 'Reactor', + 'arg': ['foo'], + 'kwarg': {}, + 'match': 'foo', + }, + ), + 'old_local': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_local.sls', + 'order': 1, + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }, + }, + 'old_cmd': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': 
'/srv/reactor/old_cmd.sls', + 'order': 1, + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }, + }, + 'old_caller': { + 'args': ('file.touch', '/tmp/foo'), + 'kwargs': {}, + }, + 'new_runner': ( + 'error.error', + { + '__state__': 'runner', + '__id__': 'raise_error', + 'name': 'raise_error', + '__sls__': '/srv/reactor/new_runner.sls', + '__user__': 'Reactor', + 'order': 1, + 'arg': (), + 'kwarg': { + 'name': 'Exception', + 'message': 'This is an error', + }, + }, + ), + 'new_wheel': ( + 'key.delete', + { + '__state__': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/new_wheel.sls', + 'order': 1, + '__user__': 'Reactor', + 'arg': (), + 'kwarg': {'match': 'foo'}, + }, + ), + 'new_local': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_local.sls', + 'order': 1, + 'arg': (), + 'kwarg': { + 'fun': 'pkg.installed', + 'name': 'zsh', + 'fromrepo': 'updates', + }, + }, + }, + 'new_cmd': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_cmd.sls', + 'order': 1, + 'arg': (), + 'kwarg': { + 'fun': 'pkg.installed', + 'name': 'zsh', + 'fromrepo': 'updates', + }, + }, + }, + 'new_caller': { + 'args': ('file.touch',), + 'kwargs': {'name': '/tmp/foo'}, + }, +} + +log = logging.getLogger(__name__) -@contextmanager -def reactor_process(opts, reactor): - opts = dict(opts) - opts['reactor'] = reactor - proc = reactor.Reactor(opts) - proc.start() - try: - if os.environ.get('TRAVIS_PYTHON_VERSION', None) is not None: - # Travis is slow - time.sleep(10) - else: - time.sleep(2) - yield - finally: - clean_proc(proc) - - -def _args_sideffect(*args, **kwargs): - return args, kwargs - - -@skipIf(True, 'Skipping until its clear what and how is this supposed to be testing') +@skipIf(NO_MOCK, NO_MOCK_REASON) class 
TestReactor(TestCase, AdaptedConfigurationTestCaseMixin): - def setUp(self): - self.opts = self.get_temp_config('master') - self.tempdir = tempfile.mkdtemp(dir=TMP) - self.sls_name = os.path.join(self.tempdir, 'test.sls') - with salt.utils.fopen(self.sls_name, 'w') as fh: - fh.write(''' -update_fileserver: - runner.fileserver.update -''') + ''' + Tests for constructing the low chunks to be executed via the Reactor + ''' + @classmethod + def setUpClass(cls): + ''' + Load the reactor config for mocking + ''' + cls.opts = cls.get_temp_config('master') + reactor_config = yaml.safe_load(REACTOR_CONFIG) + cls.opts.update(reactor_config) + cls.reactor = reactor.Reactor(cls.opts) + cls.reaction_map = salt.utils.repack_dictlist(reactor_config['reactor']) + renderers = salt.loader.render(cls.opts, {}) + cls.render_pipe = [(renderers[x], '') for x in ('jinja', 'yaml')] - def tearDown(self): - if os.path.isdir(self.tempdir): - shutil.rmtree(self.tempdir) - del self.opts - del self.tempdir - del self.sls_name + @classmethod + def tearDownClass(cls): + del cls.opts + del cls.reactor + del cls.render_pipe - def test_basic(self): - reactor_config = [ - {'salt/tagA': ['/srv/reactor/A.sls']}, - {'salt/tagB': ['/srv/reactor/B.sls']}, - {'*': ['/srv/reactor/all.sls']}, - ] - wrap = reactor.ReactWrap(self.opts) - with patch.object(reactor.ReactWrap, 'local', MagicMock(side_effect=_args_sideffect)): - ret = wrap.run({'fun': 'test.ping', - 'state': 'local', - 'order': 1, - 'name': 'foo_action', - '__id__': 'foo_action'}) - raise Exception(ret) + def test_list_reactors(self): + ''' + Ensure that list_reactors() returns the correct list of reactor SLS + files for each tag. + ''' + for schema in ('old', 'new'): + for rtype in REACTOR_DATA: + tag = '_'.join((schema, rtype)) + self.assertEqual( + self.reactor.list_reactors(tag), + self.reaction_map[tag] + ) + + def test_reactions(self): + ''' + Ensure that the correct reactions are built from the configured SLS + files and tag data. 
+ ''' + for schema in ('old', 'new'): + for rtype in REACTOR_DATA: + tag = '_'.join((schema, rtype)) + log.debug('test_reactions: processing %s', tag) + reactors = self.reactor.list_reactors(tag) + log.debug('test_reactions: %s reactors: %s', tag, reactors) + # No globbing in our example SLS, and the files don't actually + # exist, so mock glob.glob to just return back the path passed + # to it. + with patch.object( + glob, + 'glob', + MagicMock(side_effect=lambda x: [x])): + # The below four mocks are all so that + # salt.template.compile_template() will read the templates + # we've mocked up in the SLS global variable above. + with patch.object( + os.path, 'isfile', + MagicMock(return_value=True)): + with patch.object( + salt.utils, 'is_empty', + MagicMock(return_value=False)): + with patch.object( + codecs, 'open', + mock_open(read_data=SLS[reactors[0]])): + with patch.object( + salt.template, 'template_shebang', + MagicMock(return_value=self.render_pipe)): + reactions = self.reactor.reactions( + tag, + REACTOR_DATA[rtype], + reactors, + ) + log.debug( + 'test_reactions: %s reactions: %s', + tag, reactions + ) + self.assertEqual(reactions, LOW_CHUNKS[tag]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class TestReactWrap(TestCase, AdaptedConfigurationTestCaseMixin): + ''' + Tests that we are formulating the wrapper calls properly + ''' + @classmethod + def setUpClass(cls): + cls.wrap = reactor.ReactWrap(cls.get_temp_config('master')) + + @classmethod + def tearDownClass(cls): + del cls.wrap + + def test_runner(self): + ''' + Test runner reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'runner')) + chunk = LOW_CHUNKS[tag][0] + thread_pool = Mock() + thread_pool.fire_async = Mock() + with patch.object(self.wrap, 'pool', thread_pool): + self.wrap.run(chunk) + thread_pool.fire_async.assert_called_with( + self.wrap.client_cache['runner'].low, + args=WRAPPER_CALLS[tag] + ) + + def test_wheel(self): + ''' + 
Test wheel reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'wheel')) + chunk = LOW_CHUNKS[tag][0] + thread_pool = Mock() + thread_pool.fire_async = Mock() + with patch.object(self.wrap, 'pool', thread_pool): + self.wrap.run(chunk) + thread_pool.fire_async.assert_called_with( + self.wrap.client_cache['wheel'].low, + args=WRAPPER_CALLS[tag] + ) + + def test_local(self): + ''' + Test local reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'local')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'local': Mock()} + client_cache['local'].cmd_async = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['local'].cmd_async.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) + + def test_cmd(self): + ''' + Test cmd reactions (alias for 'local') using both the old and new + config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'cmd')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'local': Mock()} + client_cache['local'].cmd_async = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['local'].cmd_async.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) + + def test_caller(self): + ''' + Test caller reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'caller')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'caller': Mock()} + client_cache['caller'].cmd = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['caller'].cmd.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) From 20f6f3cc3991074a5d2762493644e5a8bf452126 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 1 Sep 2017 18:35:01 
-0500 Subject: [PATCH 052/348] Include a better example for reactor in master conf file --- doc/ref/configuration/master.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index 976919a3439..ecba2b15376 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -4091,7 +4091,9 @@ information. .. code-block:: yaml - reactor: [] + reactor: + - 'salt/minion/*/start': + - salt://reactor/startup_tasks.sls .. conf_master:: reactor_refresh_interval From b85c8510c7650133ebb448c0eed2455f1d31859d Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 1 Sep 2017 18:33:45 -0500 Subject: [PATCH 053/348] Improve the reactor documentation This reorganizes the reactor docs and includes examples of the new reactor SLS config syntax. --- doc/topics/beacons/index.rst | 5 +- doc/topics/reactor/index.rst | 553 ++++++++++++++++++++--------------- 2 files changed, 320 insertions(+), 238 deletions(-) diff --git a/doc/topics/beacons/index.rst b/doc/topics/beacons/index.rst index 6dae8dca091..62991af2f4a 100644 --- a/doc/topics/beacons/index.rst +++ b/doc/topics/beacons/index.rst @@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``: .. note:: You can have only one top level ``reactor`` section, so if one already - exists, add this code to the existing section. See :ref:`Understanding the - Structure of Reactor Formulas ` to learn more about - reactor SLS syntax. + exists, add this code to the existing section. See :ref:`here + ` to learn more about reactor SLS syntax. Start the Salt Master in Debug Mode diff --git a/doc/topics/reactor/index.rst b/doc/topics/reactor/index.rst index 2586245a1aa..de5df946acf 100644 --- a/doc/topics/reactor/index.rst +++ b/doc/topics/reactor/index.rst @@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and other systems about operations. The event system fires events with a very specific criteria. 
Every event has a -:strong:`tag`. Event tags allow for fast top level filtering of events. In -addition to the tag, each event has a data structure. This data structure is a -dict, which contains information about the event. +**tag**. Event tags allow for fast top-level filtering of events. In addition +to the tag, each event has a data structure. This data structure is a +dictionary, which contains information about the event. .. _reactor-mapping-events: @@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run. the :ref:`querystring syntax ` (e.g. ``salt://reactor/mycustom.sls?saltenv=reactor``). -Reactor sls files are similar to state and pillar sls files. They are -by default yaml + Jinja templates and are passed familiar context variables. +Reactor SLS files are similar to State and Pillar SLS files. They are by +default YAML + Jinja templates and are passed familiar context variables. +Click :ref:`here ` for more detailed information on the +variables availble in Jinja templating. -They differ because of the addition of the ``tag`` and ``data`` variables. - -- The ``tag`` variable is just the tag in the fired event. -- The ``data`` variable is the event's data dict. - -Here is a simple reactor sls: +Here is the SLS for a simple reaction: .. code-block:: jinja @@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor system. The only difference is that the data is matched up to the salt command API and the runner system. In this example, a command is published to the ``mysql1`` minion with a function of :py:func:`state.apply -`. Similarly, a runner can be called: +`, which performs a :ref:`highstate +`. Similarly, a runner can be called: .. 
code-block:: jinja {% if data['data']['custom_var'] == 'runit' %} call_runit_orch: runner.state.orchestrate: - - mods: _orch.runit + - args: + - mods: orchestrate.runit {% endif %} This example will execute the state.orchestrate runner and intiate an execution -of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using -``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate" -as this is most likely to cause confusion. +of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``. -Writing SLS Files ------------------ +Types of Reactions +================== -Reactor SLS files are stored in the same location as State SLS files. This means -that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are -available to the reactor and orchestrator. +============================== ================================================================================== +Name Description +============================== ================================================================================== +:ref:`local ` Runs a :ref:`remote-execution function ` on targeted minions +:ref:`runner ` Executes a :ref:`runner function ` +:ref:`wheel ` Executes a :ref:`wheel function ` on the master +:ref:`caller ` Runs a :ref:`remote-execution function ` on a masterless minion +============================== ================================================================================== -It is recommended to keep reactor and orchestrator SLS files in their own uniquely -named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``, -``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to -read through this a few years down the road. +.. note:: + The ``local`` and ``caller`` reaction types will be renamed for the Oxygen + release. These reaction types were named after Salt's internal client + interfaces, and are not intuitively named. 
Both ``local`` and ``caller`` + will continue to work in Reactor SLS files, but for the Oxygen release the + documentation will be updated to reflect the new preferred naming. -The Goal of Writing Reactor SLS Files -===================================== +Where to Put Reactor SLS Files +============================== -Reactor SLS files share the familiar syntax from Salt States but there are -important differences. The goal of a Reactor file is to process a Salt event as -quickly as possible and then to optionally start a **new** process in response. +Reactor SLS files can come both from files local to the master, and from any of +backends enabled via the :conf_master:`fileserver_backend` config option. Files +placed in the Salt fileserver can be referenced using a ``salt://`` URL, just +like they can in State SLS files. -1. The Salt Reactor watches Salt's event bus for new events. -2. The event tag is matched against the list of event tags under the - ``reactor`` section in the Salt Master config. -3. The SLS files for any matches are Rendered into a data structure that - represents one or more function calls. -4. That data structure is given to a pool of worker threads for execution. +It is recommended to place reactor and orchestrator SLS files in their own +uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``, +``reactor/``, etc., to keep them organized. + +.. _reactor-sls: + +Writing Reactor SLS +=================== + +The different reaction types were developed separately and have historically +had different methods for passing arguments. For the 2017.7.2 release a new, +unified configuration schema has been introduced, which applies to all reaction +types. + +The old config schema will continue to be supported, and there is no plan to +deprecate it at this time. + +.. _reactor-local: + +Local Reactions +--------------- + +A ``local`` reaction runs a :ref:`remote-execution function ` +on the targeted minions. 
+ +The old config schema required the positional and keyword arguments to be +manually separated by the user under ``arg`` and ``kwarg`` parameters. However, +this is not very user-friendly, as it forces the user to distinguish which type +of argument is which, and make sure that positional arguments are ordered +properly. Therefore, the new config schema is recommended if the master is +running a supported release. + +The below two examples are equivalent: + ++---------------------------------+-----------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================+=============================+ +| :: | :: | +| | | +| install_zsh: | install_zsh: | +| local.state.single: | local.state.single: | +| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' | +| - tgt_type: grain | - tgt_type: grain | +| - args: | - arg: | +| - fun: pkg.installed | - pkg.installed | +| - name: zsh | - zsh | +| - fromrepo: updates | - kwarg: | +| | fromrepo: updates | ++---------------------------------+-----------------------------+ + +This reaction would be equvalent to running the following Salt command: + +.. code-block:: bash + + salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates + +.. note:: + Any other parameters in the :py:meth:`LocalClient().cmd_async() + ` method can be passed at the same + indentation level as ``tgt``. + +.. note:: + ``tgt_type`` is only required when the target expression defined in ``tgt`` + uses a :ref:`target type ` other than a minion ID glob. + + The ``tgt_type`` argument was named ``expr_form`` in releases prior to + 2017.7.0. + +.. _reactor-runner: + +Runner Reactions +---------------- + +Runner reactions execute :ref:`runner functions ` locally on +the master. + +The old config schema called for passing arguments to the reaction directly +under the name of the runner function. However, this can cause unpredictable +interactions with the Reactor system's internal arguments. 
It is also possible +to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above +in :ref:`local reactions `, but as noted above this is not very +user-friendly. Therefore, the new config schema is recommended if the master +is running a supported release. + +The below two examples are equivalent: + ++-------------------------------------------------+-------------------------------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================================+=================================================+ +| :: | :: | +| | | +| deploy_app: | deploy_app: | +| runner.state.orchestrate: | runner.state.orchestrate: | +| - args: | - mods: orchestrate.deploy_app | +| - mods: orchestrate.deploy_app | - kwarg: | +| - pillar: | pillar: | +| event_tag: {{ tag }} | event_tag: {{ tag }} | +| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} | ++-------------------------------------------------+-------------------------------------------------+ + +Assuming that the event tag is ``foo``, and the data passed to the event is +``{'bar': 'baz'}``, then this reaction is equvalent to running the following +Salt command: + +.. code-block:: bash + + salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}' + +.. _reactor-wheel: + +Wheel Reactions +--------------- + +Wheel reactions run :ref:`wheel functions ` locally on the +master. + +Like :ref:`runner reactions `, the old config schema called for +wheel reactions to have arguments passed directly under the name of the +:ref:`wheel function ` (or in ``arg`` or ``kwarg`` parameters). 
+ +The below two examples are equivalent: + ++-----------------------------------+---------------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++===================================+=================================+ +| :: | :: | +| | | +| remove_key: | remove_key: | +| wheel.key.delete: | wheel.key.delete: | +| - args: | - match: {{ data['id'] }} | +| - match: {{ data['id'] }} | | ++-----------------------------------+---------------------------------+ + +.. _reactor-caller: + +Caller Reactions +---------------- + +Caller reactions run :ref:`remote-execution functions ` on a +minion daemon's Reactor system. To run a Reactor on the minion, it is necessary +to configure the :mod:`Reactor Engine ` in the minion +config file, and then setup your watched events in a ``reactor`` section in the +minion config file as well. + +.. note:: Masterless Minions use this Reactor + + This is the only way to run the Reactor if you use masterless minions. + +Both the old and new config schemas involve passing arguments under an ``args`` +parameter. However, the old config schema only supports positional arguments. +Therefore, the new config schema is recommended if the masterless minion is +running a supported release. + +The below two examples are equivalent: + ++---------------------------------+---------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================+===========================+ +| :: | :: | +| | | +| touch_file: | touch_file: | +| caller.file.touch: | caller.file.touch: | +| - args: | - args: | +| - name: /tmp/foo | - /tmp/foo | ++---------------------------------+---------------------------+ + +This reaction is equvalent to running the following Salt command: + +.. code-block:: bash + + salt-call file.touch name=/tmp/foo + +Best Practices for Writing Reactor SLS Files +============================================ + +The Reactor works as follows: + +1. 
The Salt Reactor watches Salt's event bus for new events. +2. Each event's tag is matched against the list of event tags configured under + the :conf_master:`reactor` section in the Salt Master config. +3. The SLS files for any matches are rendered into a data structure that + represents one or more function calls. +4. That data structure is given to a pool of worker threads for execution. Matching and rendering Reactor SLS files is done sequentially in a single -process. Complex Jinja that calls out to slow Execution or Runner modules slows -down the rendering and causes other reactions to pile up behind the current -one. The worker pool is designed to handle complex and long-running processes -such as Salt Orchestrate. +process. For that reason, reactor SLS files should contain few individual +reactions (one, if at all possible). Also, keep in mind that reactions are +fired asynchronously (with the exception of :ref:`caller `) and +do *not* support :ref:`requisites `. -tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process -started by the worker threads can be long-running. Using the reactor to fire -an orchestrate runner would be ideal. +Complex Jinja templating that calls out to slow :ref:`remote-execution +` or :ref:`runner ` functions slows down +the rendering and causes other reactions to pile up behind the current one. The +worker pool is designed to handle complex and long-running processes like +:ref:`orchestration ` jobs. + +Therefore, when complex tasks are in order, :ref:`orchestration +` is a natural fit. Orchestration SLS files can be more +complex, and use requisites. Performing a complex task using orchestration lets +the Reactor system fire off the orchestration job and proceed with processing +other reactions. + +.. _reactor-jinja-context: Jinja Context -------------- +============= -Reactor files only have access to a minimal Jinja context. ``grains`` and -``pillar`` are not available. 
The ``salt`` object is available for calling -Runner and Execution modules but it should be used sparingly and only for quick -tasks for the reasons mentioned above. +Reactor SLS files only have access to a minimal Jinja context. ``grains`` and +``pillar`` are *not* available. The ``salt`` object is available for calling +:ref:`remote-execution ` or :ref:`runner ` +functions, but it should be used sparingly and only for quick tasks for the +reasons mentioned above. + +In addition to the ``salt`` object, the following variables are available in +the Jinja context: + +- ``tag`` - the tag from the event that triggered execution of the Reactor SLS + file +- ``data`` - the event's data dictionary + +The ``data`` dict will contain an ``id`` key containing the minion ID, if the +event was fired from a minion, and a ``data`` key containing the data passed to +the event. Advanced State System Capabilities ----------------------------------- +================================== -Reactor SLS files, by design, do not support Requisites, ordering, -``onlyif``/``unless`` conditionals and most other powerful constructs from -Salt's State system. +Reactor SLS files, by design, do not support :ref:`requisites `, +ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs +from Salt's State system. Complex Master-side operations are best performed by Salt's Orchestrate system so using the Reactor to kick off an Orchestrate run is a very common pairing. @@ -166,7 +370,7 @@ For example: # /etc/salt/master.d/reactor.conf # A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"} reactor: - - myco/custom/event: + - my/custom/event: - /srv/reactor/some_event.sls .. 
code-block:: jinja @@ -174,15 +378,15 @@ For example: # /srv/reactor/some_event.sls invoke_orchestrate_file: runner.state.orchestrate: - - mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls - - kwarg: - pillar: - event_tag: {{ tag }} - event_data: {{ data|json() }} + - args: + - mods: orchestrate.do_complex_thing + - pillar: + event_tag: {{ tag }} + event_data: {{ data|json }} .. code-block:: jinja - # /srv/salt/_orch/do_complex_thing.sls + # /srv/salt/orchestrate/do_complex_thing.sls {% set tag = salt.pillar.get('event_tag') %} {% set data = salt.pillar.get('event_data') %} @@ -209,7 +413,7 @@ For example: .. _beacons-and-reactors: Beacons and Reactors --------------------- +==================== An event initiated by a beacon, when it arrives at the master will be wrapped inside a second event, such that the data object containing the beacon @@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file, you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{ data['id'] }}`` as for events initiated directly on the event bus. +Similarly, the data dictionary attached to the event would be located in +``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``. + See the :ref:`beacon documentation ` for examples. -Fire an event -============= +Manually Firing an Event +======================== -To fire an event from a minion call ``event.send`` +From the Master +--------------- + +Use the :py:func:`event.send ` runner: .. code-block:: bash - salt-call event.send 'foo' '{orchestrate: refresh}' + salt-run event.send foo '{orchestrate: refresh}' -After this is called, any reactor sls files matching event tag ``foo`` will -execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``. +From the Minion +--------------- -See :py:mod:`salt.modules.event` for more information. 
+To fire an event to the master from a minion, call :py:func:`event.send +`: -Knowing what event is being fired -================================= +.. code-block:: bash -The best way to see exactly what events are fired and what data is available in -each event is to use the :py:func:`state.event runner + salt-call event.send foo '{orchestrate: refresh}' + +To fire an event to the minion's local event bus, call :py:func:`event.fire +`: + +.. code-block:: bash + + salt-call event.fire '{orchestrate: refresh}' foo + +Referencing Data Passed in Events +--------------------------------- + +Assuming any of the above examples, any reactor SLS files triggered by watching +the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}`` +equal to ``'refresh'``. + +Getting Information About Events +================================ + +The best way to see exactly what events have been fired and what data is +available in each event is to use the :py:func:`state.event runner `. .. seealso:: :ref:`Common Salt Events ` @@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file). view the result of referencing Jinja variables. If the result is empty then Jinja produced an empty result and the Reactor will ignore it. -.. _reactor-structure: +Passing Event Data to Minions or Orchestration as Pillar +-------------------------------------------------------- -Understanding the Structure of Reactor Formulas -=============================================== - -**I.e., when to use `arg` and `kwarg` and when to specify the function -arguments directly.** - -While the reactor system uses the same basic data structure as the state -system, the functions that will be called using that data structure are -different functions than are called via Salt's state system. The Reactor can -call Runner modules using the `runner` prefix, Wheel modules using the `wheel` -prefix, and can also cause minions to run Execution modules using the `local` -prefix. 
- -.. versionchanged:: 2014.7.0 - The ``cmd`` prefix was renamed to ``local`` for consistency with other - parts of Salt. A backward-compatible alias was added for ``cmd``. - -The Reactor runs on the master and calls functions that exist on the master. In -the case of Runner and Wheel functions the Reactor can just call those -functions directly since they exist on the master and are run on the master. - -In the case of functions that exist on minions and are run on minions, the -Reactor still needs to call a function on the master in order to send the -necessary data to the minion so the minion can execute that function. - -The Reactor calls functions exposed in :ref:`Salt's Python API documentation -`. and thus the structure of Reactor files very transparently -reflects the function signatures of those functions. - -Calling Execution modules on Minions ------------------------------------- - -The Reactor sends commands down to minions in the exact same way Salt's CLI -interface does. It calls a function locally on the master that sends the name -of the function as well as a list of any arguments and a dictionary of any -keyword arguments that the minion should use to execute that function. - -Specifically, the Reactor calls the async version of :py:meth:`this function -`. You can see that function has 'arg' and 'kwarg' -parameters which are both values that are sent down to the minion. - -Executing remote commands maps to the :strong:`LocalClient` interface which is -used by the :strong:`salt` command. This interface more specifically maps to -the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This -means that the arguments passed are being passed to the :strong:`cmd_async` -method, not the remote method. A field starts with :strong:`local` to use the -:strong:`LocalClient` subsystem. The result is, to execute a remote command, -a reactor formula would look like this: - -.. 
code-block:: yaml - - clean_tmp: - local.cmd.run: - - tgt: '*' - - arg: - - rm -rf /tmp/* - -The ``arg`` option takes a list of arguments as they would be presented on the -command line, so the above declaration is the same as running this salt -command: - -.. code-block:: bash - - salt '*' cmd.run 'rm -rf /tmp/*' - -Use the ``tgt_type`` argument to specify a matcher: - -.. code-block:: yaml - - clean_tmp: - local.cmd.run: - - tgt: 'os:Ubuntu' - - tgt_type: grain - - arg: - - rm -rf /tmp/* - - - clean_tmp: - local.cmd.run: - - tgt: 'G@roles:hbase_master' - - tgt_type: compound - - arg: - - rm -rf /tmp/* - -.. note:: - The ``tgt_type`` argument was named ``expr_form`` in releases prior to - 2017.7.0 (2016.11.x and earlier). - -Any other parameters in the :py:meth:`LocalClient().cmd() -` method can be specified as well. - -Executing Reactors from the Minion ----------------------------------- - -The minion can be setup to use the Reactor via a reactor engine. This just -sets up and listens to the minions event bus, instead of to the masters. - -The biggest difference is that you have to use the caller method on the -Reactor, which is the equivalent of salt-call, to run your commands. - -:mod:`Reactor Engine setup ` - -.. code-block:: yaml - - clean_tmp: - caller.cmd.run: - - arg: - - rm -rf /tmp/* - -.. note:: Masterless Minions use this Reactor - - This is the only way to run the Reactor if you use masterless minions. - -Calling Runner modules and Wheel modules ----------------------------------------- - -Calling Runner modules and Wheel modules from the Reactor uses a more direct -syntax since the function is being executed locally instead of sending a -command to a remote system to be executed there. There are no 'arg' or 'kwarg' -parameters (unless the Runner function or Wheel function accepts a parameter -with either of those names.) - -For example: - -.. 
code-block:: yaml - - clear_the_grains_cache_for_all_minions: - runner.cache.clear_grains - -If the :py:func:`the runner takes arguments ` then -they must be specified as keyword arguments. - -.. code-block:: yaml - - spin_up_more_web_machines: - runner.cloud.profile: - - prof: centos_6 - - instances: - - web11 # These VM names would be generated via Jinja in a - - web12 # real-world example. - -To determine the proper names for the arguments, check the documentation -or source code for the runner function you wish to call. - -Passing event data to Minions or Orchestrate as Pillar ------------------------------------------------------- - -An interesting trick to pass data from the Reactor script to +An interesting trick to pass data from the Reactor SLS file to :py:func:`state.apply ` is to pass it as inline Pillar data since both functions take a keyword argument named ``pillar``. @@ -484,10 +567,9 @@ from the event to the state file via inline Pillar. add_new_minion_to_pool: local.state.apply: - tgt: 'haproxy*' - - arg: - - haproxy.refresh_pool - - kwarg: - pillar: + - args: + - mods: haproxy.refresh_pool + - pillar: new_minion: {{ data['id'] }} {% endif %} @@ -503,17 +585,16 @@ This works with Orchestrate files as well: call_some_orchestrate_file: runner.state.orchestrate: - - mods: _orch.some_orchestrate_file - - pillar: - stuff: things + - args: + - mods: orchestrate.some_orchestrate_file + - pillar: + stuff: things Which is equivalent to the following command at the CLI: .. code-block:: bash - salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}' - -This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls. + salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}' Finally, that data is available in the state file using the normal Pillar lookup syntax. The following example is grabbing web server names and IP @@ -564,7 +645,7 @@ includes the minion id, which we can use for matching. 
- 'salt/minion/ink*/start': - /srv/reactor/auth-complete.sls -In this sls file, we say that if the key was rejected we will delete the key on +In this SLS file, we say that if the key was rejected we will delete the key on the master and then also tell the master to ssh in to the minion and tell it to restart the minion, since a minion process will die if the key is rejected. @@ -580,19 +661,21 @@ authentication every ten seconds by default. {% if not data['result'] and data['id'].startswith('ink') %} minion_remove: wheel.key.delete: - - match: {{ data['id'] }} + - args: + - match: {{ data['id'] }} minion_rejoin: local.cmd.run: - tgt: salt-master.domain.tld - - arg: - - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart' + - args: + - cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart' {% endif %} {# Ink server is sending new key -- accept this key #} {% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %} minion_add: wheel.key.accept: - - match: {{ data['id'] }} + - args: + - match: {{ data['id'] }} {% endif %} No if statements are needed here because we already limited this action to just From a7b4e1f78237dcd45b56e4acd78a70b07fe0f214 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 6 Sep 2017 10:38:45 -0500 Subject: [PATCH 054/348] Simplify client logic --- salt/client/mixins.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index bd69d269bf9..ea2090e2635 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -360,19 +360,18 @@ class SyncClientMixin(object): # that since the caller knows what is an arg vs a kwarg, but while # we make the transition we will load "kwargs" using format_call if # there are no kwargs in the low object passed in. 
- f_call = {} if 'arg' in low and 'kwarg' in low \ - else salt.utils.format_call( + + if 'arg' in low and 'kwarg' in low: + args = low['arg'] + kwargs = low['kwarg'] + else: + f_call = salt.utils.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - - args = f_call.get('args', ()) \ - if 'arg' not in low \ - else low['arg'] - kwargs = f_call.get('kwargs', {}) \ - if 'kwarg' not in low \ - else low['kwarg'] + args = f_call.get('args', ()) + kwargs = f_call.get('kwargs', {}) # Update the event data with loaded args and kwargs data['fun_args'] = list(args) + ([kwargs] if kwargs else []) From 3118faca0a96ef1e7f40debeea04f9d84a7eec04 Mon Sep 17 00:00:00 2001 From: Peter Sagerson Date: Tue, 12 Sep 2017 13:17:20 -0700 Subject: [PATCH 055/348] acme.cert: avoid IOError on failure. --- salt/states/acme.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/acme.py b/salt/states/acme.py index 1ab6b57dfb4..43649a64262 100644 --- a/salt/states/acme.py +++ b/salt/states/acme.py @@ -116,9 +116,14 @@ def cert(name, if res['result'] is None: ret['changes'] = {} else: + if not __salt__['acme.has'](name): + new = None + else: + new = __salt__['acme.info'](name) + ret['changes'] = { 'old': old, - 'new': __salt__['acme.info'](name) + 'new': new } return ret From 6f6619242fd8d46ac226eeb6d7778594a4e3a076 Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 17:10:42 +0200 Subject: [PATCH 056/348] Fix checking for newline on end of file by properly checking the last byte of the file if the file is non empty. 
--- salt/modules/ssh.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 022a5bc9166..0f8210392c0 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -740,10 +740,13 @@ def set_auth_key( with salt.utils.fopen(fconfig, 'ab+') as _fh: if new_file is False: # Let's make sure we have a new line at the end of the file - _fh.seek(1024, 2) - if not _fh.read(1024).rstrip(six.b(' ')).endswith(six.b('\n')): - _fh.seek(0, 2) - _fh.write(six.b('\n')) + _fh.seek(0,2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1,2) + if _fh.read(1) != six.b('\n') + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From 923ec62771b933a4b29c2c4c6bcbc21b5c43757d Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 17:35:39 +0200 Subject: [PATCH 057/348] Copy paste typo --- salt/modules/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 0f8210392c0..22ec4a9fb10 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -745,7 +745,7 @@ def set_auth_key( # File isn't empty, check if last byte is a newline # If not, add one _fh.seek(-1,2) - if _fh.read(1) != six.b('\n') + if _fh.read(1) != six.b('\n'): _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) From 406f61ac9ad8f7cb26be99fbe2916b02dc040363 Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 20:38:39 +0200 Subject: [PATCH 058/348] Fix indentation from tabs to spaces --- salt/modules/ssh.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 22ec4a9fb10..a158ed3ece9 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -739,14 +739,14 @@ def 
set_auth_key( try: with salt.utils.fopen(fconfig, 'ab+') as _fh: if new_file is False: - # Let's make sure we have a new line at the end of the file - _fh.seek(0,2) - if _fh.tell() > 0: - # File isn't empty, check if last byte is a newline - # If not, add one - _fh.seek(-1,2) - if _fh.read(1) != six.b('\n'): - _fh.write(six.b('\n')) + # Let's make sure we have a new line at the end of the file + _fh.seek(0,2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1,2) + if _fh.read(1) != six.b('\n'): + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From c68dd5b8a43b4348d67f0e48c0b6e24fba7138e6 Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Wed, 13 Sep 2017 18:35:49 -0400 Subject: [PATCH 059/348] Lint: fix spacing --- salt/modules/ssh.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index a158ed3ece9..2f48cf7a579 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -739,14 +739,14 @@ def set_auth_key( try: with salt.utils.fopen(fconfig, 'ab+') as _fh: if new_file is False: - # Let's make sure we have a new line at the end of the file - _fh.seek(0,2) - if _fh.tell() > 0: - # File isn't empty, check if last byte is a newline - # If not, add one - _fh.seek(-1,2) - if _fh.read(1) != six.b('\n'): - _fh.write(six.b('\n')) + # Let's make sure we have a new line at the end of the file + _fh.seek(0, 2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1, 2) + if _fh.read(1) != six.b('\n'): + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From 1d6dc6fb727e2c25972acfbb96c62bc21ca79e74 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sun, 3 Sep 2017 17:23:44 +1000 Subject: [PATCH 060/348] Docs are wrong cache_dir (bool) and cache_file (str) cannot be 
passed on the cli (#2) --- salt/modules/win_pkg.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index f66bd762ee9..1f85f49fcd3 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -913,18 +913,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Version 1.2.3 will apply to packages foo and bar salt '*' pkg.install foo,bar version=1.2.3 - cache_file (str): - A single file to copy down for use with the installer. Copied to the - same location as the installer. Use this over ``cache_dir`` if there - are many files in the directory and you only need a specific file - and don't want to cache additional files that may reside in the - installer directory. Only applies to files on ``salt://`` - - cache_dir (bool): - True will copy the contents of the installer directory. This is - useful for installations that are not a single file. Only applies to - directories on ``salt://`` - extra_install_flags (str): Additional install flags that will be appended to the ``install_flags`` defined in the software definition file. 
Only From a7c8b9e048d0191beab4f02132db1458a1df8701 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 02:28:16 +1000 Subject: [PATCH 061/348] Update win_pkg.py --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 1f85f49fcd3..1f6f20b8a35 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1204,7 +1204,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): if use_msiexec: cmd = msiexec arguments = ['/i', cached_pkg] - if pkginfo['version_num'].get('allusers', True): + if pkginfo[version_num].get('allusers', True): arguments.append('ALLUSERS="1"') arguments.extend(salt.utils.shlex_split(install_flags)) else: From d4981a2717d2cdbe1a726b6a7624e307ac834312 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 12:31:51 +1000 Subject: [PATCH 062/348] Update doco --- doc/topics/windows/windows-package-manager.rst | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index 063c8b44eb3..cea071e8885 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -480,11 +480,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file :param bool allusers: This parameter is specific to `.msi` installations. It tells `msiexec` to install the software for all users. The default is True. -:param bool cache_dir: If true, the entire directory where the installer resides - will be recursively cached. This is useful for installers that depend on - other files in the same directory for installation. +:param bool cache_dir: If true when installer URL begins with salt://, the + entire directory where the installer resides will be recursively cached. 
+ This is useful for installers that depend on other files in the same + directory for installation. -.. note:: Only applies to salt: installer URLs. +:param str cache_file: + When installer URL begins with salt://, this indicates single file to copy + down for use with the installer. Copied to the same location as the + installer. Use this over ``cache_dir`` if there are many files in the + directory and you only need a specific file and don't want to cache + additional files that may reside in the installer directory. Here's an example for a software package that has dependent files: From c3e16661c35314f4af414e586745c327b9bab44c Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sun, 3 Sep 2017 17:23:44 +1000 Subject: [PATCH 063/348] Docs are wrong cache_dir (bool) and cache_file (str) cannot be passed on the cli (#2) --- salt/modules/win_pkg.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index d3434cc2b7a..3357a874714 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -983,18 +983,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Version 1.2.3 will apply to packages foo and bar salt '*' pkg.install foo,bar version=1.2.3 - cache_file (str): - A single file to copy down for use with the installer. Copied to the - same location as the installer. Use this over ``cache_dir`` if there - are many files in the directory and you only need a specific file - and don't want to cache additional files that may reside in the - installer directory. Only applies to files on ``salt://`` - - cache_dir (bool): - True will copy the contents of the installer directory. This is - useful for installations that are not a single file. Only applies to - directories on ``salt://`` - extra_install_flags (str): Additional install flags that will be appended to the ``install_flags`` defined in the software definition file. 
Only From 5cdcdbf428234277555f5c58b22e09ec70b2ca0a Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 02:28:16 +1000 Subject: [PATCH 064/348] Update win_pkg.py --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 3357a874714..9ed7e3d7f58 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1274,7 +1274,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): if use_msiexec: cmd = msiexec arguments = ['/i', cached_pkg] - if pkginfo['version_num'].get('allusers', True): + if pkginfo[version_num].get('allusers', True): arguments.append('ALLUSERS="1"') arguments.extend(salt.utils.shlex_split(install_flags)) else: From b3dbafb0357686c90f712acb284cd8473f853f10 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 12:31:51 +1000 Subject: [PATCH 065/348] Update doco --- doc/topics/windows/windows-package-manager.rst | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index 9d1838c807d..20ed60baf68 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -481,11 +481,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file :param bool allusers: This parameter is specific to `.msi` installations. It tells `msiexec` to install the software for all users. The default is True. -:param bool cache_dir: If true, the entire directory where the installer resides - will be recursively cached. This is useful for installers that depend on - other files in the same directory for installation. +:param bool cache_dir: If true when installer URL begins with salt://, the + entire directory where the installer resides will be recursively cached. 
+ This is useful for installers that depend on other files in the same + directory for installation. -.. note:: Only applies to salt: installer URLs. +:param str cache_file: + When installer URL begins with salt://, this indicates single file to copy + down for use with the installer. Copied to the same location as the + installer. Use this over ``cache_dir`` if there are many files in the + directory and you only need a specific file and don't want to cache + additional files that may reside in the installer directory. Here's an example for a software package that has dependent files: From 58f7d051c9fe8d11cd373cfec28caf0f37fa4da9 Mon Sep 17 00:00:00 2001 From: haam3r Date: Thu, 14 Sep 2017 22:40:19 +0300 Subject: [PATCH 066/348] Issue #43479 No runners.config in 2017.7 branch Add extra note about needing to import the runners.config module from the develop branch when running on a 2017.7 release. --- doc/ref/runners/all/salt.runners.mattermost.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/ref/runners/all/salt.runners.mattermost.rst b/doc/ref/runners/all/salt.runners.mattermost.rst index 4a2b8e28c65..7fa1e2f3d44 100644 --- a/doc/ref/runners/all/salt.runners.mattermost.rst +++ b/doc/ref/runners/all/salt.runners.mattermost.rst @@ -1,6 +1,12 @@ salt.runners.mattermost module ============================== +**Note for 2017.7 releases!** + +Due to the `salt.runners.config `_ module not being available in this release series, importing the `salt.runners.config `_ module from the develop branch is required to make this module work. + +Ref: `Mattermost runner failing to retrieve config values due to unavailable config runner #43479 `_ + .. 
automodule:: salt.runners.mattermost :members: :undoc-members: From bcbf7b4e684df322e44f8039a4dbd0e670d75b96 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 14 Sep 2017 17:26:59 -0600 Subject: [PATCH 067/348] Add logic for test=True --- salt/states/chocolatey.py | 46 ++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/salt/states/chocolatey.py b/salt/states/chocolatey.py index d83f9bddd39..60627e18881 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -97,29 +97,49 @@ def installed(name, version=None, source=None, force=False, pre_versions=False, ret['changes'] = {name: 'Version {0} will be installed' ''.format(version)} else: - ret['changes'] = {name: 'Will be installed'} + ret['changes'] = {name: 'Latest version will be installed'} + # Package installed else: version_info = __salt__['chocolatey.version'](name, check_remote=True) full_name = name - lower_name = name.lower() for pkg in version_info: - if lower_name == pkg.lower(): + if name.lower() == pkg.lower(): full_name = pkg - available_version = version_info[full_name]['available'][0] - version = version if version else available_version + installed_version = version_info[full_name]['installed'][0] - if force: - ret['changes'] = {name: 'Version {0} will be forcibly installed' - ''.format(version)} - elif allow_multiple: - ret['changes'] = {name: 'Version {0} will be installed side by side' - ''.format(version)} + if version: + if salt.utils.compare_versions( + ver1=installed_version, oper="==", ver2=version): + if force: + ret['changes'] = {name: 'Version {0} will be reinstalled' + ''.format(version)} + else: + ret['comment'] = '{0} {1} is already installed' \ + ''.format(name, version) + return ret + else: + if allow_multiple: + ret['changes'] = { + name: 'Version {0} will be installed side by side with ' + 'Version {1} if supported' + ''.format(version, installed_version)} + else: + ret['changes'] = { + name: 'Version {0} will be 
installed over Existing ' + 'Version {1}'.format(version, installed_version)} + force = True else: - ret['comment'] = 'The Package {0} is already installed'.format(name) - return ret + version = installed_version + if force: + ret['changes'] = {name: 'Version {0} will be reinstalled' + ''.format(version)} + else: + ret['comment'] = '{0} {1} is already installed' \ + ''.format(name, version) + return ret if __opts__['test']: ret['result'] = None From 0e3c4475673badb00059897cc6ee1042a865e126 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 14 Sep 2017 20:36:57 -0500 Subject: [PATCH 068/348] Fix incorrect handling of pkg virtual and os_family grain Several Debian-based distros have the wrong os_family grain, and instead of fixing it in the core grains, the aptpkg __virtual__ has been incorrectly modified to look for the incorrect os_family. This fixes the core grains and changes the aptpkg __virtual__ to look only for the Debian os_family. It also adds a comment in the __virtual__ to clear this up for the future. 
--- salt/grains/core.py | 4 ++++ salt/modules/aptpkg.py | 12 ++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index a7e1a22d2af..0a98bc148f3 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1175,6 +1175,10 @@ _OS_FAMILY_MAP = { 'Raspbian': 'Debian', 'Devuan': 'Debian', 'antiX': 'Debian', + 'Kali': 'Debian', + 'neon': 'Debian', + 'Cumulus': 'Debian', + 'Deepin': 'Debian', 'NILinuxRT': 'NILinuxRT', 'NILinuxRT-XFCE': 'NILinuxRT', 'Void': 'Void', diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index 04ddbaf9a2b..01c05481444 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -93,11 +93,15 @@ __virtualname__ = 'pkg' def __virtual__(): ''' - Confirm this module is on a Debian based system + Confirm this module is on a Debian-based system ''' - if __grains__.get('os_family') in ('Kali', 'Debian', 'neon'): - return __virtualname__ - elif __grains__.get('os_family', False) == 'Cumulus': + # If your minion is running an OS which is Debian-based but does not have + # an "os_family" grain of Debian, then the proper fix is NOT to check for + # the minion's "os_family" grain here in the __virtual__. The correct fix + # is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP + # dict in salt/grains/core.py, so that we assign the correct "os_family" + # grain to the minion. + if __grains__.get('os_family') == 'Debian': return __virtualname__ return (False, 'The pkg module could not be loaded: unsupported OS family') From 0e4a744d95ff9761bb9a0fb1952fab2fd61dbe0f Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Fri, 15 Sep 2017 18:47:03 +0300 Subject: [PATCH 069/348] Forward events to all masters syndic connected to. 
--- salt/minion.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index 6b7c82a8d7f..88ef463d0c1 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2589,6 +2589,8 @@ class SyndicManager(MinionBase): ''' if kwargs is None: kwargs = {} + successful = False + # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master)) @@ -2596,12 +2598,12 @@ class SyndicManager(MinionBase): try: getattr(syndic_future.result(), func)(*args, **kwargs) - return + successful = True except SaltClientError: log.error('Unable to call {0} on {1}, trying another...'.format(func, master)) self._mark_master_dead(master) - continue - log.critical('Unable to call {0} on any masters!'.format(func)) + if not successful: + log.critical('Unable to call {0} on any masters!'.format(func)) def _return_pub_syndic(self, values, master_id=None): ''' From f146399f7a52bcfe0fa2df3c878bb75543d36fea Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 15 Sep 2017 11:07:55 -0600 Subject: [PATCH 070/348] Use posix=False for shlex.split Fixes issue with doublequotes being removed in Windows Removes forced log level so that the command being run will be displayed Consolidates the creation of the uninstall command to avoid duplication --- salt/modules/win_pkg.py | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 9ed7e3d7f58..2c7a2b5e010 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1276,10 +1276,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): arguments = ['/i', cached_pkg] if pkginfo[version_num].get('allusers', True): arguments.append('ALLUSERS="1"') - arguments.extend(salt.utils.shlex_split(install_flags)) + 
arguments.extend(salt.utils.shlex_split(install_flags, posix=False)) else: cmd = cached_pkg - arguments = salt.utils.shlex_split(install_flags) + arguments = salt.utils.shlex_split(install_flags, posix=False) # Install the software # Check Use Scheduler Option @@ -1341,7 +1341,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Launch the command result = __salt__['cmd.run_all'](cmd, cache_path, - output_loglevel='quiet', python_shell=False, redirect_stderr=True) if not result['retcode']: @@ -1600,19 +1599,20 @@ def remove(name=None, pkgs=None, version=None, **kwargs): #Compute msiexec string use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False)) + # Build cmd and arguments + # cmd and arguments must be separated for use with the task scheduler + if use_msiexec: + cmd = msiexec + arguments = ['/x'] + arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False)) + else: + cmd = expanded_cached_pkg + arguments = salt.utils.shlex_split(uninstall_flags, posix=False) + # Uninstall the software # Check Use Scheduler Option if pkginfo[target].get('use_scheduler', False): - # Build Scheduled Task Parameters - if use_msiexec: - cmd = msiexec - arguments = ['/x'] - arguments.extend(salt.utils.shlex_split(uninstall_flags)) - else: - cmd = expanded_cached_pkg - arguments = salt.utils.shlex_split(uninstall_flags) - # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', user_name='System', @@ -1633,16 +1633,12 @@ def remove(name=None, pkgs=None, version=None, **kwargs): ret[pkgname] = {'uninstall status': 'failed'} else: # Build the install command - cmd = [] - if use_msiexec: - cmd.extend([msiexec, '/x', expanded_cached_pkg]) - else: - cmd.append(expanded_cached_pkg) - cmd.extend(salt.utils.shlex_split(uninstall_flags)) + cmd = [cmd] + cmd.extend(arguments) + # Launch the command result = __salt__['cmd.run_all']( cmd, - output_loglevel='trace', python_shell=False, redirect_stderr=True) if not 
result['retcode']: From 1546c1ca0468f71b24b720503a1f1a2e9ccf5521 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 15 Sep 2017 11:16:50 -0600 Subject: [PATCH 071/348] Add posix=False to call to salt.utils.shlex_split --- salt/modules/win_pkg.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 1f6f20b8a35..e6c33e5f12e 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1206,10 +1206,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): arguments = ['/i', cached_pkg] if pkginfo[version_num].get('allusers', True): arguments.append('ALLUSERS="1"') - arguments.extend(salt.utils.shlex_split(install_flags)) + arguments.extend(salt.utils.shlex_split(install_flags, posix=False)) else: cmd = cached_pkg - arguments = salt.utils.shlex_split(install_flags) + arguments = salt.utils.shlex_split(install_flags, posix=False) # Install the software # Check Use Scheduler Option @@ -1513,10 +1513,10 @@ def remove(name=None, pkgs=None, version=None, **kwargs): if use_msiexec: cmd = msiexec arguments = ['/x'] - arguments.extend(salt.utils.shlex_split(uninstall_flags)) + arguments.extend(salt.utils.shlex_split(uninstall_flags, posix=False)) else: cmd = expanded_cached_pkg - arguments = salt.utils.shlex_split(uninstall_flags) + arguments = salt.utils.shlex_split(uninstall_flags, posix=False) # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', @@ -1543,7 +1543,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs): cmd.extend([msiexec, '/x', expanded_cached_pkg]) else: cmd.append(expanded_cached_pkg) - cmd.extend(salt.utils.shlex_split(uninstall_flags)) + cmd.extend(salt.utils.shlex_split(uninstall_flags, posix=False)) # Launch the command result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', From 1b0a4d39d23ebbafd09d5a5b66c95aaecde69ce1 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 15 Sep 2017 13:37:53 -0600 Subject: [PATCH 
072/348] Fix logic in `/etc/paths.d/salt` detection --- pkg/osx/pkg-scripts/preinstall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index c919cafcb1c..7e92eeab6ad 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -129,7 +129,7 @@ fi ############################################################################### # Remove the salt from the paths.d ############################################################################### -if [ ! -f "/etc/paths.d/salt" ]; then +if [ -f "/etc/paths.d/salt" ]; then echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt" rm "/etc/paths.d/salt" echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" From f33395f1eef85ba08830825ca9ed770485690b33 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 15 Sep 2017 13:41:23 -0600 Subject: [PATCH 073/348] Fix logic in `/etc/paths.d/salt` detection --- pkg/osx/pkg-scripts/preinstall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index 3eb4235107a..8c671c6df97 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -132,7 +132,7 @@ fi ############################################################################### # Remove the salt from the paths.d ############################################################################### -if [ ! -f "/etc/paths.d/salt" ]; then +if [ -f "/etc/paths.d/salt" ]; then echo "Path: Removing salt from the path..." 
>> "$TEMP_DIR/preinstall.txt" rm "/etc/paths.d/salt" echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" From 56be5c35eb20b99da7be924f1784edbc18994759 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 15 Sep 2017 16:08:21 -0600 Subject: [PATCH 074/348] Improve logic for handling chocolatey states --- salt/states/chocolatey.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/salt/states/chocolatey.py b/salt/states/chocolatey.py index 60627e18881..141d5e7d593 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -114,11 +114,15 @@ def installed(name, version=None, source=None, force=False, pre_versions=False, if salt.utils.compare_versions( ver1=installed_version, oper="==", ver2=version): if force: - ret['changes'] = {name: 'Version {0} will be reinstalled' - ''.format(version)} + ret['changes'] = { + name: 'Version {0} will be reinstalled'.format(version)} + ret['comment'] = 'Reinstall {0} {1}' \ + ''.format(full_name, version) else: ret['comment'] = '{0} {1} is already installed' \ ''.format(name, version) + if __opts__['test']: + ret['result'] = None return ret else: if allow_multiple: @@ -126,19 +130,27 @@ def installed(name, version=None, source=None, force=False, pre_versions=False, name: 'Version {0} will be installed side by side with ' 'Version {1} if supported' ''.format(version, installed_version)} + ret['comment'] = 'Install {0} {1} side-by-side with {0} {2}' \ + ''.format(full_name, version, installed_version) else: ret['changes'] = { - name: 'Version {0} will be installed over Existing ' - 'Version {1}'.format(version, installed_version)} + name: 'Version {0} will be installed over Version {1} ' + ''.format(version, installed_version)} + ret['comment'] = 'Install {0} {1} over {0} {2}' \ + ''.format(full_name, version, installed_version) force = True else: version = installed_version if force: - ret['changes'] = {name: 'Version {0} will be reinstalled' - ''.format(version)} + 
ret['changes'] = { + name: 'Version {0} will be reinstalled'.format(version)} + ret['comment'] = 'Reinstall {0} {1}' \ + ''.format(full_name, version) else: ret['comment'] = '{0} {1} is already installed' \ ''.format(name, version) + if __opts__['test']: + ret['result'] = None return ret if __opts__['test']: From 54216177c1f76c04d46482f6c33a00a53fc1e47e Mon Sep 17 00:00:00 2001 From: "Z. Liu" Date: Sat, 16 Sep 2017 10:42:37 +0800 Subject: [PATCH 075/348] _search_name is '' if acl type is other --- salt/states/linux_acl.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index a6a54a7fcdc..285a37ba379 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -81,11 +81,12 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): # applied to the user/group that owns the file, e.g., # default:group::rwx would be listed as default:group:root:rwx # In this case, if acl_name is empty, we really want to search for root + # but still uses '' for other # We search through the dictionary getfacl returns for the owner of the # file if acl_name is empty. if acl_name == '': - _search_name = __current_perms[name].get('comment').get(_acl_type) + _search_name = __current_perms[name].get('comment').get(_acl_type, '') else: _search_name = acl_name @@ -150,11 +151,12 @@ def absent(name, acl_type, acl_name='', perms='', recurse=False): # applied to the user/group that owns the file, e.g., # default:group::rwx would be listed as default:group:root:rwx # In this case, if acl_name is empty, we really want to search for root + # but still uses '' for other # We search through the dictionary getfacl returns for the owner of the # file if acl_name is empty. 
if acl_name == '': - _search_name = __current_perms[name].get('comment').get(_acl_type) + _search_name = __current_perms[name].get('comment').get(_acl_type, '') else: _search_name = acl_name From 2ccabe296e20f8db06afa1c66968481d8893228a Mon Sep 17 00:00:00 2001 From: Yagnik Date: Tue, 27 Jun 2017 14:23:35 +0530 Subject: [PATCH 076/348] Add support for encrypted tag --- salt/serializers/yaml.py | 16 ++++++++++++++++ tests/unit/serializers/test_serializers.py | 6 ++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/salt/serializers/yaml.py b/salt/serializers/yaml.py index 2fad384d1bb..e893c3f3898 100644 --- a/salt/serializers/yaml.py +++ b/salt/serializers/yaml.py @@ -77,10 +77,25 @@ def serialize(obj, **options): raise SerializationError(error) +class EncryptedString(str): + + yaml_tag = u'!encrypted' + + @staticmethod + def yaml_constructor(loader, tag, node): + return EncryptedString(loader.construct_scalar(node)) + + @staticmethod + def yaml_dumper(dumper, data): + return dumper.represent_scalar(EncryptedString.yaml_tag, data.__str__()) + + class Loader(BaseLoader): # pylint: disable=W0232 '''Overwrites Loader as not for pollute legacy Loader''' pass + +Loader.add_multi_constructor(EncryptedString.yaml_tag, EncryptedString.yaml_constructor) Loader.add_multi_constructor('tag:yaml.org,2002:null', Loader.construct_yaml_null) Loader.add_multi_constructor('tag:yaml.org,2002:bool', Loader.construct_yaml_bool) Loader.add_multi_constructor('tag:yaml.org,2002:int', Loader.construct_yaml_int) @@ -100,6 +115,7 @@ class Dumper(BaseDumper): # pylint: disable=W0232 '''Overwrites Dumper as not for pollute legacy Dumper''' pass +Dumper.add_multi_representer(EncryptedString, EncryptedString.yaml_dumper) Dumper.add_multi_representer(type(None), Dumper.represent_none) Dumper.add_multi_representer(str, Dumper.represent_str) if six.PY2: diff --git a/tests/unit/serializers/test_serializers.py b/tests/unit/serializers/test_serializers.py index 4f4890e06e0..980405f8b81 
100644 --- a/tests/unit/serializers/test_serializers.py +++ b/tests/unit/serializers/test_serializers.py @@ -18,6 +18,7 @@ import salt.serializers.yaml as yaml import salt.serializers.yamlex as yamlex import salt.serializers.msgpack as msgpack import salt.serializers.python as python +from salt.serializers.yaml import EncryptedString from salt.serializers import SerializationError from salt.utils.odict import OrderedDict @@ -43,10 +44,11 @@ class TestSerializers(TestCase): @skipIf(not yaml.available, SKIP_MESSAGE % 'yaml') def test_serialize_yaml(self): data = { - "foo": "bar" + "foo": "bar", + "encrypted_data": EncryptedString("foo") } serialized = yaml.serialize(data) - assert serialized == '{foo: bar}', serialized + assert serialized == '{encrypted_data: !encrypted foo, foo: bar}', serialized deserialized = yaml.deserialize(serialized) assert deserialized == data, deserialized From 1bd263cd51ba7de36ca430250d1961c2bfe8ece5 Mon Sep 17 00:00:00 2001 From: Wedge Jarrad Date: Sat, 16 Sep 2017 22:49:08 -0700 Subject: [PATCH 077/348] Clean up doc formatting in selinux state & module Reformat fcontext methods so that the online documentation will render properly. Add versionadded directives to the fcontext methods added in 2017.7.0. --- salt/modules/selinux.py | 108 +++++++++++++++++++++++++++------------- salt/states/selinux.py | 65 ++++++++++++++++-------- 2 files changed, 118 insertions(+), 55 deletions(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index d227b12eb48..208eee03f5a 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -374,8 +374,10 @@ def list_semod(): def _validate_filetype(filetype): ''' - Checks if the given filetype is a valid SELinux filetype specification. - Throws an SaltInvocationError if it isn't. + .. versionadded:: 2017.7.0 + + Checks if the given filetype is a valid SELinux filetype + specification. Throws an SaltInvocationError if it isn't. 
''' if filetype not in _SELINUX_FILETYPES.keys(): raise SaltInvocationError('Invalid filetype given: {0}'.format(filetype)) @@ -384,6 +386,8 @@ def _validate_filetype(filetype): def _context_dict_to_string(context): ''' + .. versionadded:: 2017.7.0 + Converts an SELinux file context from a dict to a string. ''' return '{sel_user}:{sel_role}:{sel_type}:{sel_level}'.format(**context) @@ -391,6 +395,8 @@ def _context_dict_to_string(context): def _context_string_to_dict(context): ''' + .. versionadded:: 2017.7.0 + Converts an SELinux file context from string to dict. ''' if not re.match('[^:]+:[^:]+:[^:]+:[^:]+$', context): @@ -405,8 +411,11 @@ def _context_string_to_dict(context): def filetype_id_to_string(filetype='a'): ''' - Translates SELinux filetype single-letter representation - to a more human-readable version (which is also used in `semanage fcontext -l`). + .. versionadded:: 2017.7.0 + + Translates SELinux filetype single-letter representation to a more + human-readable version (which is also used in `semanage fcontext + -l`). ''' _validate_filetype(filetype) return _SELINUX_FILETYPES.get(filetype, 'error') @@ -414,20 +423,27 @@ def filetype_id_to_string(filetype='a'): def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_level=None): ''' - Returns the current entry in the SELinux policy list as a dictionary. - Returns None if no exact match was found + .. versionadded:: 2017.7.0 + + Returns the current entry in the SELinux policy list as a + dictionary. Returns None if no exact match was found. 
+ Returned keys are: - - filespec (the name supplied and matched) - - filetype (the descriptive name of the filetype supplied) - - sel_user, sel_role, sel_type, sel_level (the selinux context) + + * filespec (the name supplied and matched) + * filetype (the descriptive name of the filetype supplied) + * sel_user, sel_role, sel_type, sel_level (the selinux context) + For a more in-depth explanation of the selinux context, go to https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Security-Enhanced_Linux/chap-Security-Enhanced_Linux-SELinux_Contexts.html - name: filespec of the file or directory. Regex syntax is allowed. - filetype: The SELinux filetype specification. - Use one of [a, f, d, c, b, s, l, p]. - See also `man semanage-fcontext`. - Defaults to 'a' (all files) + name + filespec of the file or directory. Regex syntax is allowed. + + filetype + The SELinux filetype specification. Use one of [a, f, d, c, b, + s, l, p]. See also `man semanage-fcontext`. Defaults to 'a' + (all files). CLI Example: @@ -460,20 +476,34 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None): ''' - Sets or deletes the SELinux policy for a given filespec and other optional parameters. - Returns the result of the call to semanage. - Note that you don't have to remove an entry before setting a new one for a given - filespec and filetype, as adding one with semanage automatically overwrites a - previously configured SELinux context. + .. versionadded:: 2017.7.0 - name: filespec of the file or directory. Regex syntax is allowed. - file_type: The SELinux filetype specification. - Use one of [a, f, d, c, b, s, l, p]. - See also ``man semanage-fcontext``. - Defaults to 'a' (all files) - sel_type: SELinux context type. There are many. - sel_user: SELinux user. 
Use ``semanage login -l`` to determine which ones are available to you - sel_level: The MLS range of the SELinux context. + Sets or deletes the SELinux policy for a given filespec and other + optional parameters. + + Returns the result of the call to semanage. + + Note that you don't have to remove an entry before setting a new + one for a given filespec and filetype, as adding one with semanage + automatically overwrites a previously configured SELinux context. + + name + filespec of the file or directory. Regex syntax is allowed. + + file_type + The SELinux filetype specification. Use one of [a, f, d, c, b, + s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a' + (all files). + + sel_type + SELinux context type. There are many. + + sel_user + SELinux user. Use ``semanage login -l`` to determine which ones + are available to you. + + sel_level + The MLS range of the SELinux context. CLI Example: @@ -499,10 +529,14 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se def fcontext_policy_is_applied(name, recursive=False): ''' - Returns an empty string if the SELinux policy for a given filespec is applied, - returns string with differences in policy and actual situation otherwise. + .. versionadded:: 2017.7.0 - name: filespec of the file or directory. Regex syntax is allowed. + Returns an empty string if the SELinux policy for a given filespec + is applied, returns string with differences in policy and actual + situation otherwise. + + name + filespec of the file or directory. Regex syntax is allowed. CLI Example: @@ -519,11 +553,17 @@ def fcontext_policy_is_applied(name, recursive=False): def fcontext_apply_policy(name, recursive=False): ''' - Applies SElinux policies to filespec using `restorecon [-R] filespec`. - Returns dict with changes if succesful, the output of the restorecon command otherwise. + .. versionadded:: 2017.7.0 - name: filespec of the file or directory. Regex syntax is allowed. 
- recursive: Recursively apply SELinux policies. + Applies SElinux policies to filespec using `restorecon [-R] + filespec`. Returns dict with changes if succesful, the output of + the restorecon command otherwise. + + name + filespec of the file or directory. Regex syntax is allowed. + + recursive + Recursively apply SELinux policies. CLI Example: diff --git a/salt/states/selinux.py b/salt/states/selinux.py index 8187ea8338d..3c2a3ee8178 100644 --- a/salt/states/selinux.py +++ b/salt/states/selinux.py @@ -310,17 +310,27 @@ def module_remove(name): def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_level=None): ''' - Makes sure a SELinux policy for a given filespec (name), - filetype and SELinux context type is present. + .. versionadded:: 2017.7.0 - name: filespec of the file or directory. Regex syntax is allowed. - sel_type: SELinux context type. There are many. - filetype: The SELinux filetype specification. - Use one of [a, f, d, c, b, s, l, p]. - See also `man semanage-fcontext`. - Defaults to 'a' (all files) - sel_user: The SELinux user. - sel_level: The SELinux MLS range + Makes sure a SELinux policy for a given filespec (name), filetype + and SELinux context type is present. + + name + filespec of the file or directory. Regex syntax is allowed. + + sel_type + SELinux context type. There are many. + + filetype + The SELinux filetype specification. Use one of [a, f, d, c, b, + s, l, p]. See also `man semanage-fcontext`. Defaults to 'a' + (all files). + + sel_user + The SELinux user. + + sel_level + The SELinux MLS range. ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} new_state = {} @@ -383,17 +393,27 @@ def fcontext_policy_present(name, sel_type, filetype='a', sel_user=None, sel_lev def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel_level=None): ''' - Makes sure an SELinux file context policy for a given filespec (name), - filetype and SELinux context type is absent. + .. 
versionadded:: 2017.7.0 - name: filespec of the file or directory. Regex syntax is allowed. - filetype: The SELinux filetype specification. - Use one of [a, f, d, c, b, s, l, p]. - See also `man semanage-fcontext`. - Defaults to 'a' (all files). - sel_type: The SELinux context type. There are many. - sel_user: The SELinux user. - sel_level: The SELinux MLS range + Makes sure an SELinux file context policy for a given filespec + (name), filetype and SELinux context type is absent. + + name + filespec of the file or directory. Regex syntax is allowed. + + filetype + The SELinux filetype specification. Use one of [a, f, d, c, b, + s, l, p]. See also `man semanage-fcontext`. Defaults to 'a' + (all files). + + sel_type + The SELinux context type. There are many. + + sel_user + The SELinux user. + + sel_level + The SELinux MLS range. ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} new_state = {} @@ -433,7 +453,10 @@ def fcontext_policy_absent(name, filetype='a', sel_type=None, sel_user=None, sel def fcontext_policy_applied(name, recursive=False): ''' - Checks and makes sure the SELinux policies for a given filespec are applied. + .. versionadded:: 2017.7.0 + + Checks and makes sure the SELinux policies for a given filespec are + applied. 
''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} From 4171d11838611dcf5c7d9950fd457a3171c14437 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Mon, 18 Sep 2017 14:50:35 +1000 Subject: [PATCH 078/348] utils.files.safe_filepath add support to override the os default directory separator Note this function is not currently in use, separate PR will trigger the use of this function --- salt/utils/files.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/salt/utils/files.py b/salt/utils/files.py index 8d463756d9b..2dce8926021 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -271,6 +271,8 @@ def safe_filename_leaf(file_basename): windows is \\ / : * ? " < > | posix is / .. versionadded:: 2017.7.2 + + :codeauthor: Damon Atkins ''' def _replace(re_obj): return urllib.quote(re_obj.group(0), safe=u'') @@ -283,16 +285,24 @@ def safe_filename_leaf(file_basename): return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) -def safe_filepath(file_path_name): +def safe_filepath(file_path_name, dir_sep=None): ''' Input the full path and filename, splits on directory separator and calls safe_filename_leaf for - each part of the path. + each part of the path. dir_sep allows coder to force a directory separator to a particular character .. 
versionadded:: 2017.7.2 + + :codeauthor: Damon Atkins ''' + if not dir_sep: + dir_sep = os.sep + # Normally if file_path_name or dir_sep is Unicode then the output will be Unicode + # This code ensures the output type is the same as file_path_name + if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type): + dir_sep = dir_sep.encode('ascii') # This should not be executed under PY3 + # splitdrive only sets drive on windows platform (drive, path) = os.path.splitdrive(file_path_name) - path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) + path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)]) if drive: - return os.sep.join([drive, path]) - else: - return path + path = dir_sep.join([drive, path]) + return path From 00e9637738ffbcb0e532bf67bc644a0125b3d3ef Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Mon, 18 Sep 2017 12:43:53 +0300 Subject: [PATCH 079/348] corrected lint errors --- salt/cloud/clouds/xen.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index dffff9aa4b2..558c7cacb61 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -157,7 +157,8 @@ def _get_session(): user, 'XXX-pw-redacted-XXX', originator)) - session.xenapi.login_with_password(user, password, api_version, originator) + session.xenapi.login_with_password( + user, password, api_version, originator) except XenAPI.Failure as ex: ''' if the server on the url is not the pool master, the pool master's 
record['name_label'])) ret[record['name_label']] = {'id': record['uuid'], - 'image': base_template_name, - 'name': record['name_label'], - 'size': record['memory_dynamic_max'], - 'state': record['power_state'], - 'private_ips': get_vm_ip(record['name_label'], session), - 'public_ips': None} + 'image': base_template_name, + 'name': record['name_label'], + 'size': record['memory_dynamic_max'], + 'state': record['power_state'], + 'private_ips': get_vm_ip(record['name_label'], session), + 'public_ips': None} return ret From 60e6958bd15c2287f656bb3b22be051b29993e39 Mon Sep 17 00:00:00 2001 From: Levi Dahl Michelsen Date: Mon, 18 Sep 2017 12:09:57 +0200 Subject: [PATCH 080/348] Added versionadded comment --- salt/pillar/rethinkdb_pillar.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index 0a4793205f6..309fcaf7ef3 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -2,6 +2,8 @@ ''' Provide external pillar data from RethinkDB +.. versionadded:: Oxygen + :depends: rethinkdb (on the salt-master) From 36429003945f7c2ead45aa223e9f4b21d2af574c Mon Sep 17 00:00:00 2001 From: Levi Dahl Michelsen Date: Mon, 18 Sep 2017 12:11:17 +0200 Subject: [PATCH 081/348] Fixed indentation mismatch in ext_pillar docstring --- salt/pillar/rethinkdb_pillar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index 309fcaf7ef3..8f600d809e0 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -84,7 +84,7 @@ def ext_pillar(minion_id, ''' Collect minion external pillars from a RethinkDB database -Arguments: + Arguments: * `table`: The RethinkDB table containing external pillar information. Defaults to ``'pillar'`` * `id_field`: Field in document containing the minion id. 
From df60501a80a3228dc5cda6e24e0ce188ea7de21a Mon Sep 17 00:00:00 2001 From: Levi Dahl Michelsen Date: Mon, 18 Sep 2017 12:16:18 +0200 Subject: [PATCH 082/348] Removed import shorthand name for rethinkdb module --- salt/pillar/rethinkdb_pillar.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index 8f600d809e0..bf7c8162218 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -49,7 +49,7 @@ import logging # Import 3rd party libraries try: - import rethinkdb as r + import rethinkdb HAS_RETHINKDB = True except ImportError: HAS_RETHINKDB = False @@ -105,11 +105,11 @@ def ext_pillar(minion_id, .format(host, port, username)) # Connect to the database - conn = r.connect(host=host, - port=port, - db=database, - user=username, - password=password) + conn = rethinkdb.connect(host=host, + port=port, + db=database, + user=username, + password=password) data = None @@ -121,10 +121,11 @@ def ext_pillar(minion_id, table, id_field, minion_id)) if field: - data = r.table(table).filter( + data = rethinkdb.table(table).filter( {id_field: minion_id}).pluck(field).run(conn) else: - data = r.table(table).filter({id_field: minion_id}).run(conn) + data = rethinkdb.table(table).filter( + {id_field: minion_id}).run(conn) else: log.debug('ext_pillar.rethinkdb: looking up pillar. 
' @@ -132,9 +133,10 @@ def ext_pillar(minion_id, table, minion_id)) if field: - data = r.table(table).get(minion_id).pluck(field).run(conn) + data = rethinkdb.table(table).get(minion_id).pluck(field).run( + conn) else: - data = r.table(table).get(minion_id).run(conn) + data = rethinkdb.table(table).get(minion_id).run(conn) finally: if conn.is_open(): @@ -158,4 +160,4 @@ def ext_pillar(minion_id, else: # No document found in the database log.debug('ext_pillar.rethinkdb: no document found') - return {} + return {} \ No newline at end of file From fc269b06843af997588adc100f114dc41f7d38b8 Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Mon, 18 Sep 2017 15:12:11 +0200 Subject: [PATCH 083/348] Add missing devices to RAID array Implements #40100 --- salt/modules/mdadm.py | 36 +++++++++++++ salt/states/mdadm.py | 119 +++++++++++++++++++++++++++++++----------- 2 files changed, 124 insertions(+), 31 deletions(-) diff --git a/salt/modules/mdadm.py b/salt/modules/mdadm.py index 0b453a26898..334cd46e731 100644 --- a/salt/modules/mdadm.py +++ b/salt/modules/mdadm.py @@ -356,3 +356,39 @@ def assemble(name, return cmd elif test_mode is False: return __salt__['cmd.run'](cmd, python_shell=False) + +def examine(device): + ''' + Show detail for a specified RAID component device + + CLI Example: + + .. code-block:: bash + + salt '*' raid.examine '/dev/sda1' + ''' + res = __salt__['cmd.run_stdout']('mdadm -Y -E {0}'.format(device), output_loglevel='trace', python_shell=False) + ret = {} + + for line in res.splitlines(): + name, var = line.partition("=")[::2] + ret[name] = var + return ret + + +def add(name, device): + ''' + Add new device to RAID array. + + CLI Example: + + .. 
code-block:: bash + + salt '*' raid.add /dev/md0 /dev/sda1 + + ''' + + cmd = 'mdadm --manage {0} --add {1}'.format(name, device) + if __salt__['cmd.retcode'](cmd) == 0: + return True + return False diff --git a/salt/states/mdadm.py b/salt/states/mdadm.py index 2b1c8340877..067c5c4c2f5 100644 --- a/salt/states/mdadm.py +++ b/salt/states/mdadm.py @@ -88,69 +88,126 @@ def present(name, # Device exists raids = __salt__['raid.list']() - if raids.get(name): - ret['comment'] = 'Raid {0} already present'.format(name) - return ret + present = raids.get(name) # Decide whether to create or assemble - can_assemble = {} - for dev in devices: - # mdadm -E exits with 0 iff all devices given are part of an array - cmd = 'mdadm -E {0}'.format(dev) - can_assemble[dev] = __salt__['cmd.retcode'](cmd) == 0 + missing = [] + uuid_dict = {} + new_devices = [] - if True in six.itervalues(can_assemble) and False in six.itervalues(can_assemble): - in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if x[1]]) - not_in_raid = sorted([x[0] for x in six.iteritems(can_assemble) if not x[1]]) - ret['comment'] = 'Devices are a mix of RAID constituents ({0}) and '\ - 'non-RAID-constituents({1}).'.format(in_raid, not_in_raid) + for dev in devices: + if dev == 'missing' or not __salt__['file.access'](dev, 'f'): + missing.append(dev) + continue + superblock = __salt__['raid.examine'](dev) + + if 'MD_UUID' in superblock: + uuid = superblock['MD_UUID'] + if uuid not in uuid_dict: + uuid_dict[uuid] = [] + uuid_dict[uuid].append(dev) + else: + new_devices.append(dev) + + if len(uuid_dict) > 1: + ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format(uuid_dict.keys()) ret['result'] = False return ret - elif next(six.itervalues(can_assemble)): + elif len(uuid_dict) == 1: + uuid = uuid_dict.keys()[0] + if present and present['uuid'] != uuid: + ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, present['uuid']) + 
ret['result'] = False + return ret + + devices_with_superblock = uuid_dict[uuid] + else: + devices_with_superblock = [] + + if present: + do_assemble = False + do_create = False + elif len(devices_with_superblock) > 0: do_assemble = True + do_create = False verb = 'assembled' else: + if len(new_devices) == 0: + ret['comment'] = 'All devices are missing: {0}.'.format(missing) + ret['result'] = False + return ret do_assemble = False + do_create = True verb = 'created' # If running with test use the test_mode with create or assemble if __opts__['test']: if do_assemble: res = __salt__['raid.assemble'](name, - devices, + devices_with_superblock, test_mode=True, **kwargs) - else: + elif do_create: res = __salt__['raid.create'](name, level, - devices, + new_devices + ['missing'] * len(missing), test_mode=True, **kwargs) - ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res) - ret['result'] = None + + if present: + ret['comment'] = 'Raid {0} already present.'.format(name) + + if do_assemble or do_create: + ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res) + ret['result'] = None + + if (do_assemble or present) and len(new_devices) > 0: + ret['comment'] += ' New devices will be added: {0}'.format(new_devices) + ret['result'] = None + + if len(missing) > 0: + ret['comment'] += ' Missing devices: {0}'.format(missing) + return ret # Attempt to create or assemble the array if do_assemble: __salt__['raid.assemble'](name, - devices, + devices_with_superblock, **kwargs) - else: + elif do_create: __salt__['raid.create'](name, level, - devices, + new_devices + ['missing'] * len(missing), **kwargs) - raids = __salt__['raid.list']() - changes = raids.get(name) - if changes: - ret['comment'] = 'Raid {0} {1}.'.format(name, verb) - ret['changes'] = changes - # Saving config - __salt__['raid.save_config']() + if not present: + raids = __salt__['raid.list']() + changes = raids.get(name) + if changes: + ret['comment'] = 'Raid {0} {1}.'.format(name, verb) + 
ret['changes'] = changes + # Saving config + __salt__['raid.save_config']() + else: + ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb) + ret['result'] = False else: - ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb) - ret['result'] = False + ret['comment'] = 'Raid {0} already present.'.format(name) + + if (do_assemble or present) and len(new_devices) > 0: + for d in new_devices: + res = __salt__['raid.add'](name, d) + if not res: + ret['comment'] += ' Unable to add {0} to {1}.\n'.format(d, name) + ret['result'] = False + else: + ret['comment'] += ' Added new device {0} to {1}.\n'.format(d, name) + if ret['result']: + ret['changes']['added'] = new_devices + + if len(missing) > 0: + ret['comment'] += ' Missing devices: {0}'.format(missing) return ret From 21966e7ce82d9c1f565ea5fb76320202e74d64d0 Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Mon, 18 Sep 2017 16:34:59 +0300 Subject: [PATCH 084/348] cloud.action: list_nodes_min returns all instances --- salt/cloud/clouds/ec2.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index f47d2d93c30..a6382a08601 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -3472,16 +3472,15 @@ def list_nodes_min(location=None, call=None): for instance in instances: if isinstance(instance['instancesSet']['item'], list): - for item in instance['instancesSet']['item']: - state = item['instanceState']['name'] - name = _extract_name_tag(item) - id = item['instanceId'] + items = instance['instancesSet']['item'] else: - item = instance['instancesSet']['item'] + items = [instance['instancesSet']['item']] + + for item in items: state = item['instanceState']['name'] name = _extract_name_tag(item) id = item['instanceId'] - ret[name] = {'state': state, 'id': id} + ret[name] = {'state': state, 'id': id} return ret From fb579321a912ee3615284d77f37c61a60e3a1338 Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: 
Fri, 15 Sep 2017 10:00:17 -0500 Subject: [PATCH 085/348] Add back lost logic for multifunc_ordered PR #38168 was merged but some of the merged logic was subseqently lost. Add back the lost logic so that the feature may work again. Signed-off-by: Sergey Kizunov --- salt/minion.py | 50 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index 6b7c82a8d7f..c56010bad98 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1600,13 +1600,24 @@ class Minion(MinionBase): minion side execution. ''' salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) - ret = { - 'return': {}, - 'retcode': {}, - 'success': {} - } - for ind in range(0, len(data['fun'])): - ret['success'][data['fun'][ind]] = False + multifunc_ordered = opts.get('multifunc_ordered', False) + num_funcs = len(data['fun']) + if multifunc_ordered: + ret = { + 'return': [None] * num_funcs, + 'retcode': [None] * num_funcs, + 'success': [False] * num_funcs + } + else: + ret = { + 'return': {}, + 'retcode': {}, + 'success': {} + } + + for ind in range(0, num_funcs): + if not multifunc_ordered: + ret['success'][data['fun'][ind]] = False try: if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): # this minion is blacked out. 
Only allow saltutil.refresh_pillar @@ -1621,12 +1632,20 @@ class Minion(MinionBase): data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 - ret['return'][data['fun'][ind]] = func(*args, **kwargs) - ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get( - 'retcode', - 0 - ) - ret['success'][data['fun'][ind]] = True + if multifunc_ordered: + ret['return'][ind] = func(*args, **kwargs) + ret['retcode'][ind] = minion_instance.functions.pack['__context__'].get( + 'retcode', + 0 + ) + ret['success'][ind] = True + else: + ret['return'][data['fun'][ind]] = func(*args, **kwargs) + ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get( + 'retcode', + 0 + ) + ret['success'][data['fun'][ind]] = True except Exception as exc: trb = traceback.format_exc() log.warning( @@ -1634,7 +1653,10 @@ class Minion(MinionBase): exc ) ) - ret['return'][data['fun'][ind]] = trb + if multifunc_ordered: + ret['return'][ind] = trb + else: + ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] From 9fe32f8b6e6e6075346bf4754bea17400ea8ef42 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Mon, 28 Aug 2017 12:46:26 +0300 Subject: [PATCH 086/348] Regex support for user names in external_auth config. 
--- salt/auth/__init__.py | 41 ++++------------------------------------- salt/config/__init__.py | 5 +++++ salt/utils/minions.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 37 deletions(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 73e4c98f8ae..b24bbd49263 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -377,46 +377,13 @@ class LoadAuth(object): eauth_config = self.opts['external_auth'][eauth] if not groups: groups = [] - group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups - - # First we need to know if the user is allowed to proceed via any of their group memberships. - group_auth_match = False - for group_config in group_perm_keys: - if group_config.rstrip('%') in groups: - group_auth_match = True - break - # If a group_auth_match is set it means only that we have a - # user which matches at least one or more of the groups defined - # in the configuration file. - - external_auth_in_db = False - for entry in eauth_config: - if entry.startswith('^'): - external_auth_in_db = True - break - - # If neither a catchall, a named membership or a group - # membership is found, there is no need to continue. Simply - # deny the user access. - if not ((name in eauth_config) | - ('*' in eauth_config) | - group_auth_match | external_auth_in_db): - # Auth successful, but no matching user found in config - log.warning('Authorization failure occurred.') - return None # We now have an authenticated session and it is time to determine # what the user has access to. 
- auth_list = [] - if name in eauth_config: - auth_list = eauth_config[name] - elif '*' in eauth_config: - auth_list = eauth_config['*'] - if group_auth_match: - auth_list = self.ckminions.fill_auth_list_from_groups( - eauth_config, - groups, - auth_list) + auth_list = self.ckminions.fill_auth_list( + eauth_config, + name, + groups) auth_list = self.__process_acl(load, auth_list) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index e4982744cda..c558768d1dd 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -717,6 +717,10 @@ VALID_OPTS = { 'fileserver_limit_traversal': bool, 'fileserver_verify_config': bool, + # Optionally apply '*' permissioins to any user. By default '*' is a fallback case that is + # applied only if the user didn't matched by other matchers. + 'permissive_acl': bool, + # Optionally enables keeping the calculated user's auth list in the token file. 'keep_acl_in_token': bool, @@ -1466,6 +1470,7 @@ DEFAULT_MASTER_OPTS = { 'external_auth': {}, 'token_expire': 43200, 'token_expire_user_override': False, + 'permissive_acl': False, 'keep_acl_in_token': False, 'eauth_acl_module': '', 'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'), diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 8afa41698c5..f84ad50e1d8 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -985,10 +985,37 @@ class CkMinions(object): auth_list.append(matcher) return auth_list + def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None): + ''' + Returns a list of authorisation matchers that a user is eligible for. + This list is a combination of the provided personal matchers plus the + matchers of any group the user is in. 
+ ''' + if auth_list is None: + auth_list = [] + if permissive is None: + permissive = self.opts.get('permissive_acl') + name_matched = False + for match in auth_provider: + if match == '*' and not permissive: + continue + if match.endswith('%'): + if match.rstrip('%') in groups: + auth_list.extend(auth_provider[match]) + else: + if salt.utils.expr_match(match, name): + name_matched = True + auth_list.extend(auth_provider[match]) + if not permissive and not name_matched and '*' in auth_provider: + auth_list.extend(auth_provider['*']) + return auth_list + def wheel_check(self, auth_list, fun): ''' Check special API permissions ''' + if not auth_list: + return False comps = fun.split('.') if len(comps) != 2: return False @@ -1020,6 +1047,8 @@ class CkMinions(object): ''' Check special API permissions ''' + if not auth_list: + return False comps = fun.split('.') if len(comps) != 2: return False @@ -1051,6 +1080,8 @@ class CkMinions(object): ''' Check special API permissions ''' + if not auth_list: + return False if form != 'cloud': comps = fun.split('.') if len(comps) != 2: From 14bf2dd8fff191fbbb73c7a3e5b9b570de250385 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 12 Sep 2017 23:10:06 +0300 Subject: [PATCH 087/348] Support regex in publisher_acl. 
--- doc/ref/publisheracl.rst | 3 +++ salt/daemons/masterapi.py | 31 +++++++++++++++++++------------ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/doc/ref/publisheracl.rst b/doc/ref/publisheracl.rst index eda868b5d28..5549c3c92a8 100644 --- a/doc/ref/publisheracl.rst +++ b/doc/ref/publisheracl.rst @@ -25,6 +25,9 @@ configuration: - web*: - test.* - pkg.* + # Allow managers to use saltutil module functions + manager_.*: + - saltutil.* Permission Issues ----------------- diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index d47a5c3aa64..f501f41938d 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -204,6 +204,14 @@ def clean_old_jobs(opts): def mk_key(opts, user): + if HAS_PWD: + uid = None + try: + uid = pwd.getpwnam(user).pw_uid + except KeyError: + # User doesn't exist in the system + if opts['client_acl_verify']: + return None if salt.utils.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. @@ -231,9 +239,9 @@ def mk_key(opts, user): # Write access is necessary since on subsequent runs, if the file # exists, it needs to be written to again. Windows enforces this. os.chmod(keyfile, 0o600) - if HAS_PWD: + if HAS_PWD and uid is not None: try: - os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1) + os.chown(keyfile, uid, -1) except OSError: # The master is not being run as root and can therefore not # chown the key file @@ -248,27 +256,26 @@ def access_keys(opts): ''' # TODO: Need a way to get all available users for systems not supported by pwd module. # For now users pattern matching will not work for publisher_acl. 
- users = [] keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) + for user in acl_users: + log.info('Preparing the %s key for local communication', user) + key = mk_key(opts, user) + if key is not None: + keys[user] = key + + # Check other users matching ACL patterns if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterarpi access_keys function') for user in pwd.getpwall(): - users.append(user.pw_name) - log.profile('End pwd.getpwall() call in masterarpi access_keys function') - for user in acl_users: - log.info('Preparing the %s key for local communication', user) - keys[user] = mk_key(opts, user) - - # Check other users matching ACL patterns - if HAS_PWD: - for user in users: + user = user.pw_name if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) + log.profile('End pwd.getpwall() call in masterarpi access_keys function') return keys From b1b4dafd396c8a92c83f6e3219b4cf05bab38b2a Mon Sep 17 00:00:00 2001 From: Andrew Colin Kissa Date: Mon, 18 Sep 2017 18:05:55 +0200 Subject: [PATCH 088/348] Fix CSR not recreated if key changes --- salt/modules/x509.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/modules/x509.py b/salt/modules/x509.py index ae5f8c7723b..b63188dd7d3 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -625,6 +625,8 @@ def read_csr(csr): # Get size returns in bytes. The world thinks of key sizes in bits. 'Subject': _parse_subject(csr.get_subject()), 'Subject Hash': _dec2hex(csr.get_subject().as_hash()), + 'Public Key Hash': hashlib.sha1(csr.get_pubkey().get_modulus())\ + .hexdigest() } ret['X509v3 Extensions'] = _get_csr_extensions(csr) From 117a0ddbbc11ff35dfce7e48f3519e82943b7865 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Mon, 18 Sep 2017 11:09:36 -0700 Subject: [PATCH 089/348] Updating the documentation to call out the requirement for the getfacl and setfacl binaries --- salt/modules/linux_acl.py | 3 +++ salt/states/linux_acl.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/salt/modules/linux_acl.py b/salt/modules/linux_acl.py index a7fa3cbd1cb..5969b24ea99 100644 --- a/salt/modules/linux_acl.py +++ b/salt/modules/linux_acl.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- ''' Support for Linux File Access Control Lists + +The Linux ACL module requires the `getfacl` and `setfacl` binaries. + ''' from __future__ import absolute_import diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index a6a54a7fcdc..4e3c7049b96 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -2,6 +2,8 @@ ''' Linux File Access Control Lists +The Linux ACL state module requires the `getfacl` and `setfacl` binaries. + Ensure a Linux ACL is present .. code-block:: yaml From 1a619708c1ab5f1c7383dee12bc8274c302fb8ad Mon Sep 17 00:00:00 2001 From: Mike Place Date: Mon, 18 Sep 2017 13:44:44 -0600 Subject: [PATCH 090/348] Enhance engines docs Add a note about formatting to make it more clear. --- doc/topics/engines/index.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/topics/engines/index.rst b/doc/topics/engines/index.rst index 5fbc99a0b93..c9a8ef52352 100644 --- a/doc/topics/engines/index.rst +++ b/doc/topics/engines/index.rst @@ -27,7 +27,12 @@ Salt engines are configured under an ``engines`` top-level section in your Salt port: 5959 proto: tcp -Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. +Salt engines must be in the Salt path, or you can add the ``engines_dirs`` option in your Salt master configuration with a list of directories under which Salt attempts to find Salt engines. 
This option should be formatted as a list of directories to search, such as: + +.. code-block:: yaml + + engines_dirs: + - /home/bob/engines Writing an Engine ================= From 4afb179bade9834e8d9980a66a978209e439d3a3 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 28 Aug 2017 19:33:06 -0500 Subject: [PATCH 091/348] Un-deprecate passing kwargs outside of 'kwarg' param --- salt/client/mixins.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index f5a29a9cbf3..bd69d269bf9 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -359,29 +359,20 @@ class SyncClientMixin(object): # packed into the top level object. The plan is to move away from # that since the caller knows what is an arg vs a kwarg, but while # we make the transition we will load "kwargs" using format_call if - # there are no kwargs in the low object passed in - f_call = None - if 'arg' not in low: - f_call = salt.utils.format_call( + # there are no kwargs in the low object passed in. + f_call = {} if 'arg' in low and 'kwarg' in low \ + else salt.utils.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - args = f_call.get('args', ()) - else: - args = low['arg'] - if 'kwarg' not in low: - log.critical( - 'kwargs must be passed inside the low data within the ' - '\'kwarg\' key. See usage of ' - 'salt.utils.args.parse_input() and ' - 'salt.minion.load_args_and_kwargs() elsewhere in the ' - 'codebase.' 
- ) - kwargs = {} - else: - kwargs = low['kwarg'] + args = f_call.get('args', ()) \ + if 'arg' not in low \ + else low['arg'] + kwargs = f_call.get('kwargs', {}) \ + if 'kwarg' not in low \ + else low['kwarg'] # Update the event data with loaded args and kwargs data['fun_args'] = list(args) + ([kwargs] if kwargs else []) From 2a35ab7f39846f695b27a347dc189b7cd2307112 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 31 Aug 2017 00:24:11 -0500 Subject: [PATCH 092/348] Unify reactor configuration, fix caller reactors There are 4 types of reactor jobs, and 3 different config schemas for passing arguments: 1. local - positional and keyword args passed in arg/kwarg params, respectively. 2. runner/wheel - passed as individual params directly under the function name. 3. caller - only positional args supported, passed under an "args" param. In addition to being wildly inconsistent, there are several problems with each of the above approaches: - For local jobs, having to know which are positional and keyword arguments is not user-friendly. - For runner/wheel jobs, the fact that the arguments are all passed in the level directly below the function name means that they are dumped directly into the low chunk. This means that if any arguments are passed which conflict with the reserved keywords in the low chunk (name, order, etc.), they will override their counterparts in the low chunk, which may make the Reactor behave unpredictably. To solve these issues, this commit makes the following changes: 1. A new, unified configuration schema has been added, so that arguments are passed identically across all types of reactions. In this new schema, all arguments are passed as named arguments underneath an "args" parameter. Those named arguments are then passed as keyword arguments to the desired function. 
This works even for positional arguments because Python will automagically pass a keyword argument as its positional counterpart when the name of a positional argument is found in the kwargs. 2. The caller jobs now support both positional and keyword arguments. Backward-compatibility with the old configuration schema has been preserved, so old Reactor SLS files do not break. In addition, you've probably already said to yourself "Hey, caller jobs were _already_ passing their arguments under an "args" param. What gives?" Well, using the old config schema, only positional arguments were supported. So if we detect a list of positional arguments, we treat the input as positional arguments (i.e. old schema), while if the input is a dictionary (or "dictlist"), we treat the input as kwargs (i.e. new schema). --- salt/utils/reactor.py | 231 +++++++++++++++++++++++++++++------------- 1 file changed, 159 insertions(+), 72 deletions(-) diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py index 57c4fd0863d..36971f5c36c 100644 --- a/salt/utils/reactor.py +++ b/salt/utils/reactor.py @@ -7,12 +7,14 @@ import glob import logging # Import salt libs +import salt.client import salt.runner import salt.state import salt.utils import salt.utils.cache import salt.utils.event import salt.utils.process +import salt.wheel import salt.defaults.exitcodes # Import 3rd-party libs @@ -21,6 +23,15 @@ import salt.ext.six as six log = logging.getLogger(__name__) +REACTOR_INTERNAL_KEYWORDS = frozenset([ + '__id__', + '__sls__', + 'name', + 'order', + 'fun', + 'state', +]) + class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.state.Compiler): ''' @@ -29,6 +40,10 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat The reactor has the capability to execute pre-programmed executions as reactions to events ''' + aliases = { + 'cmd': 'local', + } + def __init__(self, opts, log_queue=None): super(Reactor, self).__init__(log_queue=log_queue) 
local_minion_opts = opts.copy() @@ -171,6 +186,16 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat return {'status': False, 'comment': 'Reactor does not exists.'} + def resolve_aliases(self, chunks): + ''' + Preserve backward compatibility by rewriting the 'state' key in the low + chunks if it is using a legacy type. + ''' + for idx, _ in enumerate(chunks): + new_state = self.aliases.get(chunks[idx]['state']) + if new_state is not None: + chunks[idx]['state'] = new_state + def reactions(self, tag, data, reactors): ''' Render a list of reactor files and returns a reaction struct @@ -191,6 +216,7 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat except Exception as exc: log.error('Exception trying to compile reactions: {0}'.format(exc), exc_info=True) + self.resolve_aliases(chunks) return chunks def call_reactions(self, chunks): @@ -248,12 +274,19 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat class ReactWrap(object): ''' - Create a wrapper that executes low data for the reaction system + Wrapper that executes low data for the Reactor System ''' # class-wide cache of clients client_cache = None event_user = 'Reactor' + reaction_class = { + 'local': salt.client.LocalClient, + 'runner': salt.runner.RunnerClient, + 'wheel': salt.wheel.Wheel, + 'caller': salt.client.Caller, + } + def __init__(self, opts): self.opts = opts if ReactWrap.client_cache is None: @@ -264,21 +297,49 @@ class ReactWrap(object): queue_size=self.opts['reactor_worker_hwm'] # queue size for those workers ) + def populate_client_cache(self, low): + ''' + Populate the client cache with an instance of the specified type + ''' + reaction_type = low['state'] + if reaction_type not in self.client_cache: + log.debug('Reactor is populating %s client cache', reaction_type) + if reaction_type in ('runner', 'wheel'): + # Reaction types that run locally on the master want the full + # opts passed. 
+ self.client_cache[reaction_type] = \ + self.reaction_class[reaction_type](self.opts) + # The len() function will cause the module functions to load if + # they aren't already loaded. We want to load them so that the + # spawned threads don't need to load them. Loading in the + # spawned threads creates race conditions such as sometimes not + # finding the required function because another thread is in + # the middle of loading the functions. + len(self.client_cache[reaction_type].functions) + else: + # Reactions which use remote pubs only need the conf file when + # instantiating a client instance. + self.client_cache[reaction_type] = \ + self.reaction_class[reaction_type](self.opts['conf_file']) + def run(self, low): ''' - Execute the specified function in the specified state by passing the - low data + Execute a reaction by invoking the proper wrapper func ''' - l_fun = getattr(self, low['state']) + self.populate_client_cache(low) try: - f_call = salt.utils.format_call(l_fun, low) - kwargs = f_call.get('kwargs', {}) - if 'arg' not in kwargs: - kwargs['arg'] = [] - if 'kwarg' not in kwargs: - kwargs['kwarg'] = {} + l_fun = getattr(self, low['state']) + except AttributeError: + log.error( + 'ReactWrap is missing a wrapper function for \'%s\'', + low['state'] + ) - # TODO: Setting the user doesn't seem to work for actual remote publishes + try: + wrap_call = salt.utils.format_call(l_fun, low) + args = wrap_call.get('args', ()) + kwargs = wrap_call.get('kwargs', {}) + # TODO: Setting user doesn't seem to work for actual remote pubs if low['state'] in ('runner', 'wheel'): # Update called function's low data with event user to # segregate events fired by reactor and avoid reaction loops @@ -286,80 +347,106 @@ class ReactWrap(object): # Replace ``state`` kwarg which comes from high data compiler. # It breaks some runner functions and seems unnecessary. 
kwargs['__state__'] = kwargs.pop('state') + # NOTE: if any additional keys are added here, they will also + # need to be added to filter_kwargs() - l_fun(*f_call.get('args', ()), **kwargs) + if 'args' in kwargs: + # New configuration + reactor_args = kwargs.pop('args') + for item in ('arg', 'kwarg'): + if item in low: + log.warning( + 'Reactor \'%s\' is ignoring \'%s\' param %s due to ' + 'presence of \'args\' param. Check the Reactor System ' + 'documentation for the correct argument format.', + low['__id__'], item, low[item] + ) + if low['state'] == 'caller' \ + and isinstance(reactor_args, list) \ + and not salt.utils.is_dictlist(reactor_args): + # Legacy 'caller' reactors were already using the 'args' + # param, but only supported a list of positional arguments. + # If low['args'] is a list but is *not* a dictlist, then + # this is actually using the legacy configuration. So, put + # the reactor args into kwarg['arg'] so that the wrapper + # interprets them as positional args. + kwargs['arg'] = reactor_args + kwargs['kwarg'] = {} + else: + kwargs['arg'] = () + kwargs['kwarg'] = reactor_args + if not isinstance(kwargs['kwarg'], dict): + kwargs['kwarg'] = salt.utils.repack_dictlist(kwargs['kwarg']) + if not kwargs['kwarg']: + log.error( + 'Reactor \'%s\' failed to execute %s \'%s\': ' + 'Incorrect argument format, check the Reactor System ' + 'documentation for the correct format.', + low['__id__'], low['state'], low['fun'] + ) + return + else: + # Legacy configuration + react_call = {} + if low['state'] in ('runner', 'wheel'): + if 'arg' not in kwargs or 'kwarg' not in kwargs: + # Runner/wheel execute on the master, so we can use + # format_call to get the functions args/kwargs + react_fun = self.client_cache[low['state']].functions.get(low['fun']) + if react_fun is None: + log.error( + 'Reactor \'%s\' failed to execute %s \'%s\': ' + 'function not available', + low['__id__'], low['state'], low['fun'] + ) + return + + react_call = salt.utils.format_call( + 
react_fun, + low, + expected_extra_kws=REACTOR_INTERNAL_KEYWORDS + ) + + if 'arg' not in kwargs: + kwargs['arg'] = react_call.get('args', ()) + if 'kwarg' not in kwargs: + kwargs['kwarg'] = react_call.get('kwargs', {}) + + # Execute the wrapper with the proper args/kwargs. kwargs['arg'] + # and kwargs['kwarg'] contain the positional and keyword arguments + # that will be passed to the client interface to execute the + # desired runner/wheel/remote-exec/etc. function. + l_fun(*args, **kwargs) + except SystemExit: + log.warning( + 'Reactor \'%s\' attempted to exit. Ignored.', low['__id__'] + ) except Exception: log.error( - 'Failed to execute {0}: {1}\n'.format(low['state'], l_fun), - exc_info=True - ) - - def local(self, *args, **kwargs): - ''' - Wrap LocalClient for running :ref:`execution modules ` - ''' - if 'local' not in self.client_cache: - self.client_cache['local'] = salt.client.LocalClient(self.opts['conf_file']) - try: - self.client_cache['local'].cmd_async(*args, **kwargs) - except SystemExit: - log.warning('Attempt to exit reactor. Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) - - cmd = local + 'Reactor \'%s\' failed to execute %s \'%s\'', + low['__id__'], low['state'], low['fun'], exc_info=True + ) def runner(self, fun, **kwargs): ''' Wrap RunnerClient for executing :ref:`runner modules ` ''' - if 'runner' not in self.client_cache: - self.client_cache['runner'] = salt.runner.RunnerClient(self.opts) - # The len() function will cause the module functions to load if - # they aren't already loaded. We want to load them so that the - # spawned threads don't need to load them. Loading in the spawned - # threads creates race conditions such as sometimes not finding - # the required function because another thread is in the middle - # of loading the functions. 
- len(self.client_cache['runner'].functions) - try: - self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) - except SystemExit: - log.warning('Attempt to exit in reactor by runner. Ignored') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs)) def wheel(self, fun, **kwargs): ''' Wrap Wheel to enable executing :ref:`wheel modules ` ''' - if 'wheel' not in self.client_cache: - self.client_cache['wheel'] = salt.wheel.Wheel(self.opts) - # The len() function will cause the module functions to load if - # they aren't already loaded. We want to load them so that the - # spawned threads don't need to load them. Loading in the spawned - # threads creates race conditions such as sometimes not finding - # the required function because another thread is in the middle - # of loading the functions. - len(self.client_cache['wheel'].functions) - try: - self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs)) - except SystemExit: - log.warning('Attempt to in reactor by whell. Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.pool.fire_async(self.client_cache['wheel'].low, args=(fun, kwargs)) - def caller(self, fun, *args, **kwargs): + def local(self, fun, tgt, **kwargs): ''' - Wrap Caller to enable executing :ref:`caller modules ` + Wrap LocalClient for running :ref:`execution modules ` ''' - log.debug("in caller with fun {0} args {1} kwargs {2}".format(fun, args, kwargs)) - args = kwargs.get('args', []) - if 'caller' not in self.client_cache: - self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file']) - try: - self.client_cache['caller'].function(fun, *args) - except SystemExit: - log.warning('Attempt to exit reactor. 
Ignored.') - except Exception as exc: - log.warning('Exception caught by reactor: {0}'.format(exc)) + self.client_cache['local'].cmd_async(tgt, fun, **kwargs) + + def caller(self, fun, **kwargs): + ''' + Wrap LocalCaller to execute remote exec functions locally on the Minion + ''' + self.client_cache['caller'].cmd(fun, *kwargs['arg'], **kwargs['kwarg']) From 531cac610e1d38d266f675dd92e930b0fc267ce0 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 31 Aug 2017 23:23:41 -0500 Subject: [PATCH 093/348] Rewrite the reactor unit tests These have been skipped for a while now because they didn't work correctly. The old tests have been scrapped in favor of new ones that test both the old and new config schema. --- tests/unit/utils/test_reactor.py | 602 ++++++++++++++++++++++++++++--- 1 file changed, 542 insertions(+), 60 deletions(-) diff --git a/tests/unit/utils/test_reactor.py b/tests/unit/utils/test_reactor.py index 7a969009771..5c86f766b4f 100644 --- a/tests/unit/utils/test_reactor.py +++ b/tests/unit/utils/test_reactor.py @@ -1,74 +1,556 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import -import time -import shutil -import tempfile +import codecs +import glob +import logging import os - -from contextlib import contextmanager +import textwrap +import yaml import salt.utils -from salt.utils.process import clean_proc +import salt.loader import salt.utils.reactor as reactor -from tests.integration import AdaptedConfigurationTestCaseMixin -from tests.support.paths import TMP from tests.support.unit import TestCase, skipIf -from tests.support.mock import patch, MagicMock +from tests.support.mixins import AdaptedConfigurationTestCaseMixin +from tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON, + patch, + MagicMock, + Mock, + mock_open, +) + +REACTOR_CONFIG = '''\ +reactor: + - old_runner: + - /srv/reactor/old_runner.sls + - old_wheel: + - /srv/reactor/old_wheel.sls + - old_local: + - /srv/reactor/old_local.sls + - old_cmd: + - 
/srv/reactor/old_cmd.sls + - old_caller: + - /srv/reactor/old_caller.sls + - new_runner: + - /srv/reactor/new_runner.sls + - new_wheel: + - /srv/reactor/new_wheel.sls + - new_local: + - /srv/reactor/new_local.sls + - new_cmd: + - /srv/reactor/new_cmd.sls + - new_caller: + - /srv/reactor/new_caller.sls +''' + +REACTOR_DATA = { + 'runner': {'data': {'message': 'This is an error'}}, + 'wheel': {'data': {'id': 'foo'}}, + 'local': {'data': {'pkg': 'zsh', 'repo': 'updates'}}, + 'cmd': {'data': {'pkg': 'zsh', 'repo': 'updates'}}, + 'caller': {'data': {'path': '/tmp/foo'}}, +} + +SLS = { + '/srv/reactor/old_runner.sls': textwrap.dedent('''\ + raise_error: + runner.error.error: + - name: Exception + - message: {{ data['data']['message'] }} + '''), + '/srv/reactor/old_wheel.sls': textwrap.dedent('''\ + remove_key: + wheel.key.delete: + - match: {{ data['data']['id'] }} + '''), + '/srv/reactor/old_local.sls': textwrap.dedent('''\ + install_zsh: + local.state.single: + - tgt: test + - arg: + - pkg.installed + - {{ data['data']['pkg'] }} + - kwarg: + fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/old_cmd.sls': textwrap.dedent('''\ + install_zsh: + cmd.state.single: + - tgt: test + - arg: + - pkg.installed + - {{ data['data']['pkg'] }} + - kwarg: + fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/old_caller.sls': textwrap.dedent('''\ + touch_file: + caller.file.touch: + - args: + - {{ data['data']['path'] }} + '''), + '/srv/reactor/new_runner.sls': textwrap.dedent('''\ + raise_error: + runner.error.error: + - args: + - name: Exception + - message: {{ data['data']['message'] }} + '''), + '/srv/reactor/new_wheel.sls': textwrap.dedent('''\ + remove_key: + wheel.key.delete: + - args: + - match: {{ data['data']['id'] }} + '''), + '/srv/reactor/new_local.sls': textwrap.dedent('''\ + install_zsh: + local.state.single: + - tgt: test + - args: + - fun: pkg.installed + - name: {{ data['data']['pkg'] }} + - fromrepo: {{ data['data']['repo'] }} + '''), + 
'/srv/reactor/new_cmd.sls': textwrap.dedent('''\ + install_zsh: + cmd.state.single: + - tgt: test + - args: + - fun: pkg.installed + - name: {{ data['data']['pkg'] }} + - fromrepo: {{ data['data']['repo'] }} + '''), + '/srv/reactor/new_caller.sls': textwrap.dedent('''\ + touch_file: + caller.file.touch: + - args: + - name: {{ data['data']['path'] }} + '''), +} + +LOW_CHUNKS = { + # Note that the "name" value in the chunk has been overwritten by the + # "name" argument in the SLS. This is one reason why the new schema was + # needed. + 'old_runner': [{ + 'state': 'runner', + '__id__': 'raise_error', + '__sls__': '/srv/reactor/old_runner.sls', + 'order': 1, + 'fun': 'error.error', + 'name': 'Exception', + 'message': 'This is an error', + }], + 'old_wheel': [{ + 'state': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/old_wheel.sls', + 'order': 1, + 'fun': 'key.delete', + 'match': 'foo', + }], + 'old_local': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_local.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }], + 'old_cmd': [{ + 'state': 'local', # 'cmd' should be aliased to 'local' + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_cmd.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }], + 'old_caller': [{ + 'state': 'caller', + '__id__': 'touch_file', + 'name': 'touch_file', + '__sls__': '/srv/reactor/old_caller.sls', + 'order': 1, + 'fun': 'file.touch', + 'args': ['/tmp/foo'], + }], + 'new_runner': [{ + 'state': 'runner', + '__id__': 'raise_error', + 'name': 'raise_error', + '__sls__': '/srv/reactor/new_runner.sls', + 'order': 1, + 'fun': 'error.error', + 'args': [ + {'name': 'Exception'}, + {'message': 'This is an error'}, + ], + }], + 'new_wheel': [{ + 'state': 
'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/new_wheel.sls', + 'order': 1, + 'fun': 'key.delete', + 'args': [ + {'match': 'foo'}, + ], + }], + 'new_local': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_local.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'args': [ + {'fun': 'pkg.installed'}, + {'name': 'zsh'}, + {'fromrepo': 'updates'}, + ], + }], + 'new_cmd': [{ + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_cmd.sls', + 'order': 1, + 'tgt': 'test', + 'fun': 'state.single', + 'args': [ + {'fun': 'pkg.installed'}, + {'name': 'zsh'}, + {'fromrepo': 'updates'}, + ], + }], + 'new_caller': [{ + 'state': 'caller', + '__id__': 'touch_file', + 'name': 'touch_file', + '__sls__': '/srv/reactor/new_caller.sls', + 'order': 1, + 'fun': 'file.touch', + 'args': [ + {'name': '/tmp/foo'}, + ], + }], +} + +WRAPPER_CALLS = { + 'old_runner': ( + 'error.error', + { + '__state__': 'runner', + '__id__': 'raise_error', + '__sls__': '/srv/reactor/old_runner.sls', + '__user__': 'Reactor', + 'order': 1, + 'arg': [], + 'kwarg': { + 'name': 'Exception', + 'message': 'This is an error', + }, + 'name': 'Exception', + 'message': 'This is an error', + }, + ), + 'old_wheel': ( + 'key.delete', + { + '__state__': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/old_wheel.sls', + 'order': 1, + '__user__': 'Reactor', + 'arg': ['foo'], + 'kwarg': {}, + 'match': 'foo', + }, + ), + 'old_local': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/old_local.sls', + 'order': 1, + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }, + }, + 'old_cmd': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': 
'/srv/reactor/old_cmd.sls', + 'order': 1, + 'arg': ['pkg.installed', 'zsh'], + 'kwarg': {'fromrepo': 'updates'}, + }, + }, + 'old_caller': { + 'args': ('file.touch', '/tmp/foo'), + 'kwargs': {}, + }, + 'new_runner': ( + 'error.error', + { + '__state__': 'runner', + '__id__': 'raise_error', + 'name': 'raise_error', + '__sls__': '/srv/reactor/new_runner.sls', + '__user__': 'Reactor', + 'order': 1, + 'arg': (), + 'kwarg': { + 'name': 'Exception', + 'message': 'This is an error', + }, + }, + ), + 'new_wheel': ( + 'key.delete', + { + '__state__': 'wheel', + '__id__': 'remove_key', + 'name': 'remove_key', + '__sls__': '/srv/reactor/new_wheel.sls', + 'order': 1, + '__user__': 'Reactor', + 'arg': (), + 'kwarg': {'match': 'foo'}, + }, + ), + 'new_local': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_local.sls', + 'order': 1, + 'arg': (), + 'kwarg': { + 'fun': 'pkg.installed', + 'name': 'zsh', + 'fromrepo': 'updates', + }, + }, + }, + 'new_cmd': { + 'args': ('test', 'state.single'), + 'kwargs': { + 'state': 'local', + '__id__': 'install_zsh', + 'name': 'install_zsh', + '__sls__': '/srv/reactor/new_cmd.sls', + 'order': 1, + 'arg': (), + 'kwarg': { + 'fun': 'pkg.installed', + 'name': 'zsh', + 'fromrepo': 'updates', + }, + }, + }, + 'new_caller': { + 'args': ('file.touch',), + 'kwargs': {'name': '/tmp/foo'}, + }, +} + +log = logging.getLogger(__name__) -@contextmanager -def reactor_process(opts, reactor): - opts = dict(opts) - opts['reactor'] = reactor - proc = reactor.Reactor(opts) - proc.start() - try: - if os.environ.get('TRAVIS_PYTHON_VERSION', None) is not None: - # Travis is slow - time.sleep(10) - else: - time.sleep(2) - yield - finally: - clean_proc(proc) - - -def _args_sideffect(*args, **kwargs): - return args, kwargs - - -@skipIf(True, 'Skipping until its clear what and how is this supposed to be testing') +@skipIf(NO_MOCK, NO_MOCK_REASON) class 
TestReactor(TestCase, AdaptedConfigurationTestCaseMixin): - def setUp(self): - self.opts = self.get_temp_config('master') - self.tempdir = tempfile.mkdtemp(dir=TMP) - self.sls_name = os.path.join(self.tempdir, 'test.sls') - with salt.utils.fopen(self.sls_name, 'w') as fh: - fh.write(''' -update_fileserver: - runner.fileserver.update -''') + ''' + Tests for constructing the low chunks to be executed via the Reactor + ''' + @classmethod + def setUpClass(cls): + ''' + Load the reactor config for mocking + ''' + cls.opts = cls.get_temp_config('master') + reactor_config = yaml.safe_load(REACTOR_CONFIG) + cls.opts.update(reactor_config) + cls.reactor = reactor.Reactor(cls.opts) + cls.reaction_map = salt.utils.repack_dictlist(reactor_config['reactor']) + renderers = salt.loader.render(cls.opts, {}) + cls.render_pipe = [(renderers[x], '') for x in ('jinja', 'yaml')] - def tearDown(self): - if os.path.isdir(self.tempdir): - shutil.rmtree(self.tempdir) - del self.opts - del self.tempdir - del self.sls_name + @classmethod + def tearDownClass(cls): + del cls.opts + del cls.reactor + del cls.render_pipe - def test_basic(self): - reactor_config = [ - {'salt/tagA': ['/srv/reactor/A.sls']}, - {'salt/tagB': ['/srv/reactor/B.sls']}, - {'*': ['/srv/reactor/all.sls']}, - ] - wrap = reactor.ReactWrap(self.opts) - with patch.object(reactor.ReactWrap, 'local', MagicMock(side_effect=_args_sideffect)): - ret = wrap.run({'fun': 'test.ping', - 'state': 'local', - 'order': 1, - 'name': 'foo_action', - '__id__': 'foo_action'}) - raise Exception(ret) + def test_list_reactors(self): + ''' + Ensure that list_reactors() returns the correct list of reactor SLS + files for each tag. + ''' + for schema in ('old', 'new'): + for rtype in REACTOR_DATA: + tag = '_'.join((schema, rtype)) + self.assertEqual( + self.reactor.list_reactors(tag), + self.reaction_map[tag] + ) + + def test_reactions(self): + ''' + Ensure that the correct reactions are built from the configured SLS + files and tag data. 
+ ''' + for schema in ('old', 'new'): + for rtype in REACTOR_DATA: + tag = '_'.join((schema, rtype)) + log.debug('test_reactions: processing %s', tag) + reactors = self.reactor.list_reactors(tag) + log.debug('test_reactions: %s reactors: %s', tag, reactors) + # No globbing in our example SLS, and the files don't actually + # exist, so mock glob.glob to just return back the path passed + # to it. + with patch.object( + glob, + 'glob', + MagicMock(side_effect=lambda x: [x])): + # The below four mocks are all so that + # salt.template.compile_template() will read the templates + # we've mocked up in the SLS global variable above. + with patch.object( + os.path, 'isfile', + MagicMock(return_value=True)): + with patch.object( + salt.utils, 'is_empty', + MagicMock(return_value=False)): + with patch.object( + codecs, 'open', + mock_open(read_data=SLS[reactors[0]])): + with patch.object( + salt.template, 'template_shebang', + MagicMock(return_value=self.render_pipe)): + reactions = self.reactor.reactions( + tag, + REACTOR_DATA[rtype], + reactors, + ) + log.debug( + 'test_reactions: %s reactions: %s', + tag, reactions + ) + self.assertEqual(reactions, LOW_CHUNKS[tag]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class TestReactWrap(TestCase, AdaptedConfigurationTestCaseMixin): + ''' + Tests that we are formulating the wrapper calls properly + ''' + @classmethod + def setUpClass(cls): + cls.wrap = reactor.ReactWrap(cls.get_temp_config('master')) + + @classmethod + def tearDownClass(cls): + del cls.wrap + + def test_runner(self): + ''' + Test runner reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'runner')) + chunk = LOW_CHUNKS[tag][0] + thread_pool = Mock() + thread_pool.fire_async = Mock() + with patch.object(self.wrap, 'pool', thread_pool): + self.wrap.run(chunk) + thread_pool.fire_async.assert_called_with( + self.wrap.client_cache['runner'].low, + args=WRAPPER_CALLS[tag] + ) + + def test_wheel(self): + ''' + 
Test wheel reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'wheel')) + chunk = LOW_CHUNKS[tag][0] + thread_pool = Mock() + thread_pool.fire_async = Mock() + with patch.object(self.wrap, 'pool', thread_pool): + self.wrap.run(chunk) + thread_pool.fire_async.assert_called_with( + self.wrap.client_cache['wheel'].low, + args=WRAPPER_CALLS[tag] + ) + + def test_local(self): + ''' + Test local reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'local')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'local': Mock()} + client_cache['local'].cmd_async = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['local'].cmd_async.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) + + def test_cmd(self): + ''' + Test cmd reactions (alias for 'local') using both the old and new + config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'cmd')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'local': Mock()} + client_cache['local'].cmd_async = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['local'].cmd_async.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) + + def test_caller(self): + ''' + Test caller reactions using both the old and new config schema + ''' + for schema in ('old', 'new'): + tag = '_'.join((schema, 'caller')) + chunk = LOW_CHUNKS[tag][0] + client_cache = {'caller': Mock()} + client_cache['caller'].cmd = Mock() + with patch.object(self.wrap, 'client_cache', client_cache): + self.wrap.run(chunk) + client_cache['caller'].cmd.assert_called_with( + *WRAPPER_CALLS[tag]['args'], + **WRAPPER_CALLS[tag]['kwargs'] + ) From 7a2f12b96a2a9f80f1967a187e0c719464c9dea4 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 1 Sep 2017 18:35:01 
-0500 Subject: [PATCH 094/348] Include a better example for reactor in master conf file --- doc/ref/configuration/master.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index 976919a3439..ecba2b15376 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -4091,7 +4091,9 @@ information. .. code-block:: yaml - reactor: [] + reactor: + - 'salt/minion/*/start': + - salt://reactor/startup_tasks.sls .. conf_master:: reactor_refresh_interval From b5f10696c2e5ef5823caa7adcf7204bdfba5748d Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 1 Sep 2017 18:33:45 -0500 Subject: [PATCH 095/348] Improve the reactor documentation This reorganizes the reactor docs and includes examples of the new reactor SLS config syntax. --- doc/topics/beacons/index.rst | 5 +- doc/topics/reactor/index.rst | 553 ++++++++++++++++++++--------------- 2 files changed, 320 insertions(+), 238 deletions(-) diff --git a/doc/topics/beacons/index.rst b/doc/topics/beacons/index.rst index 6dae8dca091..62991af2f4a 100644 --- a/doc/topics/beacons/index.rst +++ b/doc/topics/beacons/index.rst @@ -253,9 +253,8 @@ in ``/etc/salt/master.d/reactor.conf``: .. note:: You can have only one top level ``reactor`` section, so if one already - exists, add this code to the existing section. See :ref:`Understanding the - Structure of Reactor Formulas ` to learn more about - reactor SLS syntax. + exists, add this code to the existing section. See :ref:`here + ` to learn more about reactor SLS syntax. Start the Salt Master in Debug Mode diff --git a/doc/topics/reactor/index.rst b/doc/topics/reactor/index.rst index 2586245a1aa..de5df946acf 100644 --- a/doc/topics/reactor/index.rst +++ b/doc/topics/reactor/index.rst @@ -27,9 +27,9 @@ event bus is an open system used for sending information notifying Salt and other systems about operations. The event system fires events with a very specific criteria. 
Every event has a -:strong:`tag`. Event tags allow for fast top level filtering of events. In -addition to the tag, each event has a data structure. This data structure is a -dict, which contains information about the event. +**tag**. Event tags allow for fast top-level filtering of events. In addition +to the tag, each event has a data structure. This data structure is a +dictionary, which contains information about the event. .. _reactor-mapping-events: @@ -65,15 +65,12 @@ and each event tag has a list of reactor SLS files to be run. the :ref:`querystring syntax ` (e.g. ``salt://reactor/mycustom.sls?saltenv=reactor``). -Reactor sls files are similar to state and pillar sls files. They are -by default yaml + Jinja templates and are passed familiar context variables. +Reactor SLS files are similar to State and Pillar SLS files. They are by +default YAML + Jinja templates and are passed familiar context variables. +Click :ref:`here ` for more detailed information on the +variables availble in Jinja templating. -They differ because of the addition of the ``tag`` and ``data`` variables. - -- The ``tag`` variable is just the tag in the fired event. -- The ``data`` variable is the event's data dict. - -Here is a simple reactor sls: +Here is the SLS for a simple reaction: .. code-block:: jinja @@ -90,71 +87,278 @@ data structure and compiler used for the state system is used for the reactor system. The only difference is that the data is matched up to the salt command API and the runner system. In this example, a command is published to the ``mysql1`` minion with a function of :py:func:`state.apply -`. Similarly, a runner can be called: +`, which performs a :ref:`highstate +`. Similarly, a runner can be called: .. 
code-block:: jinja {% if data['data']['custom_var'] == 'runit' %} call_runit_orch: runner.state.orchestrate: - - mods: _orch.runit + - args: + - mods: orchestrate.runit {% endif %} This example will execute the state.orchestrate runner and intiate an execution -of the runit orchestrator located at ``/srv/salt/_orch/runit.sls``. Using -``_orch/`` is any arbitrary path but it is recommended to avoid using "orchestrate" -as this is most likely to cause confusion. +of the ``runit`` orchestrator located at ``/srv/salt/orchestrate/runit.sls``. -Writing SLS Files ------------------ +Types of Reactions +================== -Reactor SLS files are stored in the same location as State SLS files. This means -that both ``file_roots`` and ``gitfs_remotes`` impact what SLS files are -available to the reactor and orchestrator. +============================== ================================================================================== +Name Description +============================== ================================================================================== +:ref:`local ` Runs a :ref:`remote-execution function ` on targeted minions +:ref:`runner ` Executes a :ref:`runner function ` +:ref:`wheel ` Executes a :ref:`wheel function ` on the master +:ref:`caller ` Runs a :ref:`remote-execution function ` on a masterless minion +============================== ================================================================================== -It is recommended to keep reactor and orchestrator SLS files in their own uniquely -named subdirectories such as ``_orch/``, ``orch/``, ``_orchestrate/``, ``react/``, -``_reactor/``, etc. Keeping a unique name helps prevent confusion when trying to -read through this a few years down the road. +.. note:: + The ``local`` and ``caller`` reaction types will be renamed for the Oxygen + release. These reaction types were named after Salt's internal client + interfaces, and are not intuitively named. 
Both ``local`` and ``caller`` + will continue to work in Reactor SLS files, but for the Oxygen release the + documentation will be updated to reflect the new preferred naming. -The Goal of Writing Reactor SLS Files -===================================== +Where to Put Reactor SLS Files +============================== -Reactor SLS files share the familiar syntax from Salt States but there are -important differences. The goal of a Reactor file is to process a Salt event as -quickly as possible and then to optionally start a **new** process in response. +Reactor SLS files can come both from files local to the master, and from any of +backends enabled via the :conf_master:`fileserver_backend` config option. Files +placed in the Salt fileserver can be referenced using a ``salt://`` URL, just +like they can in State SLS files. -1. The Salt Reactor watches Salt's event bus for new events. -2. The event tag is matched against the list of event tags under the - ``reactor`` section in the Salt Master config. -3. The SLS files for any matches are Rendered into a data structure that - represents one or more function calls. -4. That data structure is given to a pool of worker threads for execution. +It is recommended to place reactor and orchestrator SLS files in their own +uniquely-named subdirectories such as ``orch/``, ``orchestrate/``, ``react/``, +``reactor/``, etc., to keep them organized. + +.. _reactor-sls: + +Writing Reactor SLS +=================== + +The different reaction types were developed separately and have historically +had different methods for passing arguments. For the 2017.7.2 release a new, +unified configuration schema has been introduced, which applies to all reaction +types. + +The old config schema will continue to be supported, and there is no plan to +deprecate it at this time. + +.. _reactor-local: + +Local Reactions +--------------- + +A ``local`` reaction runs a :ref:`remote-execution function ` +on the targeted minions. 
+ +The old config schema required the positional and keyword arguments to be +manually separated by the user under ``arg`` and ``kwarg`` parameters. However, +this is not very user-friendly, as it forces the user to distinguish which type +of argument is which, and make sure that positional arguments are ordered +properly. Therefore, the new config schema is recommended if the master is +running a supported release. + +The below two examples are equivalent: + ++---------------------------------+-----------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================+=============================+ +| :: | :: | +| | | +| install_zsh: | install_zsh: | +| local.state.single: | local.state.single: | +| - tgt: 'kernel:Linux' | - tgt: 'kernel:Linux' | +| - tgt_type: grain | - tgt_type: grain | +| - args: | - arg: | +| - fun: pkg.installed | - pkg.installed | +| - name: zsh | - zsh | +| - fromrepo: updates | - kwarg: | +| | fromrepo: updates | ++---------------------------------+-----------------------------+ + +This reaction would be equvalent to running the following Salt command: + +.. code-block:: bash + + salt -G 'kernel:Linux' state.single pkg.installed name=zsh fromrepo=updates + +.. note:: + Any other parameters in the :py:meth:`LocalClient().cmd_async() + ` method can be passed at the same + indentation level as ``tgt``. + +.. note:: + ``tgt_type`` is only required when the target expression defined in ``tgt`` + uses a :ref:`target type ` other than a minion ID glob. + + The ``tgt_type`` argument was named ``expr_form`` in releases prior to + 2017.7.0. + +.. _reactor-runner: + +Runner Reactions +---------------- + +Runner reactions execute :ref:`runner functions ` locally on +the master. + +The old config schema called for passing arguments to the reaction directly +under the name of the runner function. However, this can cause unpredictable +interactions with the Reactor system's internal arguments. 
It is also possible +to pass positional and keyword arguments under ``arg`` and ``kwarg`` like above +in :ref:`local reactions `, but as noted above this is not very +user-friendly. Therefore, the new config schema is recommended if the master +is running a supported release. + +The below two examples are equivalent: + ++-------------------------------------------------+-------------------------------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================================+=================================================+ +| :: | :: | +| | | +| deploy_app: | deploy_app: | +| runner.state.orchestrate: | runner.state.orchestrate: | +| - args: | - mods: orchestrate.deploy_app | +| - mods: orchestrate.deploy_app | - kwarg: | +| - pillar: | pillar: | +| event_tag: {{ tag }} | event_tag: {{ tag }} | +| event_data: {{ data['data']|json }} | event_data: {{ data['data']|json }} | ++-------------------------------------------------+-------------------------------------------------+ + +Assuming that the event tag is ``foo``, and the data passed to the event is +``{'bar': 'baz'}``, then this reaction is equvalent to running the following +Salt command: + +.. code-block:: bash + + salt-run state.orchestrate mods=orchestrate.deploy_app pillar='{"event_tag": "foo", "event_data": {"bar": "baz"}}' + +.. _reactor-wheel: + +Wheel Reactions +--------------- + +Wheel reactions run :ref:`wheel functions ` locally on the +master. + +Like :ref:`runner reactions `, the old config schema called for +wheel reactions to have arguments passed directly under the name of the +:ref:`wheel function ` (or in ``arg`` or ``kwarg`` parameters). 
+ +The below two examples are equivalent: + ++-----------------------------------+---------------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++===================================+=================================+ +| :: | :: | +| | | +| remove_key: | remove_key: | +| wheel.key.delete: | wheel.key.delete: | +| - args: | - match: {{ data['id'] }} | +| - match: {{ data['id'] }} | | ++-----------------------------------+---------------------------------+ + +.. _reactor-caller: + +Caller Reactions +---------------- + +Caller reactions run :ref:`remote-execution functions ` on a +minion daemon's Reactor system. To run a Reactor on the minion, it is necessary +to configure the :mod:`Reactor Engine ` in the minion +config file, and then setup your watched events in a ``reactor`` section in the +minion config file as well. + +.. note:: Masterless Minions use this Reactor + + This is the only way to run the Reactor if you use masterless minions. + +Both the old and new config schemas involve passing arguments under an ``args`` +parameter. However, the old config schema only supports positional arguments. +Therefore, the new config schema is recommended if the masterless minion is +running a supported release. + +The below two examples are equivalent: + ++---------------------------------+---------------------------+ +| Supported in 2017.7.2 and later | Supported in all releases | ++=================================+===========================+ +| :: | :: | +| | | +| touch_file: | touch_file: | +| caller.file.touch: | caller.file.touch: | +| - args: | - args: | +| - name: /tmp/foo | - /tmp/foo | ++---------------------------------+---------------------------+ + +This reaction is equvalent to running the following Salt command: + +.. code-block:: bash + + salt-call file.touch name=/tmp/foo + +Best Practices for Writing Reactor SLS Files +============================================ + +The Reactor works as follows: + +1. 
The Salt Reactor watches Salt's event bus for new events. +2. Each event's tag is matched against the list of event tags configured under + the :conf_master:`reactor` section in the Salt Master config. +3. The SLS files for any matches are rendered into a data structure that + represents one or more function calls. +4. That data structure is given to a pool of worker threads for execution. Matching and rendering Reactor SLS files is done sequentially in a single -process. Complex Jinja that calls out to slow Execution or Runner modules slows -down the rendering and causes other reactions to pile up behind the current -one. The worker pool is designed to handle complex and long-running processes -such as Salt Orchestrate. +process. For that reason, reactor SLS files should contain few individual +reactions (one, if at all possible). Also, keep in mind that reactions are +fired asynchronously (with the exception of :ref:`caller `) and +do *not* support :ref:`requisites `. -tl;dr: Rendering Reactor SLS files MUST be simple and quick. The new process -started by the worker threads can be long-running. Using the reactor to fire -an orchestrate runner would be ideal. +Complex Jinja templating that calls out to slow :ref:`remote-execution +` or :ref:`runner ` functions slows down +the rendering and causes other reactions to pile up behind the current one. The +worker pool is designed to handle complex and long-running processes like +:ref:`orchestration ` jobs. + +Therefore, when complex tasks are in order, :ref:`orchestration +` is a natural fit. Orchestration SLS files can be more +complex, and use requisites. Performing a complex task using orchestration lets +the Reactor system fire off the orchestration job and proceed with processing +other reactions. + +.. _reactor-jinja-context: Jinja Context -------------- +============= -Reactor files only have access to a minimal Jinja context. ``grains`` and -``pillar`` are not available. 
The ``salt`` object is available for calling -Runner and Execution modules but it should be used sparingly and only for quick -tasks for the reasons mentioned above. +Reactor SLS files only have access to a minimal Jinja context. ``grains`` and +``pillar`` are *not* available. The ``salt`` object is available for calling +:ref:`remote-execution ` or :ref:`runner ` +functions, but it should be used sparingly and only for quick tasks for the +reasons mentioned above. + +In addition to the ``salt`` object, the following variables are available in +the Jinja context: + +- ``tag`` - the tag from the event that triggered execution of the Reactor SLS + file +- ``data`` - the event's data dictionary + +The ``data`` dict will contain an ``id`` key containing the minion ID, if the +event was fired from a minion, and a ``data`` key containing the data passed to +the event. Advanced State System Capabilities ----------------------------------- +================================== -Reactor SLS files, by design, do not support Requisites, ordering, -``onlyif``/``unless`` conditionals and most other powerful constructs from -Salt's State system. +Reactor SLS files, by design, do not support :ref:`requisites `, +ordering, ``onlyif``/``unless`` conditionals and most other powerful constructs +from Salt's State system. Complex Master-side operations are best performed by Salt's Orchestrate system so using the Reactor to kick off an Orchestrate run is a very common pairing. @@ -166,7 +370,7 @@ For example: # /etc/salt/master.d/reactor.conf # A custom event containing: {"foo": "Foo!", "bar: "bar*", "baz": "Baz!"} reactor: - - myco/custom/event: + - my/custom/event: - /srv/reactor/some_event.sls .. 
code-block:: jinja @@ -174,15 +378,15 @@ For example: # /srv/reactor/some_event.sls invoke_orchestrate_file: runner.state.orchestrate: - - mods: _orch.do_complex_thing # /srv/salt/_orch/do_complex_thing.sls - - kwarg: - pillar: - event_tag: {{ tag }} - event_data: {{ data|json() }} + - args: + - mods: orchestrate.do_complex_thing + - pillar: + event_tag: {{ tag }} + event_data: {{ data|json }} .. code-block:: jinja - # /srv/salt/_orch/do_complex_thing.sls + # /srv/salt/orchestrate/do_complex_thing.sls {% set tag = salt.pillar.get('event_tag') %} {% set data = salt.pillar.get('event_data') %} @@ -209,7 +413,7 @@ For example: .. _beacons-and-reactors: Beacons and Reactors --------------------- +==================== An event initiated by a beacon, when it arrives at the master will be wrapped inside a second event, such that the data object containing the beacon @@ -219,27 +423,52 @@ For example, to access the ``id`` field of the beacon event in a reactor file, you will need to reference ``{{ data['data']['id'] }}`` rather than ``{{ data['id'] }}`` as for events initiated directly on the event bus. +Similarly, the data dictionary attached to the event would be located in +``{{ data['data']['data'] }}`` instead of ``{{ data['data'] }}``. + See the :ref:`beacon documentation ` for examples. -Fire an event -============= +Manually Firing an Event +======================== -To fire an event from a minion call ``event.send`` +From the Master +--------------- + +Use the :py:func:`event.send ` runner: .. code-block:: bash - salt-call event.send 'foo' '{orchestrate: refresh}' + salt-run event.send foo '{orchestrate: refresh}' -After this is called, any reactor sls files matching event tag ``foo`` will -execute with ``{{ data['data']['orchestrate'] }}`` equal to ``'refresh'``. +From the Minion +--------------- -See :py:mod:`salt.modules.event` for more information. 
+To fire an event to the master from a minion, call :py:func:`event.send +`: -Knowing what event is being fired -================================= +.. code-block:: bash -The best way to see exactly what events are fired and what data is available in -each event is to use the :py:func:`state.event runner + salt-call event.send foo '{orchestrate: refresh}' + +To fire an event to the minion's local event bus, call :py:func:`event.fire +`: + +.. code-block:: bash + + salt-call event.fire '{orchestrate: refresh}' foo + +Referencing Data Passed in Events +--------------------------------- + +Assuming any of the above examples, any reactor SLS files triggered by watching +the event tag ``foo`` will execute with ``{{ data['data']['orchestrate'] }}`` +equal to ``'refresh'``. + +Getting Information About Events +================================ + +The best way to see exactly what events have been fired and what data is +available in each event is to use the :py:func:`state.event runner `. .. seealso:: :ref:`Common Salt Events ` @@ -308,156 +537,10 @@ rendered SLS file (or any errors generated while rendering the SLS file). view the result of referencing Jinja variables. If the result is empty then Jinja produced an empty result and the Reactor will ignore it. -.. _reactor-structure: +Passing Event Data to Minions or Orchestration as Pillar +-------------------------------------------------------- -Understanding the Structure of Reactor Formulas -=============================================== - -**I.e., when to use `arg` and `kwarg` and when to specify the function -arguments directly.** - -While the reactor system uses the same basic data structure as the state -system, the functions that will be called using that data structure are -different functions than are called via Salt's state system. The Reactor can -call Runner modules using the `runner` prefix, Wheel modules using the `wheel` -prefix, and can also cause minions to run Execution modules using the `local` -prefix. 
- -.. versionchanged:: 2014.7.0 - The ``cmd`` prefix was renamed to ``local`` for consistency with other - parts of Salt. A backward-compatible alias was added for ``cmd``. - -The Reactor runs on the master and calls functions that exist on the master. In -the case of Runner and Wheel functions the Reactor can just call those -functions directly since they exist on the master and are run on the master. - -In the case of functions that exist on minions and are run on minions, the -Reactor still needs to call a function on the master in order to send the -necessary data to the minion so the minion can execute that function. - -The Reactor calls functions exposed in :ref:`Salt's Python API documentation -`. and thus the structure of Reactor files very transparently -reflects the function signatures of those functions. - -Calling Execution modules on Minions ------------------------------------- - -The Reactor sends commands down to minions in the exact same way Salt's CLI -interface does. It calls a function locally on the master that sends the name -of the function as well as a list of any arguments and a dictionary of any -keyword arguments that the minion should use to execute that function. - -Specifically, the Reactor calls the async version of :py:meth:`this function -`. You can see that function has 'arg' and 'kwarg' -parameters which are both values that are sent down to the minion. - -Executing remote commands maps to the :strong:`LocalClient` interface which is -used by the :strong:`salt` command. This interface more specifically maps to -the :strong:`cmd_async` method inside of the :strong:`LocalClient` class. This -means that the arguments passed are being passed to the :strong:`cmd_async` -method, not the remote method. A field starts with :strong:`local` to use the -:strong:`LocalClient` subsystem. The result is, to execute a remote command, -a reactor formula would look like this: - -.. 
code-block:: yaml - - clean_tmp: - local.cmd.run: - - tgt: '*' - - arg: - - rm -rf /tmp/* - -The ``arg`` option takes a list of arguments as they would be presented on the -command line, so the above declaration is the same as running this salt -command: - -.. code-block:: bash - - salt '*' cmd.run 'rm -rf /tmp/*' - -Use the ``tgt_type`` argument to specify a matcher: - -.. code-block:: yaml - - clean_tmp: - local.cmd.run: - - tgt: 'os:Ubuntu' - - tgt_type: grain - - arg: - - rm -rf /tmp/* - - - clean_tmp: - local.cmd.run: - - tgt: 'G@roles:hbase_master' - - tgt_type: compound - - arg: - - rm -rf /tmp/* - -.. note:: - The ``tgt_type`` argument was named ``expr_form`` in releases prior to - 2017.7.0 (2016.11.x and earlier). - -Any other parameters in the :py:meth:`LocalClient().cmd() -` method can be specified as well. - -Executing Reactors from the Minion ----------------------------------- - -The minion can be setup to use the Reactor via a reactor engine. This just -sets up and listens to the minions event bus, instead of to the masters. - -The biggest difference is that you have to use the caller method on the -Reactor, which is the equivalent of salt-call, to run your commands. - -:mod:`Reactor Engine setup ` - -.. code-block:: yaml - - clean_tmp: - caller.cmd.run: - - arg: - - rm -rf /tmp/* - -.. note:: Masterless Minions use this Reactor - - This is the only way to run the Reactor if you use masterless minions. - -Calling Runner modules and Wheel modules ----------------------------------------- - -Calling Runner modules and Wheel modules from the Reactor uses a more direct -syntax since the function is being executed locally instead of sending a -command to a remote system to be executed there. There are no 'arg' or 'kwarg' -parameters (unless the Runner function or Wheel function accepts a parameter -with either of those names.) - -For example: - -.. 
code-block:: yaml - - clear_the_grains_cache_for_all_minions: - runner.cache.clear_grains - -If the :py:func:`the runner takes arguments ` then -they must be specified as keyword arguments. - -.. code-block:: yaml - - spin_up_more_web_machines: - runner.cloud.profile: - - prof: centos_6 - - instances: - - web11 # These VM names would be generated via Jinja in a - - web12 # real-world example. - -To determine the proper names for the arguments, check the documentation -or source code for the runner function you wish to call. - -Passing event data to Minions or Orchestrate as Pillar ------------------------------------------------------- - -An interesting trick to pass data from the Reactor script to +An interesting trick to pass data from the Reactor SLS file to :py:func:`state.apply ` is to pass it as inline Pillar data since both functions take a keyword argument named ``pillar``. @@ -484,10 +567,9 @@ from the event to the state file via inline Pillar. add_new_minion_to_pool: local.state.apply: - tgt: 'haproxy*' - - arg: - - haproxy.refresh_pool - - kwarg: - pillar: + - args: + - mods: haproxy.refresh_pool + - pillar: new_minion: {{ data['id'] }} {% endif %} @@ -503,17 +585,16 @@ This works with Orchestrate files as well: call_some_orchestrate_file: runner.state.orchestrate: - - mods: _orch.some_orchestrate_file - - pillar: - stuff: things + - args: + - mods: orchestrate.some_orchestrate_file + - pillar: + stuff: things Which is equivalent to the following command at the CLI: .. code-block:: bash - salt-run state.orchestrate _orch.some_orchestrate_file pillar='{stuff: things}' - -This expects to find a file at /srv/salt/_orch/some_orchestrate_file.sls. + salt-run state.orchestrate orchestrate.some_orchestrate_file pillar='{stuff: things}' Finally, that data is available in the state file using the normal Pillar lookup syntax. The following example is grabbing web server names and IP @@ -564,7 +645,7 @@ includes the minion id, which we can use for matching. 
- 'salt/minion/ink*/start': - /srv/reactor/auth-complete.sls -In this sls file, we say that if the key was rejected we will delete the key on +In this SLS file, we say that if the key was rejected we will delete the key on the master and then also tell the master to ssh in to the minion and tell it to restart the minion, since a minion process will die if the key is rejected. @@ -580,19 +661,21 @@ authentication every ten seconds by default. {% if not data['result'] and data['id'].startswith('ink') %} minion_remove: wheel.key.delete: - - match: {{ data['id'] }} + - args: + - match: {{ data['id'] }} minion_rejoin: local.cmd.run: - tgt: salt-master.domain.tld - - arg: - - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart' + - args: + - cmd: ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "{{ data['id'] }}" 'sleep 10 && /etc/init.d/salt-minion restart' {% endif %} {# Ink server is sending new key -- accept this key #} {% if 'act' in data and data['act'] == 'pend' and data['id'].startswith('ink') %} minion_add: wheel.key.accept: - - match: {{ data['id'] }} + - args: + - match: {{ data['id'] }} {% endif %} No if statements are needed here because we already limited this action to just From 7abd07fa07d19dc23eb621d545977a65e69f5729 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 6 Sep 2017 10:38:45 -0500 Subject: [PATCH 096/348] Simplify client logic --- salt/client/mixins.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index bd69d269bf9..ea2090e2635 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -360,19 +360,18 @@ class SyncClientMixin(object): # that since the caller knows what is an arg vs a kwarg, but while # we make the transition we will load "kwargs" using format_call if # there are no kwargs in the low object passed in. 
- f_call = {} if 'arg' in low and 'kwarg' in low \ - else salt.utils.format_call( + + if 'arg' in low and 'kwarg' in low: + args = low['arg'] + kwargs = low['kwarg'] + else: + f_call = salt.utils.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - - args = f_call.get('args', ()) \ - if 'arg' not in low \ - else low['arg'] - kwargs = f_call.get('kwargs', {}) \ - if 'kwarg' not in low \ - else low['kwarg'] + args = f_call.get('args', ()) + kwargs = f_call.get('kwargs', {}) # Update the event data with loaded args and kwargs data['fun_args'] = list(args) + ([kwargs] if kwargs else []) From e076e9b6340fa7647b7029ed48e73f7764b3ae91 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Fri, 15 Sep 2017 18:47:03 +0300 Subject: [PATCH 097/348] Forward events to all masters syndic connected to. --- salt/minion.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index c9cfc6cb1fd..394b11a2e80 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2588,6 +2588,8 @@ class SyndicManager(MinionBase): ''' if kwargs is None: kwargs = {} + successful = False + # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master)) @@ -2595,12 +2597,12 @@ class SyndicManager(MinionBase): try: getattr(syndic_future.result(), func)(*args, **kwargs) - return + successful = True except SaltClientError: log.error('Unable to call {0} on {1}, trying another...'.format(func, master)) self._mark_master_dead(master) - continue - log.critical('Unable to call {0} on any masters!'.format(func)) + if not successful: + log.critical('Unable to call {0} on any masters!'.format(func)) def _return_pub_syndic(self, values, master_id=None): ''' From e5297e386975a2fd68418e0fa8b881950c17ef33 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 18 Sep 2017 
16:19:15 -0400 Subject: [PATCH 098/348] Add reason to linux_acl state loading failure --- salt/states/linux_acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index 4e3c7049b96..fec2e58eacc 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -49,7 +49,7 @@ def __virtual__(): if salt.utils.which('getfacl') and salt.utils.which('setfacl'): return __virtualname__ - return False + return False, 'The linux_acl state cannot be loaded: the getfacl or setfacl binary is not in the path.' def present(name, acl_type, acl_name='', perms='', recurse=False): From 35cf69bc50b837d83dd596ee1a8d32a8f3ac4e18 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Tue, 19 Sep 2017 09:53:27 +0200 Subject: [PATCH 099/348] Moved exception to Salt core The timeout exception is now part of exceptions.py and no longer solely defined in the module. --- salt/exceptions.py | 6 ++++++ salt/modules/kubernetes.py | 9 +++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/salt/exceptions.py b/salt/exceptions.py index 256537dd77f..00111df1048 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -265,6 +265,12 @@ class SaltCacheError(SaltException): ''' +class TimeoutError(SaltException): + ''' + Thrown when an operation cannot be completed within a given time limit. 
+ ''' + + class SaltReqTimeoutError(SaltException): ''' Thrown when a salt master request call fails to return within the timeout diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index aa06645660d..b5628d41d32 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -48,6 +48,7 @@ from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems import salt.utils import salt.utils.templates +from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: @@ -82,15 +83,11 @@ def __virtual__(): return False, 'python kubernetes library not found' -class TimeoutException(Exception): - pass - - if salt.utils.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): - raise TimeoutException + raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: @@ -723,7 +720,7 @@ def delete_deployment(name, namespace='default', **kwargs): sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 - except TimeoutException: + except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling From 2d810690b6438a58d55341ed7814df830e6e01cd Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Tue, 19 Sep 2017 11:01:27 +0200 Subject: [PATCH 100/348] Fix pylint errors --- salt/modules/mdadm.py | 1 + salt/states/mdadm.py | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/salt/modules/mdadm.py b/salt/modules/mdadm.py index 334cd46e731..354ece93baf 100644 --- a/salt/modules/mdadm.py +++ b/salt/modules/mdadm.py @@ -357,6 +357,7 @@ def assemble(name, elif test_mode is False: return __salt__['cmd.run'](cmd, python_shell=False) + def examine(device): ''' Show detail for a specified RAID component device diff --git a/salt/states/mdadm.py b/salt/states/mdadm.py index 067c5c4c2f5..4588d859fab 100644 --- a/salt/states/mdadm.py +++ 
b/salt/states/mdadm.py @@ -25,9 +25,6 @@ import logging # Import salt libs import salt.utils.path -# Import 3rd-party libs -from salt.ext import six - # Set up logger log = logging.getLogger(__name__) @@ -116,7 +113,7 @@ def present(name, elif len(uuid_dict) == 1: uuid = uuid_dict.keys()[0] if present and present['uuid'] != uuid: - ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, present['uuid']) + ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, present['uuid']) ret['result'] = False return ret @@ -193,7 +190,7 @@ def present(name, ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb) ret['result'] = False else: - ret['comment'] = 'Raid {0} already present.'.format(name) + ret['comment'] = 'Raid {0} already present.'.format(name) if (do_assemble or present) and len(new_devices) > 0: for d in new_devices: From a2b61f7cd2d3367d4167b25de5e52cb557eb1026 Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Tue, 19 Sep 2017 13:24:51 -0400 Subject: [PATCH 101/348] INFRA-5292 - small fix for boto_iam AWS rate limiting errors --- salt/modules/boto_iam.py | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index 97eccd6616e..e83fdffd0ce 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -2148,6 +2148,7 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, salt myminion boto_iam.list_entities_for_policy mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + retries = 30 params = {} for arg in ('path_prefix', 'entity_filter'): @@ -2155,21 +2156,26 @@ def list_entities_for_policy(policy_name, path_prefix=None, entity_filter=None, params[arg] = locals()[arg] policy_arn = _get_policy_arn(policy_name, region, key, keyid, profile) - try: - allret = { - 'policy_groups': [], - 'policy_users': [], - 
'policy_roles': [], - } - for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params): - for k, v in six.iteritems(allret): - v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k)) - return allret - except boto.exception.BotoServerError as e: - log.debug(e) - msg = 'Failed to list {0} policy entities.' - log.error(msg.format(policy_name)) - return {} + while retries: + try: + allret = { + 'policy_groups': [], + 'policy_users': [], + 'policy_roles': [], + } + for ret in __utils__['boto.paged_call'](conn.list_entities_for_policy, policy_arn=policy_arn, **params): + for k, v in six.iteritems(allret): + v.extend(ret.get('list_entities_for_policy_response', {}).get('list_entities_for_policy_result', {}).get(k)) + return allret + except boto.exception.BotoServerError as e: + if e.error_code == 'Throttling': + log.debug("Throttled by AWS API, will retry in 5 seconds...") + time.sleep(5) + retries -= 1 + continue + log.error('Failed to list {0} policy entities: {1}'.format(policy_name, e.message)) + return {} + return {} def list_attached_user_policies(user_name, path_prefix=None, entity_filter=None, From e62a359a0ce1d45fbdbaca0faf2447f9a6894fa1 Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Tue, 19 Sep 2017 15:06:23 -0400 Subject: [PATCH 102/348] INFRA-5492 - dang it, I had this and forgot to cut and paste it over :-/ --- salt/modules/boto_iam.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index e83fdffd0ce..9575156f831 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -42,6 +42,7 @@ from __future__ import absolute_import import logging import json import yaml +import time # Import salt libs import salt.ext.six as six From f84b50a06b6694678dce21c27af5bd9e0ee6a9bf Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 19 Sep 2017 09:36:51 -0600 Subject: [PATCH 103/348] results and columns are 
lists for mysql returns --- salt/modules/mysql.py | 20 ++- .../files/file/base/mysql/select_query.sql | 7 + .../files/file/base/mysql/update_query.sql | 3 + tests/integration/modules/test_mysql.py | 131 +++++++++++++++++- 4 files changed, 155 insertions(+), 6 deletions(-) create mode 100644 tests/integration/files/file/base/mysql/select_query.sql create mode 100644 tests/integration/files/file/base/mysql/update_query.sql diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py index e97525ad087..6f42801a403 100644 --- a/salt/modules/mysql.py +++ b/salt/modules/mysql.py @@ -687,11 +687,20 @@ def file_query(database, file_name, **connection_args): .. versionadded:: 2017.7.0 + database + + database to run script inside + + file_name + + File name of the script. This can be on the minion, or a file that is reachable by the fileserver + CLI Example: .. code-block:: bash salt '*' mysql.file_query mydb file_name=/tmp/sqlfile.sql + salt '*' mysql.file_query mydb file_name=salt://sqlfile.sql Return data: @@ -700,6 +709,9 @@ def file_query(database, file_name, **connection_args): {'query time': {'human': '39.0ms', 'raw': '0.03899'}, 'rows affected': 1L} ''' + if any(file_name.startswith(proto) for proto in ('salt://', 'http://', 'https://', 'swift://', 's3://')): + file_name = __salt__['cp.cache_file'](file_name) + if os.path.exists(file_name): with salt.utils.fopen(file_name, 'r') as ifile: contents = ifile.read() @@ -708,7 +720,7 @@ def file_query(database, file_name, **connection_args): return False query_string = "" - ret = {'rows returned': 0, 'columns': 0, 'results': 0, 'rows affected': 0, 'query time': {'raw': 0}} + ret = {'rows returned': 0, 'columns': [], 'results': [], 'rows affected': 0, 'query time': {'raw': 0}} for line in contents.splitlines(): if re.match(r'--', line): # ignore sql comments continue @@ -728,16 +740,16 @@ def file_query(database, file_name, **connection_args): if 'rows returned' in query_result: ret['rows returned'] += query_result['rows 
returned'] if 'columns' in query_result: - ret['columns'] += query_result['columns'] + ret['columns'].append(query_result['columns']) if 'results' in query_result: - ret['results'] += query_result['results'] + ret['results'].append(query_result['results']) if 'rows affected' in query_result: ret['rows affected'] += query_result['rows affected'] ret['query time']['human'] = str(round(float(ret['query time']['raw']), 2)) + 's' ret['query time']['raw'] = round(float(ret['query time']['raw']), 5) # Remove empty keys in ret - ret = dict((k, v) for k, v in six.iteritems(ret) if v) + ret = {k: v for k, v in six.iteritems(ret) if v} return ret diff --git a/tests/integration/files/file/base/mysql/select_query.sql b/tests/integration/files/file/base/mysql/select_query.sql new file mode 100644 index 00000000000..10cf4850fd5 --- /dev/null +++ b/tests/integration/files/file/base/mysql/select_query.sql @@ -0,0 +1,7 @@ +CREATE TABLE test_select (a INT); +insert into test_select values (1); +insert into test_select values (3); +insert into test_select values (4); +insert into test_select values (5); +update test_select set a=2 where a=1; +select * from test_select; diff --git a/tests/integration/files/file/base/mysql/update_query.sql b/tests/integration/files/file/base/mysql/update_query.sql new file mode 100644 index 00000000000..34cee2dab14 --- /dev/null +++ b/tests/integration/files/file/base/mysql/update_query.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_update (a INT); +insert into test_update values (1); +update test_update set a=2 where a=1; diff --git a/tests/integration/modules/test_mysql.py b/tests/integration/modules/test_mysql.py index 20b79da9083..0cffdb37fae 100644 --- a/tests/integration/modules/test_mysql.py +++ b/tests/integration/modules/test_mysql.py @@ -1280,6 +1280,7 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin): testdb1 = 'tes.t\'"saltdb' testdb2 = 't_st `(:=salt%b)' testdb3 = 'test `(:=salteeb)' + test_file_query_db = 'test_query' table1 = 
'foo' table2 = "foo `\'%_bar" users = { @@ -1391,13 +1392,19 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin): name=self.testdb1, connection_user=self.user, connection_pass=self.password, - ) + ) self.run_function( 'mysql.db_remove', name=self.testdb2, connection_user=self.user, connection_pass=self.password, - ) + ) + self.run_function( + 'mysql.db_remove', + name=self.test_file_query_db, + connection_user=self.user, + connection_pass=self.password, + ) def _userCreation(self, uname, @@ -1627,3 +1634,123 @@ class MysqlModuleUserGrantTest(ModuleCase, SaltReturnAssertsMixin): "GRANT USAGE ON *.* TO ''@'localhost'", "GRANT DELETE ON `test ``(:=salteeb)`.* TO ''@'localhost'" ]) + + +@skipIf( + NO_MYSQL, + 'Please install MySQL bindings and a MySQL Server before running' + 'MySQL integration tests.' +) +class MysqlModuleFileQueryTest(ModuleCase, SaltReturnAssertsMixin): + ''' + Test file query module + ''' + + user = 'root' + password = 'poney' + testdb = 'test_file_query' + + @destructiveTest + def setUp(self): + ''' + Test presence of MySQL server, enforce a root password, create users + ''' + super(MysqlModuleFileQueryTest, self).setUp() + NO_MYSQL_SERVER = True + # now ensure we know the mysql root password + # one of theses two at least should work + ret1 = self.run_state( + 'cmd.run', + name='mysqladmin --host="localhost" -u ' + + self.user + + ' flush-privileges password "' + + self.password + + '"' + ) + ret2 = self.run_state( + 'cmd.run', + name='mysqladmin --host="localhost" -u ' + + self.user + + ' --password="' + + self.password + + '" flush-privileges password "' + + self.password + + '"' + ) + key, value = ret2.popitem() + if value['result']: + NO_MYSQL_SERVER = False + else: + self.skipTest('No MySQL Server running, or no root access on it.') + # Create some users and a test db + self.run_function( + 'mysql.db_create', + name=self.testdb, + connection_user=self.user, + connection_pass=self.password, + connection_db='mysql', + ) + + 
@destructiveTest + def tearDown(self): + ''' + Removes created users and db + ''' + self.run_function( + 'mysql.db_remove', + name=self.testdb, + connection_user=self.user, + connection_pass=self.password, + connection_db='mysql', + ) + + @destructiveTest + def test_update_file_query(self): + ''' + Test query without any output + ''' + ret = self.run_function( + 'mysql.file_query', + database=self.testdb, + file_name='salt://mysql/update_query.sql', + character_set='utf8', + collate='utf8_general_ci', + connection_user=self.user, + connection_pass=self.password + ) + self.assertTrue('query time' in ret) + ret.pop('query time') + self.assertEqual(ret, {'rows affected': 2}) + + @destructiveTest + def test_select_file_query(self): + ''' + Test query with table output + ''' + ret = self.run_function( + 'mysql.file_query', + database=self.testdb, + file_name='salt://mysql/select_query.sql', + character_set='utf8', + collate='utf8_general_ci', + connection_user=self.user, + connection_pass=self.password + ) + expected = { + 'rows affected': 5, + 'rows returned': 4, + 'results': [ + [ + ['2'], + ['3'], + ['4'], + ['5'] + ] + ], + 'columns': [ + ['a'] + ], + } + self.assertTrue('query time' in ret) + ret.pop('query time') + self.assertEqual(ret, expected) From 2e67d2c298d129bf40fff8c6c8454606dde5ab18 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 19 Sep 2017 17:41:14 -0600 Subject: [PATCH 104/348] Added newline at the end of the file This is needed to satisfy the linter. 
--- salt/pillar/rethinkdb_pillar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/rethinkdb_pillar.py b/salt/pillar/rethinkdb_pillar.py index bf7c8162218..cf4b2c56f8f 100644 --- a/salt/pillar/rethinkdb_pillar.py +++ b/salt/pillar/rethinkdb_pillar.py @@ -160,4 +160,4 @@ def ext_pillar(minion_id, else: # No document found in the database log.debug('ext_pillar.rethinkdb: no document found') - return {} \ No newline at end of file + return {} From 56cd88dfa5d69c939cd23fdbccca0d9338a873d9 Mon Sep 17 00:00:00 2001 From: Andrei Belov Date: Wed, 20 Sep 2017 08:07:13 +0300 Subject: [PATCH 105/348] Several fixes for RDS DB parameter group management In particular: - it is now possible to manage all the parameters in a group, without limiting to MaxRecords=100 (thanks to pagination); - update_parameter_group() now composes valid JSON payload, automatically substitutes boolean values to 'on' / 'off' strings; - parameter_present() now shows actual error message produced by ModifyDBParameterGroup API call. 
--- salt/modules/boto_rds.py | 35 ++++++++++++++++++++--------------- salt/states/boto_rds.py | 13 ++++++++----- 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/salt/modules/boto_rds.py b/salt/modules/boto_rds.py index f57b9633deb..cf778bd86e3 100644 --- a/salt/modules/boto_rds.py +++ b/salt/modules/boto_rds.py @@ -505,10 +505,17 @@ def update_parameter_group(name, parameters, apply_method="pending-reboot", param_list = [] for key, value in six.iteritems(parameters): - item = (key, value, apply_method) + item = odict.OrderedDict() + item.update({'ParameterName': key}) + item.update({'ApplyMethod': apply_method}) + if type(value) is bool: + item.update({'ParameterValue': 'on' if value else 'off'}) + else: + item.update({'ParameterValue': str(value)}) param_list.append(item) - if not len(param_list): - return {'results': False} + + if not len(param_list): + return {'results': False} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) @@ -843,6 +850,7 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None, 'message': 'Could not establish a connection to RDS'} kwargs = {} + kwargs.update({'DBParameterGroupName': name}) for key in ('Marker', 'Source'): if locals()[key] is not None: kwargs[key] = str(locals()[key]) @@ -850,26 +858,23 @@ def describe_parameters(name, Source=None, MaxRecords=None, Marker=None, if locals()['MaxRecords'] is not None: kwargs['MaxRecords'] = int(locals()['MaxRecords']) - r = conn.describe_db_parameters(DBParameterGroupName=name, **kwargs) + pag = conn.get_paginator('describe_db_parameters') + pit = pag.paginate(**kwargs) - if not r: - return {'result': False, - 'message': 'Failed to get RDS parameters for group {0}.' 
- .format(name)} - - results = r['Parameters'] keys = ['ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifieable', 'MinimumEngineVersion', 'ApplyMethod'] parameters = odict.OrderedDict() ret = {'result': True} - for result in results: - data = odict.OrderedDict() - for k in keys: - data[k] = result.get(k) - parameters[result.get('ParameterName')] = data + for p in pit: + for result in p['Parameters']: + data = odict.OrderedDict() + for k in keys: + data[k] = result.get(k) + + parameters[result.get('ParameterName')] = data ret['parameters'] = parameters return ret diff --git a/salt/states/boto_rds.py b/salt/states/boto_rds.py index c3bc7661556..c35eea58485 100644 --- a/salt/states/boto_rds.py +++ b/salt/states/boto_rds.py @@ -697,7 +697,10 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N changed = {} for items in parameters: for k, value in items.items(): - params[k] = value + if type(value) is bool: + params[k] = 'on' if value else 'off' + else: + params[k] = str(value) logging.debug('Parameters from user are : {0}.'.format(params)) options = __salt__['boto_rds.describe_parameters'](name=name, region=region, key=key, keyid=keyid, profile=profile) if not options.get('result'): @@ -705,8 +708,8 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N ret['comment'] = os.linesep.join([ret['comment'], 'Faled to get parameters for group {0}.'.format(name)]) return ret for parameter in options['parameters'].values(): - if parameter['ParameterName'] in params and str(params.get(parameter['ParameterName'])) != str(parameter['ParameterValue']): - logging.debug('Values that are being compared are {0}:{1} .'.format(params.get(parameter['ParameterName']), parameter['ParameterValue'])) + if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != str(parameter['ParameterValue']): + logging.debug('Values that are being compared for 
{0} are {1}:{2} .'.format(parameter['ParameterName'], params.get(parameter['ParameterName']), parameter['ParameterValue'])) changed[parameter['ParameterName']] = params.get(parameter['ParameterName']) if len(changed) > 0: if __opts__['test']: @@ -715,9 +718,9 @@ def parameter_present(name, db_parameter_group_family, description, parameters=N return ret update = __salt__['boto_rds.update_parameter_group'](name, parameters=changed, apply_method=apply_method, tags=tags, region=region, key=key, keyid=keyid, profile=profile) - if not update: + if 'error' in update: ret['result'] = False - ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}.'.format(changed, name)]) + ret['comment'] = os.linesep.join([ret['comment'], 'Failed to change parameters {0} for group {1}:'.format(changed, name), update['error']['message']]) return ret ret['changes']['Parameters'] = changed ret['comment'] = os.linesep.join([ret['comment'], 'Parameters {0} for group {1} are changed.'.format(changed, name)]) From 4e8da3045f11b11f95b02d45f04b6c411f5cf0e5 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 20 Sep 2017 10:12:32 +0200 Subject: [PATCH 106/348] Fixed logic for windows fallback Silly error - should have been the other way around. 
--- salt/modules/kubernetes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index b5628d41d32..1afa8d8569b 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -83,7 +83,7 @@ def __virtual__(): return False, 'python kubernetes library not found' -if salt.utils.is_windows(): +if not salt.utils.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): @@ -713,7 +713,7 @@ def delete_deployment(name, namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - if salt.utils.is_windows(): + if not salt.utils.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: From 3a089e450f93ad8b218bf45c7c00e09cd291d2b0 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 20 Sep 2017 14:26:06 +0200 Subject: [PATCH 107/348] Added tests for pid-file deletion in DaemonMixIn This is a follow up on this PR: https://github.com/saltstack/salt/pull/43366 Since we can get an OSError during PIDfile deletion with non-root users, it would make sense to also test for this. So here are the two test cases. One with an OSError and the other one without. 
--- tests/unit/utils/parsers_test.py | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/tests/unit/utils/parsers_test.py b/tests/unit/utils/parsers_test.py index f6cdb2c9c02..6a7b674727a 100644 --- a/tests/unit/utils/parsers_test.py +++ b/tests/unit/utils/parsers_test.py @@ -21,6 +21,7 @@ import salt.utils.parsers import salt.log.setup as log import salt.config import salt.syspaths +from salt.utils.parsers import DaemonMixIn ensure_in_syspath('../../') @@ -803,6 +804,62 @@ class SaltRunOptionParserTestCase(LogSettingsParserTests): self.parser = salt.utils.parsers.SaltRunOptionParser +@skipIf(NO_MOCK, NO_MOCK_REASON) +class DaemonMixInTestCase(LogSettingsParserTests): + ''' + Tests parsing Salt Master options + ''' + def setUp(self): + ''' + Setting up + ''' + # Set defaults + self.default_config = salt.config.DEFAULT_MASTER_OPTS + + # Log file + self.log_file = '/tmp/salt_run_parser_test' + # Function to patch + self.config_func = 'salt.config.master_config' + + # Mock log setup + self.setup_log() + + # Assign parser + self.parser = salt.utils.parsers.SaltRunOptionParser + + # Set PID + self.pid = '/some/fake.pid' + + # Setup mixin + self.mixin = DaemonMixIn() + self.mixin.info = None + self.mixin.config = {} + self.mixin.config['pidfile'] = self.pid + + def test_pid_file_deletion(self): + ''' + PIDfile deletion without exception. 
+ ''' + with patch('os.unlink', MagicMock()) as os_unlink: + with patch('os.path.isfile', MagicMock(return_value=True)): + with patch.object(self.mixin, 'info', MagicMock()): + self.mixin._mixin_before_exit() + assert self.mixin.info.call_count == 0 + assert os_unlink.call_count == 1 + + def test_pid_file_deletion_with_oserror(self): + ''' + PIDfile deletion with exception + ''' + with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink: + with patch('os.path.isfile', MagicMock(return_value=True)): + with patch.object(self.mixin, 'info', MagicMock()): + self.mixin._mixin_before_exit() + assert os_unlink.call_count == 1 + self.mixin.info.assert_called_with( + 'PIDfile could not be deleted: {}'.format(self.pid)) + + @skipIf(NO_MOCK, NO_MOCK_REASON) class SaltSSHOptionParserTestCase(LogSettingsParserTests): ''' @@ -944,4 +1001,5 @@ if __name__ == '__main__': SaltCloudParserTestCase, SPMParserTestCase, SaltAPIParserTestCase, + DaemonMixInTestCase, needs_daemon=False) From 08fba98735b7e32ebb7259a2e6ade34153969eee Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 20 Sep 2017 15:37:24 +0200 Subject: [PATCH 108/348] Fixed several issues with the test * Removed redundant import. * No longer inheriting from LogSettingsParserTests. * Replaced test class description with something meaningful. * Fixed indentation. I've also moved the class to the bottom, since all the classes inheriting from LogSettingsParserTests are in the block above. 
--- tests/unit/utils/parsers_test.py | 99 ++++++++++++++------------------ 1 file changed, 42 insertions(+), 57 deletions(-) diff --git a/tests/unit/utils/parsers_test.py b/tests/unit/utils/parsers_test.py index 6a7b674727a..ab3abf86ba8 100644 --- a/tests/unit/utils/parsers_test.py +++ b/tests/unit/utils/parsers_test.py @@ -21,7 +21,6 @@ import salt.utils.parsers import salt.log.setup as log import salt.config import salt.syspaths -from salt.utils.parsers import DaemonMixIn ensure_in_syspath('../../') @@ -804,62 +803,6 @@ class SaltRunOptionParserTestCase(LogSettingsParserTests): self.parser = salt.utils.parsers.SaltRunOptionParser -@skipIf(NO_MOCK, NO_MOCK_REASON) -class DaemonMixInTestCase(LogSettingsParserTests): - ''' - Tests parsing Salt Master options - ''' - def setUp(self): - ''' - Setting up - ''' - # Set defaults - self.default_config = salt.config.DEFAULT_MASTER_OPTS - - # Log file - self.log_file = '/tmp/salt_run_parser_test' - # Function to patch - self.config_func = 'salt.config.master_config' - - # Mock log setup - self.setup_log() - - # Assign parser - self.parser = salt.utils.parsers.SaltRunOptionParser - - # Set PID - self.pid = '/some/fake.pid' - - # Setup mixin - self.mixin = DaemonMixIn() - self.mixin.info = None - self.mixin.config = {} - self.mixin.config['pidfile'] = self.pid - - def test_pid_file_deletion(self): - ''' - PIDfile deletion without exception. 
- ''' - with patch('os.unlink', MagicMock()) as os_unlink: - with patch('os.path.isfile', MagicMock(return_value=True)): - with patch.object(self.mixin, 'info', MagicMock()): - self.mixin._mixin_before_exit() - assert self.mixin.info.call_count == 0 - assert os_unlink.call_count == 1 - - def test_pid_file_deletion_with_oserror(self): - ''' - PIDfile deletion with exception - ''' - with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink: - with patch('os.path.isfile', MagicMock(return_value=True)): - with patch.object(self.mixin, 'info', MagicMock()): - self.mixin._mixin_before_exit() - assert os_unlink.call_count == 1 - self.mixin.info.assert_called_with( - 'PIDfile could not be deleted: {}'.format(self.pid)) - - @skipIf(NO_MOCK, NO_MOCK_REASON) class SaltSSHOptionParserTestCase(LogSettingsParserTests): ''' @@ -983,6 +926,48 @@ class SaltAPIParserTestCase(LogSettingsParserTests): self.parser = salt.utils.parsers.SaltAPIParser +@skipIf(NO_MOCK, NO_MOCK_REASON) +class DaemonMixInTestCase(TestCase): + ''' + Tests the PIDfile deletion in the DaemonMixIn. + ''' + + def setUp(self): + ''' + Setting up + ''' + # Set PID + self.pid = '/some/fake.pid' + + # Setup mixin + self.mixin = salt.utils.parsers.DaemonMixIn() + self.mixin.info = None + self.mixin.config = {} + self.mixin.config['pidfile'] = self.pid + + def test_pid_file_deletion(self): + ''' + PIDfile deletion without exception. 
+ ''' + with patch('os.unlink', MagicMock()) as os_unlink: + with patch('os.path.isfile', MagicMock(return_value=True)): + with patch.object(self.mixin, 'info', MagicMock()): + self.mixin._mixin_before_exit() + assert self.mixin.info.call_count == 0 + assert os_unlink.call_count == 1 + + def test_pid_file_deletion_with_oserror(self): + ''' + PIDfile deletion with exception + ''' + with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink: + with patch('os.path.isfile', MagicMock(return_value=True)): + with patch.object(self.mixin, 'info', MagicMock()): + self.mixin._mixin_before_exit() + assert os_unlink.call_count == 1 + self.mixin.info.assert_called_with( + 'PIDfile could not be deleted: {}'.format(self.pid)) + # Hide the class from unittest framework when it searches for TestCase classes in the module del LogSettingsParserTests From 96f39a420b974f8658de182d3af72b2a7e9f8b9b Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 20 Sep 2017 16:35:01 +0200 Subject: [PATCH 109/348] Fixed linting Fix for "String format call with un-indexed curly braces". 
--- tests/unit/utils/parsers_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/utils/parsers_test.py b/tests/unit/utils/parsers_test.py index ab3abf86ba8..254daa7e8c4 100644 --- a/tests/unit/utils/parsers_test.py +++ b/tests/unit/utils/parsers_test.py @@ -966,7 +966,7 @@ class DaemonMixInTestCase(TestCase): self.mixin._mixin_before_exit() assert os_unlink.call_count == 1 self.mixin.info.assert_called_with( - 'PIDfile could not be deleted: {}'.format(self.pid)) + 'PIDfile could not be deleted: {0}'.format(self.pid)) # Hide the class from unittest framework when it searches for TestCase classes in the module del LogSettingsParserTests From 54842b501272c1730f7be9bec2bb1d5ce7187933 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 20 Sep 2017 16:58:17 -0400 Subject: [PATCH 110/348] Handle VPC/Subnet ID not found errors in boto_vpc module If a VPC or Subnet ID is not found when calling functions that are supposed to be checking for vpc/subnet ID existence, the return should be consistent by returning booleans/None instead of returning the NotFound error from AWS. The surrounding code blocks indicate that this is expected as well as unit test assertions. The moto library had a bug in it where it wasn't raising "x.NotFound" errors when it should have been. The latest version of moto has fixed this bug, causing our tests to fail since the boto_vpc module is not handling the "x.NotFound" errors separately from the generic BotoServerErrors. This fixes the test failures in the branch tests that were caused by upgrading the moto version to the latest release. 
--- salt/modules/boto_vpc.py | 102 ++++++++++++++++++++++----------------- 1 file changed, 59 insertions(+), 43 deletions(-) diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py index f18ae2d68a9..bebaafdd578 100644 --- a/salt/modules/boto_vpc.py +++ b/salt/modules/boto_vpc.py @@ -598,9 +598,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None, try: vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags, region=region, key=key, keyid=keyid, profile=profile) - return {'exists': bool(vpc_ids)} - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound': + # VPC was not found: handle the error and return False. + return {'exists': False} + return {'error': boto_err} + + return {'exists': bool(vpc_ids)} def create(cidr_block, instance_tenancy=None, vpc_name=None, @@ -722,27 +727,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None, try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile) - if not vpc_id: + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound': + # VPC was not found: handle the error and return None. return {'vpc': None} + return {'error': boto_err} - filter_parameters = {'vpc_ids': vpc_id} + if not vpc_id: + return {'vpc': None} + filter_parameters = {'vpc_ids': vpc_id} + + try: vpcs = conn.get_all_vpcs(**filter_parameters) + except BotoServerError as err: + return {'error': salt.utils.boto.get_error(err)} - if vpcs: - vpc = vpcs[0] # Found! - log.debug('Found VPC: {0}'.format(vpc.id)) + if vpcs: + vpc = vpcs[0] # Found! 
+ log.debug('Found VPC: {0}'.format(vpc.id)) - keys = ('id', 'cidr_block', 'is_default', 'state', 'tags', - 'dhcp_options_id', 'instance_tenancy') - _r = dict([(k, getattr(vpc, k)) for k in keys]) - _r.update({'region': getattr(vpc, 'region').name}) - return {'vpc': _r} - else: - return {'vpc': None} - - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + keys = ('id', 'cidr_block', 'is_default', 'state', 'tags', + 'dhcp_options_id', 'instance_tenancy') + _r = dict([(k, getattr(vpc, k)) for k in keys]) + _r.update({'region': getattr(vpc, 'region').name}) + return {'vpc': _r} + else: + return {'vpc': None} def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None, @@ -808,7 +820,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None Given subnet properties, find and return matching subnet ids ''' - if not any(subnet_name, tags, cidr): + if not any([subnet_name, tags, cidr]): raise SaltInvocationError('At least one of the following must be ' 'specified: subnet_name, cidr or tags.') @@ -926,34 +938,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None, try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - filter_parameters = {'filters': {}} + except BotoServerError as err: + return {'error': salt.utils.boto.get_error(err)} - if subnet_id: - filter_parameters['subnet_ids'] = [subnet_id] - - if subnet_name: - filter_parameters['filters']['tag:Name'] = subnet_name - - if cidr: - filter_parameters['filters']['cidr'] = cidr - - if tags: - for tag_name, tag_value in six.iteritems(tags): - filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value - - if zones: - filter_parameters['filters']['availability_zone'] = zones + filter_parameters = {'filters': {}} + if subnet_id: + filter_parameters['subnet_ids'] = [subnet_id] + if subnet_name: + filter_parameters['filters']['tag:Name'] = subnet_name + if cidr: + filter_parameters['filters']['cidr'] = cidr + if 
tags: + for tag_name, tag_value in six.iteritems(tags): + filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value + if zones: + filter_parameters['filters']['availability_zone'] = zones + try: subnets = conn.get_all_subnets(**filter_parameters) - log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets)) - if subnets: - log.info('Subnet {0} exists.'.format(subnet_name or subnet_id)) - return {'exists': True} - else: - log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id)) + except BotoServerError as err: + boto_err = salt.utils.boto.get_error(err) + if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound': + # Subnet was not found: handle the error and return False. return {'exists': False} - except BotoServerError as e: - return {'error': salt.utils.boto.get_error(e)} + return {'error': boto_err} + + log.debug('The filters criteria {0} matched the following subnets:{1}'.format(filter_parameters, subnets)) + if subnets: + log.info('Subnet {0} exists.'.format(subnet_name or subnet_id)) + return {'exists': True} + else: + log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id)) + return {'exists': False} def get_subnet_association(subnets, region=None, key=None, keyid=None, From 625eabb83f4ec7bd1e2ef529ba36d4d809d069ab Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Wed, 20 Sep 2017 14:32:47 +0200 Subject: [PATCH 111/348] multiprocessing minion option: documentation fixes --- doc/man/salt.7 | 1 + doc/ref/configuration/minion.rst | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/man/salt.7 b/doc/man/salt.7 index 7d4f5c2ed50..7bc0ab64d36 100644 --- a/doc/man/salt.7 +++ b/doc/man/salt.7 @@ -10795,6 +10795,7 @@ cmd_whitelist_glob: .UNINDENT .UNINDENT .SS Thread Settings +.SS \fBmultiprocessing\fP .sp Default: \fBTrue\fP .sp diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index ded0b726992..31317a06fc6 
100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2199,11 +2199,14 @@ Thread Settings .. conf_minion:: multiprocessing +``multiprocessing`` +------- + Default: ``True`` -If `multiprocessing` is enabled when a minion receives a +If ``multiprocessing`` is enabled when a minion receives a publication a new process is spawned and the command is executed therein. -Conversely, if `multiprocessing` is disabled the new publication will be run +Conversely, if ``multiprocessing`` is disabled the new publication will be run executed in a thread. From 039d2369487bdfa7963909a7b11fa627070ec8d8 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 21 Sep 2017 16:44:53 +0300 Subject: [PATCH 112/348] Fixed `list` and `contains` redis cache logic. Wrong keys was used to retrieve the data. --- salt/cache/redis_cache.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py index b02a0851e5c..0f52dfd6ad2 100644 --- a/salt/cache/redis_cache.py +++ b/salt/cache/redis_cache.py @@ -421,18 +421,17 @@ def list_(bank): Lists entries stored in the specified bank. ''' redis_server = _get_redis_server() - bank_keys_redis_key = _get_bank_keys_redis_key(bank) - bank_keys = None + bank_redis_key = _get_bank_redis_key(bank) try: - bank_keys = redis_server.smembers(bank_keys_redis_key) + banks = redis_server.smembers(bank_redis_key) except (RedisConnectionError, RedisResponseError) as rerr: - mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key, + mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key, rerr=rerr) log.error(mesg) raise SaltCacheError(mesg) - if not bank_keys: + if not banks: return [] - return list(bank_keys) + return list(banks) def contains(bank, key): @@ -440,15 +439,14 @@ def contains(bank, key): Checks if the specified bank contains the specified key. 
''' redis_server = _get_redis_server() - bank_keys_redis_key = _get_bank_keys_redis_key(bank) - bank_keys = None + bank_redis_key = _get_bank_redis_key(bank) try: - bank_keys = redis_server.smembers(bank_keys_redis_key) + banks = redis_server.smembers(bank_redis_key) except (RedisConnectionError, RedisResponseError) as rerr: - mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key, + mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key, rerr=rerr) log.error(mesg) raise SaltCacheError(mesg) - if not bank_keys: + if not banks: return False - return key in bank_keys + return key in banks From 3fb42bc238a7bf2e379686cfbf33db06884ffa58 Mon Sep 17 00:00:00 2001 From: matt Date: Fri, 8 Sep 2017 17:10:07 +0200 Subject: [PATCH 113/348] Fix env_order in state.py Fixes #42165 --- salt/state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/state.py b/salt/state.py index 729e740f5fb..bb6f71b1499 100644 --- a/salt/state.py +++ b/salt/state.py @@ -2906,7 +2906,7 @@ class BaseHighState(object): Returns: {'saltenv': ['state1', 'state2', ...]} ''' - matches = {} + matches = DefaultOrderedDict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, body in six.iteritems(top): if self.opts['environment']: From d91c47c6f0422ec4c0e3d851a2b2275cb59d506b Mon Sep 17 00:00:00 2001 From: Raymond Piller Date: Wed, 20 Sep 2017 21:41:08 -0500 Subject: [PATCH 114/348] Salt Repo has Deb 9 and 8 --- doc/topics/installation/debian.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/installation/debian.rst b/doc/topics/installation/debian.rst index 36a47fa8ffe..369991ebaa4 100644 --- a/doc/topics/installation/debian.rst +++ b/doc/topics/installation/debian.rst @@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described Installation from the Official SaltStack Repository =================================================== -Packages for 
Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the +Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the Official SaltStack repository. Instructions are at https://repo.saltstack.com/#debian. From 2fd88e94fabbd9d1110689772fcdb6a62a2b168d Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 10:11:28 -0500 Subject: [PATCH 115/348] Fix RST headers for runners (2016.11 branch) To conform with the rest of the rst files for runner docs, they should only contain the module name. --- doc/ref/runners/all/salt.runners.auth.rst | 4 ++-- doc/ref/runners/all/salt.runners.event.rst | 4 ++-- doc/ref/runners/all/salt.runners.smartos_vmadm.rst | 4 ++-- doc/ref/runners/all/salt.runners.vistara.rst | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/ref/runners/all/salt.runners.auth.rst b/doc/ref/runners/all/salt.runners.auth.rst index b82907d4d11..a3d933f2e43 100644 --- a/doc/ref/runners/all/salt.runners.auth.rst +++ b/doc/ref/runners/all/salt.runners.auth.rst @@ -1,5 +1,5 @@ -salt.runners.auth module -======================== +salt.runners.auth +================= .. automodule:: salt.runners.auth :members: diff --git a/doc/ref/runners/all/salt.runners.event.rst b/doc/ref/runners/all/salt.runners.event.rst index 9b07aa9988b..c2d505a1f2b 100644 --- a/doc/ref/runners/all/salt.runners.event.rst +++ b/doc/ref/runners/all/salt.runners.event.rst @@ -1,5 +1,5 @@ -salt.runners.event module -========================= +salt.runners.event +================== .. automodule:: salt.runners.event :members: diff --git a/doc/ref/runners/all/salt.runners.smartos_vmadm.rst b/doc/ref/runners/all/salt.runners.smartos_vmadm.rst index 5ee3d03eb1d..7b5a7c4834e 100644 --- a/doc/ref/runners/all/salt.runners.smartos_vmadm.rst +++ b/doc/ref/runners/all/salt.runners.smartos_vmadm.rst @@ -1,5 +1,5 @@ -salt.runners.smartos_vmadm module -================================= +salt.runners.smartos_vmadm +========================== .. 
automodule:: salt.runners.smartos_vmadm :members: diff --git a/doc/ref/runners/all/salt.runners.vistara.rst b/doc/ref/runners/all/salt.runners.vistara.rst index a66b06f6d2e..0f1400f4c7b 100644 --- a/doc/ref/runners/all/salt.runners.vistara.rst +++ b/doc/ref/runners/all/salt.runners.vistara.rst @@ -1,5 +1,5 @@ -salt.runners.vistara module -=========================== +salt.runners.vistara +==================== .. automodule:: salt.runners.vistara :members: From c0a79c70a447271fef957f735f53405b01d1b720 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 10:27:12 -0500 Subject: [PATCH 116/348] Fix RST headers for runners (2017.7 branch) To conform with the rest of the rst files for runner docs, they should only contain the module name. --- doc/ref/runners/all/salt.runners.digicertapi.rst | 4 ++-- doc/ref/runners/all/salt.runners.mattermost.rst | 4 ++-- doc/ref/runners/all/salt.runners.vault.rst | 4 ++-- doc/ref/runners/all/salt.runners.venafiapi.rst | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/ref/runners/all/salt.runners.digicertapi.rst b/doc/ref/runners/all/salt.runners.digicertapi.rst index 10919c8a91e..280fc059faf 100644 --- a/doc/ref/runners/all/salt.runners.digicertapi.rst +++ b/doc/ref/runners/all/salt.runners.digicertapi.rst @@ -1,5 +1,5 @@ -salt.runners.digicertapi module -=============================== +salt.runners.digicertapi +======================== .. 
automodule:: salt.runners.digicertapi :members: diff --git a/doc/ref/runners/all/salt.runners.mattermost.rst b/doc/ref/runners/all/salt.runners.mattermost.rst index 7fa1e2f3d44..c33a9f459b5 100644 --- a/doc/ref/runners/all/salt.runners.mattermost.rst +++ b/doc/ref/runners/all/salt.runners.mattermost.rst @@ -1,5 +1,5 @@ -salt.runners.mattermost module -============================== +salt.runners.mattermost +======================= **Note for 2017.7 releases!** diff --git a/doc/ref/runners/all/salt.runners.vault.rst b/doc/ref/runners/all/salt.runners.vault.rst index 7c424f24ee3..434774b0dd2 100644 --- a/doc/ref/runners/all/salt.runners.vault.rst +++ b/doc/ref/runners/all/salt.runners.vault.rst @@ -1,5 +1,5 @@ -salt.runners.vault module -========================= +salt.runners.vault +================== .. automodule:: salt.runners.vault :members: diff --git a/doc/ref/runners/all/salt.runners.venafiapi.rst b/doc/ref/runners/all/salt.runners.venafiapi.rst index 9fd9c41de45..d7e4d545eb6 100644 --- a/doc/ref/runners/all/salt.runners.venafiapi.rst +++ b/doc/ref/runners/all/salt.runners.venafiapi.rst @@ -1,5 +1,5 @@ -salt.runners.venafiapi module -============================= +salt.runners.venafiapi +====================== .. 
automodule:: salt.runners.venafiapi :members: From 6fcb7e7739cc88d7684857b7a1939af62b9debdf Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Thu, 21 Sep 2017 17:43:05 +0200 Subject: [PATCH 117/348] Minor bugfixes - do not try to add devices if assemble failed - sort uuid list in error message --- salt/states/mdadm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/states/mdadm.py b/salt/states/mdadm.py index 4588d859fab..64cae4a6d6f 100644 --- a/salt/states/mdadm.py +++ b/salt/states/mdadm.py @@ -107,7 +107,8 @@ def present(name, new_devices.append(dev) if len(uuid_dict) > 1: - ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format(uuid_dict.keys()) + ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format( + sorted(uuid_dict.keys())) ret['result'] = False return ret elif len(uuid_dict) == 1: @@ -192,7 +193,7 @@ def present(name, else: ret['comment'] = 'Raid {0} already present.'.format(name) - if (do_assemble or present) and len(new_devices) > 0: + if (do_assemble or present) and len(new_devices) > 0 and ret['result']: for d in new_devices: res = __salt__['raid.add'](name, d) if not res: From 1c006db2a687ffbab858e73b762adc20e355dda9 Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Thu, 21 Sep 2017 17:46:15 +0200 Subject: [PATCH 118/348] Fix and extend mdadm unit tests --- tests/unit/states/test_mdadm.py | 138 ++++++++++++++++++++++++++------ 1 file changed, 115 insertions(+), 23 deletions(-) diff --git a/tests/unit/states/test_mdadm.py b/tests/unit/states/test_mdadm.py index 9095a1926de..3e0cbdf7ea5 100644 --- a/tests/unit/states/test_mdadm.py +++ b/tests/unit/states/test_mdadm.py @@ -32,41 +32,133 @@ class MdadmTestCase(TestCase, LoaderModuleMockMixin): ''' Test to verify that the raid is present ''' - ret = [{'changes': {}, 'comment': 'Raid salt already present', + ret = [{'changes': {}, 'comment': 'Raid salt already present.', 'name': 'salt', 
'result': True}, {'changes': {}, - 'comment': "Devices are a mix of RAID constituents" - " (['dev0']) and non-RAID-constituents(['dev1']).", + 'comment': "Devices are a mix of RAID constituents with multiple MD_UUIDs:" + " ['6be5fc45:05802bba:1c2d6722:666f0e03', 'ffffffff:ffffffff:ffffffff:ffffffff'].", 'name': 'salt', 'result': False}, {'changes': {}, 'comment': 'Raid will be created with: True', 'name': 'salt', 'result': None}, {'changes': {}, 'comment': 'Raid salt failed to be created.', + 'name': 'salt', 'result': False}, + {'changes': {'uuid': '6be5fc45:05802bba:1c2d6722:666f0e03'}, 'comment': 'Raid salt created.', + 'name': 'salt', 'result': True}, + {'changes': {'added': ['dev1'], 'uuid': '6be5fc45:05802bba:1c2d6722:666f0e03'}, + 'comment': 'Raid salt assembled. Added new device dev1 to salt.\n', + 'name': 'salt', 'result': True}, + {'changes': {'added': ['dev1']}, + 'comment': 'Raid salt already present. Added new device dev1 to salt.\n', + 'name': 'salt', 'result': True}, + {'changes': {}, 'comment': 'Raid salt failed to be assembled.', 'name': 'salt', 'result': False}] - mock = MagicMock(side_effect=[{'salt': True}, {'salt': False}, - {'salt': False}, {'salt': False}, - {'salt': False}]) - with patch.dict(mdadm.__salt__, {'raid.list': mock}): - self.assertEqual(mdadm.present("salt", 5, "dev0"), ret[0]) + mock_raid_list_exists = MagicMock(return_value={'salt': {'uuid': '6be5fc45:05802bba:1c2d6722:666f0e03'}}) + mock_raid_list_missing = MagicMock(return_value={}) - mock = MagicMock(side_effect=[0, 1]) - with patch.dict(mdadm.__salt__, {'cmd.retcode': mock}): - self.assertDictEqual(mdadm.present("salt", 5, - ["dev0", "dev1"]), - ret[1]) + mock_file_access_ok = MagicMock(return_value=True) - mock = MagicMock(return_value=True) - with patch.dict(mdadm.__salt__, {'cmd.retcode': mock}): - with patch.dict(mdadm.__opts__, {'test': True}): - with patch.dict(mdadm.__salt__, {'raid.create': mock}): - self.assertDictEqual(mdadm.present("salt", 5, "dev0"), - ret[2]) + 
mock_raid_examine_ok = MagicMock(return_value={'MD_UUID': '6be5fc45:05802bba:1c2d6722:666f0e03'}) + mock_raid_examine_missing = MagicMock(return_value={}) - with patch.dict(mdadm.__opts__, {'test': False}): - with patch.dict(mdadm.__salt__, {'raid.create': mock}): - self.assertDictEqual(mdadm.present("salt", 5, "dev0"), - ret[3]) + mock_raid_create_success = MagicMock(return_value=True) + mock_raid_create_fail = MagicMock(return_value=False) + + mock_raid_assemble_success = MagicMock(return_value=True) + mock_raid_assemble_fail = MagicMock(return_value=False) + + mock_raid_add_success = MagicMock(return_value=True) + + mock_raid_save_config = MagicMock(return_value=True) + + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_exists, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_ok + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertEqual(mdadm.present("salt", 5, "dev0"), ret[0]) + + mock_raid_examine_mixed = MagicMock(side_effect=[ + {'MD_UUID': '6be5fc45:05802bba:1c2d6722:666f0e03'}, {'MD_UUID': 'ffffffff:ffffffff:ffffffff:ffffffff'}, + ]) + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_missing, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_mixed + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertEqual(mdadm.present("salt", 5, ["dev0", "dev1"]), ret[1]) + + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_missing, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_missing, + 'raid.create': mock_raid_create_success + }): + with patch.dict(mdadm.__opts__, {'test': True}): + self.assertDictEqual(mdadm.present("salt", 5, "dev0"), ret[2]) + + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_missing, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_missing, + 'raid.create': mock_raid_create_fail + }): + with patch.dict(mdadm.__opts__, {'test': False}): + 
self.assertDictEqual(mdadm.present("salt", 5, "dev0"), ret[3]) + + mock_raid_list_create = MagicMock(side_effect=[{}, {'salt': {'uuid': '6be5fc45:05802bba:1c2d6722:666f0e03'}}]) + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_create, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_missing, + 'raid.create': mock_raid_create_success, + 'raid.save_config': mock_raid_save_config + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertDictEqual(mdadm.present("salt", 5, "dev0"), ret[4]) + + mock_raid_examine_replaced = MagicMock(side_effect=[ + {'MD_UUID': '6be5fc45:05802bba:1c2d6722:666f0e03'}, {}, + ]) + mock_raid_list_create = MagicMock(side_effect=[{}, {'salt': {'uuid': '6be5fc45:05802bba:1c2d6722:666f0e03'}}]) + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_create, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_replaced, + 'raid.assemble': mock_raid_assemble_success, + 'raid.add': mock_raid_add_success, + 'raid.save_config': mock_raid_save_config + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertDictEqual(mdadm.present("salt", 5, ["dev0", "dev1"]), ret[5]) + + mock_raid_examine_replaced = MagicMock(side_effect=[ + {'MD_UUID': '6be5fc45:05802bba:1c2d6722:666f0e03'}, {}, + ]) + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_exists, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_replaced, + 'raid.add': mock_raid_add_success, + 'raid.save_config': mock_raid_save_config + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertDictEqual(mdadm.present("salt", 5, ["dev0", "dev1"]), ret[6]) + + mock_raid_examine_replaced = MagicMock(side_effect=[ + {'MD_UUID': '6be5fc45:05802bba:1c2d6722:666f0e03'}, {}, + ]) + with patch.dict(mdadm.__salt__, { + 'raid.list': mock_raid_list_missing, + 'file.access': mock_file_access_ok, + 'raid.examine': mock_raid_examine_replaced, + 'raid.assemble': 
mock_raid_assemble_fail, + }): + with patch.dict(mdadm.__opts__, {'test': False}): + self.assertDictEqual(mdadm.present("salt", 5, ["dev0", "dev1"]), ret[7]) def test_absent(self): ''' From 9b74634b23044315fed3898e6f9f005198ec1ea7 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 10:54:27 -0500 Subject: [PATCH 119/348] Fix badly-formatted RST in mattermost runner docstring --- salt/runners/mattermost.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/runners/mattermost.py b/salt/runners/mattermost.py index 2bd3d928c46..686c9602eff 100644 --- a/salt/runners/mattermost.py +++ b/salt/runners/mattermost.py @@ -6,9 +6,10 @@ Module for sending messages to Mattermost :configuration: This module can be used by either passing an api_url and hook directly or by specifying both in a configuration profile in the salt - master/minion config. - For example: + master/minion config. For example: + .. code-block:: yaml + mattermost: hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_url: https://example.com From 292f8c79b8694aaded47375ae867d152fcd9c545 Mon Sep 17 00:00:00 2001 From: vernoncole Date: Thu, 21 Sep 2017 10:26:00 -0600 Subject: [PATCH 120/348] correct default value for salt.cache.Cache --- salt/cache/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cache/__init__.py b/salt/cache/__init__.py index 94d7a36f1e5..fc5e5f09721 100644 --- a/salt/cache/__init__.py +++ b/salt/cache/__init__.py @@ -73,7 +73,7 @@ class Cache(object): self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir - self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS) + self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs From 84f34c93beee2cdf74cd1ee85d743d963bac4dba Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 11:55:40 -0500 Subject: [PATCH 121/348] Backport the non-fileclient 
changes from PR 43518 to 2017.7 This fixes the unnecessary re-downloading reported in #38971 in 2017.7 without using the new fileclient capabilities added in develop. It includes a helper function in the `file.cached` state that will need to be removed once we merge forward into develop. --- salt/modules/file.py | 30 +-- salt/states/archive.py | 234 ++++++++++++----------- salt/states/file.py | 416 +++++++++++++++++++++++++++++++++++++++-- salt/utils/files.py | 22 +++ 4 files changed, 548 insertions(+), 154 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index b8f1cdb00c6..21a60dda517 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -54,7 +54,8 @@ import salt.utils.files import salt.utils.locales import salt.utils.templates import salt.utils.url -from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message +from salt.exceptions import CommandExecutionError, SaltInvocationError, get_error_message as _get_error_message +from salt.utils.files import HASHES, HASHES_REVMAP log = logging.getLogger(__name__) @@ -62,16 +63,6 @@ __func_alias__ = { 'makedirs_': 'makedirs' } -HASHES = { - 'sha512': 128, - 'sha384': 96, - 'sha256': 64, - 'sha224': 56, - 'sha1': 40, - 'md5': 32, -} -HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)]) - def __virtual__(): ''' @@ -3627,14 +3618,8 @@ def source_list(source, source_hash, saltenv): ret = (single_src, single_hash) break elif proto.startswith('http') or proto == 'ftp': - try: - if __salt__['cp.cache_file'](single_src): - ret = (single_src, single_hash) - break - except MinionError as exc: - # Error downloading file. Log the caught exception and - # continue on to the next source. 
- log.exception(exc) + ret = (single_src, single_hash) + break elif proto == 'file' and os.path.exists(urlparsed_single_src.path): ret = (single_src, single_hash) break @@ -3654,9 +3639,8 @@ def source_list(source, source_hash, saltenv): ret = (single, source_hash) break elif proto.startswith('http') or proto == 'ftp': - if __salt__['cp.cache_file'](single): - ret = (single, source_hash) - break + ret = (single, source_hash) + break elif single.startswith('/') and os.path.exists(single): ret = (single, source_hash) break @@ -4478,7 +4462,7 @@ def check_file_meta( ''' changes = {} if not source_sum: - source_sum = dict() + source_sum = {} lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False) if not lstats: changes['newfile'] = name diff --git a/salt/states/archive.py b/salt/states/archive.py index f053d3c2076..a992adb8b74 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -61,16 +61,30 @@ def _gen_checksum(path): 'hash_type': __opts__['hash_type']} -def _update_checksum(cached_source): - cached_source_sum = '.'.join((cached_source, 'hash')) - source_sum = _gen_checksum(cached_source) +def _checksum_file_path(path): + relpath = '.'.join((os.path.relpath(path, __opts__['cachedir']), 'hash')) + if re.match(r'..[/\\]', relpath): + # path is a local file + relpath = salt.utils.path_join( + 'local', + os.path.splitdrive(path)[-1].lstrip('/\\'), + ) + return salt.utils.path_join(__opts__['cachedir'], 'archive_hash', relpath) + + +def _update_checksum(path): + checksum_file = _checksum_file_path(path) + checksum_dir = os.path.dirname(checksum_file) + if not os.path.isdir(checksum_dir): + os.makedirs(checksum_dir) + source_sum = _gen_checksum(path) hash_type = source_sum.get('hash_type') hsum = source_sum.get('hsum') if hash_type and hsum: lines = [] try: try: - with salt.utils.fopen(cached_source_sum, 'r') as fp_: + with salt.utils.fopen(checksum_file, 'r') as fp_: for line in fp_: try: 
lines.append(line.rstrip('\n').split(':', 1)) @@ -80,7 +94,7 @@ def _update_checksum(cached_source): if exc.errno != errno.ENOENT: raise - with salt.utils.fopen(cached_source_sum, 'w') as fp_: + with salt.utils.fopen(checksum_file, 'w') as fp_: for line in lines: if line[0] == hash_type: line[1] = hsum @@ -90,16 +104,16 @@ def _update_checksum(cached_source): except (IOError, OSError) as exc: log.warning( 'Failed to update checksum for %s: %s', - cached_source, exc.__str__() + path, exc.__str__(), exc_info=True ) -def _read_cached_checksum(cached_source, form=None): +def _read_cached_checksum(path, form=None): if form is None: form = __opts__['hash_type'] - path = '.'.join((cached_source, 'hash')) + checksum_file = _checksum_file_path(path) try: - with salt.utils.fopen(path, 'r') as fp_: + with salt.utils.fopen(checksum_file, 'r') as fp_: for line in fp_: # Should only be one line in this file but just in case it # isn't, read only a single line to avoid overuse of memory. @@ -114,9 +128,9 @@ def _read_cached_checksum(cached_source, form=None): return {'hash_type': hash_type, 'hsum': hsum} -def _compare_checksum(cached_source, source_sum): +def _compare_checksum(cached, source_sum): cached_sum = _read_cached_checksum( - cached_source, + cached, form=source_sum.get('hash_type', __opts__['hash_type']) ) return source_sum == cached_sum @@ -152,7 +166,6 @@ def extracted(name, user=None, group=None, if_missing=None, - keep=False, trim_output=False, use_cmd_unzip=None, extract_perms=True, @@ -389,6 +402,22 @@ def extracted(name, .. versionadded:: 2016.3.4 + keep_source : True + For ``source`` archives not local to the minion (i.e. from the Salt + fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt + will need to download the archive to the minion cache before they can + be extracted. To remove the downloaded archive after extraction, set + this argument to ``False``. + + .. 
versionadded:: 2017.7.3 + + keep : True + Same as ``keep_source``, kept for backward-compatibility. + + .. note:: + If both ``keep_source`` and ``keep`` are used, ``keep`` will be + ignored. + password **For ZIP archives only.** Password used for extraction. @@ -527,13 +556,6 @@ def extracted(name, simply checked for existence and extraction will be skipped if if is present. - keep : False - For ``source`` archives not local to the minion (i.e. from the Salt - fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt - will need to download the archive to the minion cache before they can - be extracted. After extraction, these source archives will be removed - unless this argument is set to ``True``. - trim_output : False Useful for archives with many files in them. This can either be set to ``True`` (in which case only the first 100 files extracted will be @@ -635,6 +657,21 @@ def extracted(name, # Remove pub kwargs as they're irrelevant here. kwargs = salt.utils.clean_kwargs(**kwargs) + if 'keep_source' in kwargs and 'keep' in kwargs: + ret.setdefault('warnings', []).append( + 'Both \'keep_source\' and \'keep\' were used. Since these both ' + 'do the same thing, \'keep\' was ignored.' 
+ ) + keep_source = bool(kwargs.pop('keep_source')) + kwargs.pop('keep') + elif 'keep_source' in kwargs: + keep_source = bool(kwargs.pop('keep_source')) + elif 'keep' in kwargs: + keep_source = bool(kwargs.pop('keep')) + else: + # Neither was passed, default is True + keep_source = True + if not _path_is_abs(name): ret['comment'] = '{0} is not an absolute path'.format(name) return ret @@ -730,10 +767,10 @@ def extracted(name, urlparsed_source = _urlparse(source_match) source_hash_basename = urlparsed_source.path or urlparsed_source.netloc - source_is_local = urlparsed_source.scheme in ('', 'file') + source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS if source_is_local: # Get rid of "file://" from start of source_match - source_match = urlparsed_source.path + source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path)) if not os.path.isfile(source_match): ret['comment'] = 'Source file \'{0}\' does not exist'.format(source_match) return ret @@ -882,95 +919,59 @@ def extracted(name, source_sum = {} if source_is_local: - cached_source = source_match + cached = source_match else: - cached_source = os.path.join( - __opts__['cachedir'], - 'files', - __env__, - re.sub(r'[:/\\]', '_', source_hash_basename), - ) - - if os.path.isdir(cached_source): - # Prevent a traceback from attempting to read from a directory path - salt.utils.rm_rf(cached_source) - - existing_cached_source_sum = _read_cached_checksum(cached_source) - - if source_is_local: - # No need to download archive, it's local to the minion - update_source = False - else: - if not os.path.isfile(cached_source): - # Archive not cached, we need to download it - update_source = True - else: - # Archive is cached, keep=True likely used in prior run. If we need - # to verify the hash, then we *have* to update the source archive - # to know whether or not the hash changed. Hence the below - # statement. bool(source_hash) will be True if source_hash was - # passed, and otherwise False. 
- update_source = bool(source_hash) - - if update_source: if __opts__['test']: ret['result'] = None ret['comment'] = ( - 'Archive {0} would be downloaded to cache and checked to ' - 'discover if extraction is necessary'.format( + 'Archive {0} would be ached (if necessary) and checked to ' + 'discover if extraction is needed'.format( salt.utils.url.redact_http_basic_auth(source_match) ) ) return ret - # NOTE: This will result in more than one copy of the source archive on - # the minion. The reason this is necessary is because if we are - # tracking the checksum using source_hash_update, we need a location - # where we can place the checksum file alongside the cached source - # file, where it won't be overwritten by caching a file with the same - # name in the same parent dir as the source file. Long term, we should - # come up with a better solution for this. - file_result = __states__['file.managed'](cached_source, - source=source_match, - source_hash=source_hash, - source_hash_name=source_hash_name, - makedirs=True, - skip_verify=skip_verify) - log.debug('file.managed: {0}'.format(file_result)) - - # Prevent a traceback if errors prevented the above state from getting - # off the ground. - if isinstance(file_result, list): - try: - ret['comment'] = '\n'.join(file_result) - except TypeError: - ret['comment'] = '\n'.join([str(x) for x in file_result]) + if 'file.cached' not in __states__: + # Shouldn't happen unless there is a traceback keeping + # salt/states/file.py from being processed through the loader. If + # that is the case, we have much more important problems as _all_ + # file states would be unavailable. 
+ ret['comment'] = ( + 'Unable to cache {0}, file.cached state not available'.format( + source_match + ) + ) return ret try: - if not file_result['result']: - log.debug( - 'failed to download %s', - salt.utils.url.redact_http_basic_auth(source_match) - ) - return file_result - except TypeError: - if not file_result: - log.debug( - 'failed to download %s', - salt.utils.url.redact_http_basic_auth(source_match) - ) - return file_result + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + except Exception as exc: + msg = 'Failed to cache {0}: {1}'.format(source_match, exc.__str__()) + log.exception(msg) + ret['comment'] = msg + return ret + else: + log.debug('file.cached: {0}'.format(result)) - else: - log.debug( - 'Archive %s is already in cache', - salt.utils.url.redact_http_basic_auth(source_match) - ) + if result['result']: + # Get the path of the file in the minion cache + cached = __salt__['cp.is_cached'](source_match) + else: + log.debug( + 'failed to download %s', + salt.utils.url.redact_http_basic_auth(source_match) + ) + return result + + existing_cached_source_sum = _read_cached_checksum(cached) if source_hash and source_hash_update and not skip_verify: # Create local hash sum file if we're going to track sum update - _update_checksum(cached_source) + _update_checksum(cached) if archive_format == 'zip' and not password: log.debug('Checking %s to see if it is password-protected', @@ -979,7 +980,7 @@ def extracted(name, # implicitly enabled by setting the "options" argument. 
try: encrypted_zip = __salt__['archive.is_encrypted']( - cached_source, + cached, clean=False, saltenv=__env__) except CommandExecutionError: @@ -997,7 +998,7 @@ def extracted(name, return ret try: - contents = __salt__['archive.list'](cached_source, + contents = __salt__['archive.list'](cached, archive_format=archive_format, options=list_options, strip_components=strip_components, @@ -1166,7 +1167,7 @@ def extracted(name, if not extraction_needed \ and source_hash_update \ and existing_cached_source_sum is not None \ - and not _compare_checksum(cached_source, existing_cached_source_sum): + and not _compare_checksum(cached, existing_cached_source_sum): extraction_needed = True source_hash_trigger = True else: @@ -1224,13 +1225,13 @@ def extracted(name, __states__['file.directory'](name, user=user, makedirs=True) created_destdir = True - log.debug('Extracting {0} to {1}'.format(cached_source, name)) + log.debug('Extracting {0} to {1}'.format(cached, name)) try: if archive_format == 'zip': if use_cmd_unzip: try: files = __salt__['archive.cmd_unzip']( - cached_source, + cached, name, options=options, trim_output=trim_output, @@ -1240,7 +1241,7 @@ def extracted(name, ret['comment'] = exc.strerror return ret else: - files = __salt__['archive.unzip'](cached_source, + files = __salt__['archive.unzip'](cached, name, options=options, trim_output=trim_output, @@ -1248,7 +1249,7 @@ def extracted(name, **kwargs) elif archive_format == 'rar': try: - files = __salt__['archive.unrar'](cached_source, + files = __salt__['archive.unrar'](cached, name, trim_output=trim_output, **kwargs) @@ -1258,7 +1259,7 @@ def extracted(name, else: if options is None: try: - with closing(tarfile.open(cached_source, 'r')) as tar: + with closing(tarfile.open(cached, 'r')) as tar: tar.extractall(name) files = tar.getnames() if trim_output: @@ -1266,7 +1267,7 @@ def extracted(name, except tarfile.ReadError: if salt.utils.which('xz'): if __salt__['cmd.retcode']( - ['xz', '-t', cached_source], + ['xz', 
'-t', cached], python_shell=False, ignore_retcode=True) == 0: # XZ-compressed data @@ -1282,7 +1283,7 @@ def extracted(name, # pipe it to tar for extraction. cmd = 'xz --decompress --stdout {0} | tar xvf -' results = __salt__['cmd.run_all']( - cmd.format(_cmd_quote(cached_source)), + cmd.format(_cmd_quote(cached)), cwd=name, python_shell=True) if results['retcode'] != 0: @@ -1352,7 +1353,7 @@ def extracted(name, tar_cmd.append(tar_shortopts) tar_cmd.extend(tar_longopts) - tar_cmd.extend(['-f', cached_source]) + tar_cmd.extend(['-f', cached]) results = __salt__['cmd.run_all'](tar_cmd, cwd=name, @@ -1523,18 +1524,15 @@ def extracted(name, for item in enforce_failed: ret['comment'] += '\n- {0}'.format(item) - if not source_is_local and not keep: - for path in (cached_source, __salt__['cp.is_cached'](source_match)): - if not path: - continue - log.debug('Cleaning cached source file %s', path) - try: - os.remove(path) - except OSError as exc: - if exc.errno != errno.ENOENT: - log.error( - 'Failed to clean cached source file %s: %s', - cached_source, exc.__str__() - ) + if not source_is_local: + if keep_source: + log.debug('Keeping cached source file %s', cached) + else: + log.debug('Cleaning cached source file %s', cached) + result = __states__['file.not_cached'](source_match, saltenv=__env__) + if not result['result']: + # Don't let failure to delete cached file cause the state + # itself to fail, just drop it in the warnings. 
+ ret.setdefault('warnings', []).append(result['comment']) return ret diff --git a/salt/states/file.py b/salt/states/file.py index 8d819980168..3a2de6047cc 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -294,6 +294,7 @@ if salt.utils.is_windows(): # Import 3rd-party libs import salt.ext.six as six from salt.ext.six.moves import zip_longest +from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module if salt.utils.is_windows(): import pywintypes import win32com.client @@ -1519,6 +1520,7 @@ def managed(name, source=None, source_hash='', source_hash_name=None, + keep_source=True, user=None, group=None, mode=None, @@ -1717,6 +1719,15 @@ def managed(name, .. versionadded:: 2016.3.5 + keep_source : True + Set to ``False`` to discard the cached copy of the source file once the + state completes. This can be useful for larger files to keep them from + taking up space in minion cache. However, keep in mind that discarding + the source file will result in the state needing to re-download the + source file if the state is run again. + + .. 
versionadded:: 2017.7.3 + user The user to own the file, this defaults to the user salt is running as on the minion @@ -2415,8 +2426,9 @@ def managed(name, except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) - if os.path.isfile(tmp_filename): - os.remove(tmp_filename) + salt.utils.files.remove(tmp_filename) + if not keep_source and sfn: + salt.utils.files.remove(sfn) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) # file being updated to verify using check_cmd @@ -2434,15 +2446,9 @@ def managed(name, cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts) if isinstance(cret, dict): ret.update(cret) - if os.path.isfile(tmp_filename): - os.remove(tmp_filename) - if sfn and os.path.isfile(sfn): - os.remove(sfn) + salt.utils.files.remove(tmp_filename) return ret - if sfn and os.path.isfile(sfn): - os.remove(sfn) - # Since we generated a new tempfile and we are not returning here # lets change the original sfn to the new tempfile or else we will # get file not found @@ -2490,10 +2496,10 @@ def managed(name, log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) finally: - if tmp_filename and os.path.isfile(tmp_filename): - os.remove(tmp_filename) - if sfn and os.path.isfile(sfn): - os.remove(sfn) + if tmp_filename: + salt.utils.files.remove(tmp_filename) + if not keep_source and sfn: + salt.utils.files.remove(sfn) _RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs'] @@ -3022,6 +3028,7 @@ def directory(name, def recurse(name, source, + keep_source=True, clean=False, require=None, user=None, @@ -3053,6 +3060,15 @@ def recurse(name, located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs + keep_source : True + Set to ``False`` to discard the cached copy of the source file once the + state completes. This can be useful for larger files to keep them from + taking up space in minion cache. 
However, keep in mind that discarding + the source file will result in the state needing to re-download the + source file if the state is run again. + + .. versionadded:: 2017.7.3 + clean Make sure that only files that are set up by salt and required by this function are kept. If this option is set then everything in this @@ -3333,6 +3349,7 @@ def recurse(name, _ret = managed( path, source=source, + keep_source=keep_source, user=user, group=group, mode='keep' if keep_mode else file_mode, @@ -6423,3 +6440,376 @@ def shortcut( ret['comment'] += (', but was unable to set ownership to ' '{0}'.format(user)) return ret + + +def cached(name, + source_hash='', + source_hash_name=None, + skip_verify=False, + saltenv='base'): + ''' + .. versionadded:: 2017.7.3 + + Ensures that a file is saved to the minion's cache. This state is primarily + invoked by other states to ensure that we do not re-download a source file + if we do not need to. + + name + The URL of the file to be cached. To cache a file from an environment + other than ``base``, either use the ``saltenv`` argument or include the + saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``). + + .. note:: + A list of URLs is not supported, this must be a single URL. If a + local file is passed here, then the state will obviously not try to + download anything, but it will compare a hash if one is specified. + + source_hash + See the documentation for this same argument in the + :py:func:`file.managed ` state. + + .. note:: + For remote files not originating from the ``salt://`` fileserver, + such as http(s) or ftp servers, this state will not re-download the + file if the locally-cached copy matches this hash. This is done to + prevent unnecessary downloading on repeated runs of this state. To + update the cached copy of a file, it is necessary to update this + hash. + + source_hash_name + See the documentation for this same argument in the + :py:func:`file.managed ` state. 
+ + skip_verify + See the documentation for this same argument in the + :py:func:`file.managed ` state. + + .. note:: + Setting this to ``True`` will result in a copy of the file being + downloaded from a remote (http(s), ftp, etc.) source each time the + state is run. + + saltenv + Used to specify the environment from which to download a file from the + Salt fileserver (i.e. those with ``salt://`` URL). + + + This state will in most cases not be useful in SLS files, but it is useful + when writing a state or remote-execution module that needs to make sure + that a file at a given URL has been downloaded to the cachedir. One example + of this is in the :py:func:`archive.extracted ` + state: + + .. code-block:: python + + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + + This will return a dictionary containing the state's return data, including + a ``result`` key which will state whether or not the state was successful. + Note that this will not catch exceptions, so it is best used within a + try/except. + + Once this state has been run from within another state or remote-execution + module, the actual location of the cached file can be obtained using + :py:func:`cp.is_cached `: + + .. code-block:: python + + cached = __salt__['cp.is_cached'](source_match) + + This function will return the cached path of the file, or an empty string + if the file is not present in the minion cache. + + This state will in most cases not be useful in SLS files, but it is useful + when writing a state or remote-execution module that needs to make sure + that a file at a given URL has been downloaded to the cachedir. One example + of this is in the :py:func:`archive.extracted ` + state: + + .. 
code-block:: python + + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + + This will return a dictionary containing the state's return data, including + a ``result`` key which will state whether or not the state was successful. + Note that this will not catch exceptions, so it is best used within a + try/except. + + Once this state has been run from within another state or remote-execution + module, the actual location of the cached file can be obtained using + :py:func:`cp.is_cached `: + + .. code-block:: python + + cached = __salt__['cp.is_cached'](source_match) + + This function will return the cached path of the file, or an empty string + if the file is not present in the minion cache. + ''' + ret = {'changes': {}, + 'comment': '', + 'name': name, + 'result': False} + + try: + parsed = _urlparse(name) + except Exception: + ret['comment'] = 'Only URLs or local file paths are valid input' + return ret + + # This if statement will keep the state from proceeding if a remote source + # is specified and no source_hash is presented (unless we're skipping hash + # verification). + if not skip_verify \ + and not source_hash \ + and parsed.scheme in salt.utils.files.REMOTE_PROTOS: + ret['comment'] = ( + 'Unable to verify upstream hash of source file {0}, please set ' + 'source_hash or set skip_verify to True'.format(name) + ) + return ret + + if source_hash: + # Get the hash and hash type from the input. This takes care of parsing + # the hash out of a file containing checksums, if that is how the + # source_hash was specified. 
+ try: + source_sum = __salt__['file.get_source_sum']( + source=name, + source_hash=source_hash, + source_hash_name=source_hash_name, + saltenv=saltenv) + except CommandExecutionError as exc: + ret['comment'] = exc.strerror + return ret + else: + if not source_sum: + # We shouldn't get here, problems in retrieving the hash in + # file.get_source_sum should result in a CommandExecutionError + # being raised, which we catch above. Nevertheless, we should + # provide useful information in the event that + # file.get_source_sum regresses. + ret['comment'] = ( + 'Failed to get source hash from {0}. This may be a bug. ' + 'If this error persists, please report it and set ' + 'skip_verify to True to work around it.'.format(source_hash) + ) + return ret + else: + source_sum = {} + + if parsed.scheme in salt.utils.files.LOCAL_PROTOS: + # Source is a local file path + full_path = os.path.realpath(os.path.expanduser(parsed.path)) + if os.path.exists(full_path): + if not skip_verify and source_sum: + # Enforce the hash + local_hash = __salt__['file.get_hash']( + full_path, + source_sum.get('hash_type', __opts__['hash_type'])) + if local_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File {0} is present on the minion and has hash ' + '{1}'.format(full_path, local_hash) + ) + else: + ret['comment'] = ( + 'File {0} is present on the minion, but the hash ({1}) ' + 'does not match the specified hash ({2})'.format( + full_path, local_hash, source_sum['hsum'] + ) + ) + return ret + else: + ret['result'] = True + ret['comment'] = 'File {0} is present on the minion'.format( + full_path + ) + return ret + else: + ret['comment'] = 'File {0} is not present on the minion'.format( + full_path + ) + return ret + + local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv) + + if local_copy: + # File is already cached + pre_hash = __salt__['file.get_hash']( + local_copy, + source_sum.get('hash_type', __opts__['hash_type'])) + + if not skip_verify and source_sum: 
+ # Get the local copy's hash to compare with the hash that was + # specified via source_hash. If it matches, we can exit early from + # the state without going any further, because the file is cached + # with the correct hash. + if pre_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File is already cached to {0} with hash {1}'.format( + local_copy, pre_hash + ) + ) + else: + pre_hash = None + + def _try_cache(path, checksum): + ''' + This helper is not needed anymore in develop as the fileclient in the + develop branch now has means of skipping a download if the existing + hash matches one passed to cp.cache_file. Remove this helper and the + code that invokes it, once we have merged forward into develop. + ''' + if not path or not checksum: + return True + form = salt.utils.files.HASHES_REVMAP.get(len(checksum)) + if form is None: + # Shouldn't happen, an invalid checksum length should be caught + # before we get here. But in the event this gets through, don't let + # it cause any trouble, and just return True. + return True + try: + return salt.utils.get_hash(path, form=form) != checksum + except (IOError, OSError, ValueError): + # Again, shouldn't happen, but don't let invalid input/permissions + # in the call to get_hash blow this up. + return True + + # Cache the file. Note that this will not actually download the file if + # either of the following is true: + # 1. source is a salt:// URL and the fileserver determines that the hash + # of the minion's copy matches that of the fileserver. + # 2. File is remote (http(s), ftp, etc.) and the specified source_hash + # matches the cached copy. + # Remote, non salt:// sources _will_ download if a copy of the file was + # not already present in the minion cache. + if _try_cache(local_copy, source_sum.get('hsum')): + # The _try_cache helper is obsolete in the develop branch. Once merged + # forward, remove the helper as well as this if statement, and dedent + # the below block. 
+ try: + local_copy = __salt__['cp.cache_file']( + name, + saltenv=saltenv) + # Once this is merged into develop, uncomment the source_hash + # line below and add it to the list of arguments to + # cp.cache_file (note that this also means removing the + # close-parenthesis above and replacing it with a comma). The + # develop branch has modifications to the fileclient which will + # allow it to skip the download if the source_hash matches what + # is passed to cp.cache_file, so the helper is just a stopgap + # for the 2017.7 release cycle. + #source_hash=source_sum.get('hsum')) + except Exception as exc: + ret['comment'] = exc.__str__() + return ret + + if not local_copy: + ret['comment'] = ( + 'Failed to cache {0}, check minion log for more ' + 'information'.format(name) + ) + return ret + + post_hash = __salt__['file.get_hash']( + local_copy, + source_sum.get('hash_type', __opts__['hash_type'])) + + if pre_hash != post_hash: + ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash} + + # Check the hash, if we're enforcing one. Note that this will be the first + # hash check if the file was not previously cached, and the 2nd hash check + # if it was cached and the + if not skip_verify and source_sum: + if post_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File is already cached to {0} with hash {1}'.format( + local_copy, post_hash + ) + ) + else: + ret['comment'] = ( + 'File is cached to {0}, but the hash ({1}) does not match ' + 'the specified hash ({2})'.format( + local_copy, post_hash, source_sum['hsum'] + ) + ) + return ret + + # We're not enforcing a hash, and we already know that the file was + # successfully cached, so we know the state was successful. + ret['result'] = True + ret['comment'] = 'File is cached to {0}'.format(local_copy) + return ret + + +def not_cached(name, saltenv='base'): + ''' + Ensures that a file is saved to the minion's cache. 
This state is primarily + invoked by other states to ensure that we do not re-download a source file + if we do not need to. + + name + The URL of the file to be cached. To cache a file from an environment + other than ``base``, either use the ``saltenv`` argument or include the + saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``). + + .. note:: + A list of URLs is not supported, this must be a single URL. If a + local file is passed here, the state will take no action. + + saltenv + Used to specify the environment from which to download a file from the + Salt fileserver (i.e. those with ``salt://`` URL). + ''' + ret = {'changes': {}, + 'comment': '', + 'name': name, + 'result': False} + + try: + parsed = _urlparse(name) + except Exception: + ret['comment'] = 'Only URLs or local file paths are valid input' + return ret + else: + if parsed.scheme in salt.utils.files.LOCAL_PROTOS: + full_path = os.path.realpath(os.path.expanduser(parsed.path)) + ret['result'] = True + ret['comment'] = ( + 'File {0} is a local path, no action taken'.format( + full_path + ) + ) + return ret + + local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv) + + if local_copy: + try: + os.remove(local_copy) + except Exception as exc: + ret['comment'] = 'Failed to delete {0}: {1}'.format( + local_copy, exc.__str__() + ) + else: + ret['result'] = True + ret['changes']['deleted'] = True + ret['comment'] = '{0} was deleted'.format(local_copy) + else: + ret['result'] = True + ret['comment'] = '{0} is not cached'.format(name) + return ret diff --git a/salt/utils/files.py b/salt/utils/files.py index 8d463756d9b..605e9710d88 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -23,10 +23,21 @@ from salt.ext import six log = logging.getLogger(__name__) +LOCAL_PROTOS = ('', 'file') REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3') VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS TEMPFILE_PREFIX = '__salt.tmp.' 
+HASHES = { + 'sha512': 128, + 'sha384': 96, + 'sha256': 64, + 'sha224': 56, + 'sha1': 40, + 'md5': 32, +} +HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)]) + def guess_archive_type(name): ''' @@ -296,3 +307,14 @@ def safe_filepath(file_path_name): return os.sep.join([drive, path]) else: return path + + +def remove(path): + ''' + Runs os.remove(path) and suppresses the OSError if the file doesn't exist + ''' + try: + os.remove(path) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise From b1e64b11fbcaf9dedb6d4cb6ee7a06801cbba877 Mon Sep 17 00:00:00 2001 From: Michal Kurtak Date: Thu, 21 Sep 2017 21:51:22 +0200 Subject: [PATCH 122/348] yumpkg.py: install calls list_repo_pkgs only if wildcard in pkg name is used Fixes #43396 --- salt/modules/yumpkg.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 8a15867a8cc..51e855e6365 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -1262,6 +1262,7 @@ def install(name=None, to_install = [] to_downgrade = [] to_reinstall = [] + _available = {} # The above three lists will be populated with tuples containing the # package name and the string being used for this particular package # modification. 
The reason for this method is that the string we use for @@ -1281,7 +1282,8 @@ def install(name=None, if pkg_type == 'repository': has_wildcards = [x for x, y in six.iteritems(pkg_params) if y is not None and '*' in y] - _available = list_repo_pkgs(*has_wildcards, byrepo=False, **kwargs) + if has_wildcards: + _available = list_repo_pkgs(*has_wildcards, byrepo=False, **kwargs) pkg_params_items = six.iteritems(pkg_params) elif pkg_type == 'advisory': pkg_params_items = [] From 7f9a7e2857cc60e0828167a966efc8d73edd019a Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Thu, 21 Sep 2017 20:31:18 -0400 Subject: [PATCH 123/348] INFRA-5292 - add support to fluent logger for Graylog and GELF output formats --- salt/log/handlers/fluent_mod.py | 196 ++++++++++++++++++++++++++------ 1 file changed, 159 insertions(+), 37 deletions(-) diff --git a/salt/log/handlers/fluent_mod.py b/salt/log/handlers/fluent_mod.py index d049920d474..a923dd36d7e 100644 --- a/salt/log/handlers/fluent_mod.py +++ b/salt/log/handlers/fluent_mod.py @@ -11,7 +11,18 @@ Fluent Logging Handler ------------------- - In the salt configuration file: + In the `fluent` configuration file: + + .. code-block:: text + + + type forward + bind localhost + port 24224 + + + Then, to send logs via fluent in Logstash format, add the + following to the salt (master and/or minion) configuration file: .. code-block:: yaml @@ -19,14 +30,32 @@ host: localhost port: 24224 - In the `fluent`_ configuration file: + To send logs via fluent in the Graylog raw json format, add the + following to the salt (master and/or minion) configuration file: - .. code-block:: text + .. code-block:: yaml - - type forward - port 24224 - + fluent_handler: + host: localhost + port: 24224 + payload_type: graylog + tags: + - salt_master.SALT + + The above also illustrates the `tags` option, which allows + one to set descriptive (or useful) tags on records being + sent. If not provided, this defaults to the single tag: + 'salt'. 
Also note that, via Graylog "magic", the 'facility' + of the logged message is set to 'SALT' (the portion of the + tag after the first period), while the tag itself will be + set to simply 'salt_master'. This is a feature, not a bug :) + + Note: + There is a third emitter, for the GELF format, but it is + largely untested, and I don't currently have a setup supporting + this config, so while it runs cleanly and outputs what LOOKS to + be valid GELF, any real-world feedback on its usefulness, and + correctness, will be appreciated. Log Level ......... @@ -53,7 +82,7 @@ import time import datetime import socket import threading - +import types # Import salt libs from salt.log.setup import LOG_LEVELS @@ -91,6 +120,18 @@ __virtualname__ = 'fluent' _global_sender = None +# Python logger's idea of "level" is wildly at variance with +# Graylog's (and, incidentally, the rest of the civilized world). +syslog_levels = { + 'EMERG': 0, + 'ALERT': 2, + 'CRIT': 2, + 'ERR': 3, + 'WARNING': 4, + 'NOTICE': 5, + 'INFO': 6, + 'DEBUG': 7 +} def setup(tag, **kwargs): host = kwargs.get('host', 'localhost') @@ -116,55 +157,133 @@ def __virtual__(): def setup_handlers(): - host = port = address = None + host = port = None if 'fluent_handler' in __opts__: host = __opts__['fluent_handler'].get('host', None) port = __opts__['fluent_handler'].get('port', None) - version = __opts__['fluent_handler'].get('version', 1) + payload_type = __opts__['fluent_handler'].get('payload_type', None) + # in general, you want the value of tag to ALSO be a member of tags + tags = __opts__['fluent_handler'].get('tags', ['salt']) + tag = tags[0] if len(tags) else 'salt' + if payload_type == 'graylog': + version = 0 + elif payload_type == 'gelf': + # We only support version 1.1 (the latest) of GELF... 
+ version = 1.1 + else: + # Default to logstash for backwards compat + payload_type = 'logstash' + version = __opts__['fluent_handler'].get('version', 1) if host is None and port is None: log.debug( 'The required \'fluent_handler\' configuration keys, ' '\'host\' and/or \'port\', are not properly configured. Not ' - 'configuring the fluent logging handler.' + 'enabling the fluent logging handler.' ) else: - logstash_formatter = LogstashFormatter(version=version) - fluent_handler = FluentHandler('salt', host=host, port=port) - fluent_handler.setFormatter(logstash_formatter) + formatter = MessageFormatter(payload_type=payload_type, version=version, tags=tags) + fluent_handler = FluentHandler(tag, host=host, port=port) + fluent_handler.setFormatter(formatter) fluent_handler.setLevel( - LOG_LEVELS[ - __opts__['fluent_handler'].get( - 'log_level', - # Not set? Get the main salt log_level setting on the - # configuration file - __opts__.get( - 'log_level', - # Also not set?! Default to 'error' - 'error' - ) - ) - ] + LOG_LEVELS[__opts__['fluent_handler'].get('log_level', __opts__.get('log_level', 'error'))] ) yield fluent_handler - if host is None and port is None and address is None: + if host is None and port is None: yield False -class LogstashFormatter(logging.Formatter, NewStyleClassMixIn): - def __init__(self, msg_type='logstash', msg_path='logstash', version=1): - self.msg_path = msg_path - self.msg_type = msg_type +class MessageFormatter(logging.Formatter, NewStyleClassMixIn): + def __init__(self, payload_type, version, tags, msg_type=None, msg_path=None): + self.payload_type = payload_type self.version = version - self.format = getattr(self, 'format_v{0}'.format(version)) - super(LogstashFormatter, self).__init__(fmt=None, datefmt=None) + self.tag = tags[0] if len(tags) else 'salt' # 'salt' for backwards compat + self.tags = tags + self.msg_path = msg_path if msg_path else payload_type + self.msg_type = msg_type if msg_type else payload_type + format_func = 
'format_{0}_v{1}'.format(payload_type, version).replace('.', '_') + self.format = getattr(self, format_func) + super(MessageFormatter, self).__init__(fmt=None, datefmt=None) def formatTime(self, record, datefmt=None): + if self.payload_type == 'gelf': # GELF uses epoch times + return record.created return datetime.datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z' - def format_v0(self, record): + def format_graylog_v0(self, record): + ''' + Graylog 'raw' format is essentially the raw record, minimally munged to provide + the bare minimum that td-agent requires to accept and route the event. This is + well suited to a config where the client td-agents log directly to Graylog. + ''' + message_dict = { + 'message': record.getMessage(), + 'timestamp': self.formatTime(record), + # Graylog uses syslog levels, not whatever it is Python does... + 'level': syslog_levels.get(record.levelname, 'ALERT'), + 'tag': self.tag + } + + if record.exc_info: + exc_info = self.formatException(record.exc_info) + message_dict.update({'full_message': exc_info}) + + # Add any extra attributes to the message field + for key, value in six.iteritems(record.__dict__): + if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess', + 'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs', + 'msecs', 'message', 'msg', 'relativeCreated', 'version'): + # These are already handled above or explicitly pruned. + continue + + if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): + val = value + else: + val = repr(value) + message_dict.update({'{0}'.format(key): val}) + return message_dict + + def format_gelf_v1_1(self, record): + ''' + If your agent is (or can be) configured to forward pre-formed GELF to Graylog + with ZERO fluent processing, this function is for YOU, pal... 
+ ''' + message_dict = { + 'version': self.version, + 'host': salt.utils.network.get_fqhostname(), + 'short_message': record.getMessage(), + 'timestamp': self.formatTime(record), + 'level': syslog_levels.get(record.levelname, 'ALERT'), + "_tag": self.tag + } + + if record.exc_info: + exc_info = self.formatException(record.exc_info) + message_dict.update({'full_message': exc_info}) + + # Add any extra attributes to the message field + for key, value in six.iteritems(record.__dict__): + if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess', + 'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno', 'msecs', + 'msecs', 'message', 'msg', 'relativeCreated', 'version'): + # These are already handled above or explicitly avoided. + continue + + if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): + val = value + else: + val = repr(value) + # GELF spec require "non-standard" fields to be prefixed with '_' (underscore). + message_dict.update({'_{0}'.format(key): val}) + + return message_dict + + def format_logstash_v0(self, record): + ''' + Messages are formatted in logstash's expected format. + ''' host = salt.utils.network.get_fqhostname() message_dict = { '@timestamp': self.formatTime(record), @@ -186,7 +305,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn): ), '@source_host': host, '@source_path': self.msg_path, - '@tags': ['salt'], + '@tags': self.tags, '@type': self.msg_type, } @@ -216,7 +335,10 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn): message_dict['@fields'][key] = repr(value) return message_dict - def format_v1(self, record): + def format_logstash_v1(self, record): + ''' + Messages are formatted in logstash's expected format. 
+ ''' message_dict = { '@version': 1, '@timestamp': self.formatTime(record), @@ -230,7 +352,7 @@ class LogstashFormatter(logging.Formatter, NewStyleClassMixIn): 'funcName': record.funcName, 'processName': record.processName, 'message': record.getMessage(), - 'tags': ['salt'], + 'tags': self.tags, 'type': self.msg_type } From 1c979d58096175aefba7a099d17f963d637fe085 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Fri, 22 Sep 2017 10:30:28 +0300 Subject: [PATCH 124/348] Update redis cache `contains` logic to use more efficient `sismember`. --- salt/cache/redis_cache.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py index 0f52dfd6ad2..35bce551984 100644 --- a/salt/cache/redis_cache.py +++ b/salt/cache/redis_cache.py @@ -441,12 +441,9 @@ def contains(bank, key): redis_server = _get_redis_server() bank_redis_key = _get_bank_redis_key(bank) try: - banks = redis_server.smembers(bank_redis_key) + return redis_server.sismember(bank_redis_key, key) except (RedisConnectionError, RedisResponseError) as rerr: mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key, rerr=rerr) log.error(mesg) raise SaltCacheError(mesg) - if not banks: - return False - return key in banks From 9d450f77379d8f21336de414f433ff3ed5830df6 Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Fri, 22 Sep 2017 10:24:11 +0200 Subject: [PATCH 125/348] Fix python3 compatibility --- salt/states/mdadm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/mdadm.py b/salt/states/mdadm.py index 64cae4a6d6f..8981a15dbee 100644 --- a/salt/states/mdadm.py +++ b/salt/states/mdadm.py @@ -112,7 +112,7 @@ def present(name, ret['result'] = False return ret elif len(uuid_dict) == 1: - uuid = uuid_dict.keys()[0] + uuid = list(uuid_dict.keys())[0] if present and present['uuid'] != uuid: ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, 
present['uuid']) ret['result'] = False From f6a8a969a47036ff9f46ff769d1acaf8c7b8b43b Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Fri, 22 Sep 2017 05:19:51 -0400 Subject: [PATCH 126/348] INFRA-5292 - we must please pylint ... --- salt/log/handlers/fluent_mod.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/log/handlers/fluent_mod.py b/salt/log/handlers/fluent_mod.py index a923dd36d7e..ccd56b5521c 100644 --- a/salt/log/handlers/fluent_mod.py +++ b/salt/log/handlers/fluent_mod.py @@ -133,6 +133,7 @@ syslog_levels = { 'DEBUG': 7 } + def setup(tag, **kwargs): host = kwargs.get('host', 'localhost') port = kwargs.get('port', 24224) @@ -238,7 +239,7 @@ class MessageFormatter(logging.Formatter, NewStyleClassMixIn): # These are already handled above or explicitly pruned. continue - if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): + if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699 val = value else: val = repr(value) @@ -271,7 +272,7 @@ class MessageFormatter(logging.Formatter, NewStyleClassMixIn): # These are already handled above or explicitly avoided. 
continue - if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): + if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)): # pylint: disable=W1699 val = value else: val = repr(value) From cbae45bec43bbe1e1fb9efe2c973402087694df5 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 22 Sep 2017 10:33:10 -0400 Subject: [PATCH 127/348] Lint: Remove extra line at end of file --- tests/unit/utils/test_parsers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/utils/test_parsers.py b/tests/unit/utils/test_parsers.py index 71b8cf62c9a..ba4cc402d80 100644 --- a/tests/unit/utils/test_parsers.py +++ b/tests/unit/utils/test_parsers.py @@ -1002,4 +1002,3 @@ class DaemonMixInTestCase(TestCase): # Hide the class from unittest framework when it searches for TestCase classes in the module del LogSettingsParserTests - From da156583048f0e0cab85afa1a9b195910fcf67a2 Mon Sep 17 00:00:00 2001 From: "Z. Liu" Date: Fri, 22 Sep 2017 23:25:21 +0800 Subject: [PATCH 128/348] remove modify yaml constructor which will modify the default behavior of yaml load. Foe example, for following example (t.sls), it will cause the difference between the content of file testa and testb, but it should be identical! 
$ cat t {%- load_yaml as vars %} toaddr: - test@test.com {%- endload -%} {{ vars.toaddr }} $ cat t.sls /tmp/testa: file.managed: - source: salt://t - user: root - group: root - mode: "0755" - template: jinja sys-power/acpid: pkg.installed: - refresh: False /tmp/testb: file.managed: - source: salt://t - user: root - group: root - mode: "0755" - template: jinja $ touch /tmp/test{a,b} $ salt-call state.sls t local: ---------- ID: /tmp/testa Function: file.managed Result: None Comment: The file /tmp/testa is set to be changed Changes: ---------- diff: --- +++ @@ -0,0 +1 @@ +['test@test.com'] ---------- ID: /tmp/testb Function: file.managed Result: None Comment: The file /tmp/testb is set to be changed Changes: ---------- diff: --- +++ @@ -0,0 +1 @@ +[u'test@test.com'] --- salt/modules/heat.py | 2 -- salt/states/heat.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/salt/modules/heat.py b/salt/modules/heat.py index 1f94f2e605e..e2b3f97ded2 100644 --- a/salt/modules/heat.py +++ b/salt/modules/heat.py @@ -102,8 +102,6 @@ def _construct_yaml_str(self, node): Construct for yaml ''' return self.construct_scalar(node) -YamlLoader.add_constructor(u'tag:yaml.org,2002:str', - _construct_yaml_str) YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp', _construct_yaml_str) diff --git a/salt/states/heat.py b/salt/states/heat.py index c5f40f16878..a0427512256 100644 --- a/salt/states/heat.py +++ b/salt/states/heat.py @@ -80,8 +80,6 @@ def _construct_yaml_str(self, node): Construct for yaml ''' return self.construct_scalar(node) -YamlLoader.add_constructor(u'tag:yaml.org,2002:str', - _construct_yaml_str) YamlLoader.add_constructor(u'tag:yaml.org,2002:timestamp', _construct_yaml_str) From 9e32ce72cc75edf39b40c0e2c70ec9256d2e003b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:12:37 -0400 Subject: [PATCH 129/348] Added salt.utils.vmware.get_dvss that retrieves DVSs in a datacenter --- salt/utils/vmware.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 
file changed, 39 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index b239b269b09..91f86b4b827 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -981,6 +981,45 @@ def get_network_adapter_type(adapter_type): return vim.vm.device.VirtualE1000e() +def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): + ''' + Returns distributed virtual switches (DVSs) in a datacenter. + + dc_ref + The parent datacenter reference. + + dvs_names + The names of the DVSs to return. Default is None. + + get_all_dvss + Return all DVSs in the datacenter. Default is False. + ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Retrieving DVSs in datacenter \'{0}\', dvs_names=\'{1}\', ' + 'get_all_dvss={2}'.format(dc_name, + ','.join(dvs_names) if dvs_names + else None, + get_all_dvss)) + properties = ['name'] + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='networkFolder', + skip=True, + type=vim.Datacenter, + selectSet=[vmodl.query.PropertyCollector.TraversalSpec( + path='childEntity', + skip=False, + type=vim.Folder)]) + service_instance = get_service_instance_from_managed_object(dc_ref) + items = [i['object'] for i in + get_mors_with_properties(service_instance, + vim.DistributedVirtualSwitch, + container_ref=dc_ref, + property_list=properties, + traversal_spec=traversal_spec) + if get_all_dvss or (dvs_names and i['name'] in dvs_names)] + return items + + def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. 
From 173a697be2c0296424609448907b1ae405ee4999 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:14:55 -0400 Subject: [PATCH 130/348] Added comments and imports for dvs functions in salt.utils.vmware --- tests/unit/utils/vmware/test_dvs.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 tests/unit/utils/vmware/test_dvs.py diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py new file mode 100644 index 00000000000..27c7886eb58 --- /dev/null +++ b/tests/unit/utils/vmware/test_dvs.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for dvs related functions in salt.utils.vmware +''' + +# Import python libraries +from __future__ import absolute_import +import logging + +# Import Salt testing libraries +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ + PropertyMock +from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \ + ArgumentValueError, VMwareRuntimeError + +#i Import Salt libraries +import salt.utils.vmware as vmware +# Import Third Party Libs +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +class FakeTaskClass(object): + pass From 3584a9169269ed5f672b9e62ad8455a8bfddc2a3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:15:46 -0400 Subject: [PATCH 131/348] Added tests for salt.utils.vmware.get_dvss --- tests/unit/utils/vmware/test_dvs.py | 75 +++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 27c7886eb58..31f87d5b139 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -31,3 +31,78 @@ log = logging.getLogger(__name__) class 
FakeTaskClass(object): pass + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetDvssTestCase(TestCase): + def setUp(self): + self.mock_si = MagicMock() + self.mock_dc_ref = MagicMock() + self.mock_traversal_spec = MagicMock() + self.mock_items = [{'object': MagicMock(), + 'name': 'fake_dvs1'}, + {'object': MagicMock(), + 'name': 'fake_dvs2'}, + {'object': MagicMock(), + 'name': 'fake_dvs3'}] + self.mock_get_mors = MagicMock(return_value=self.mock_items) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock()), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec', + 'mock_items', 'mock_get_mors'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_dvss(self.mock_dc_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_dvss(self.mock_dc_ref) + mock_traversal_spec.assert_called( + call(path='networkFolder', skip=True, type=vim.Datacenter, + selectSet=['traversal_spec']), + call(path='childEntity', skip=False, type=vim.Folder)) + + def test_get_mors_with_properties(self): + vmware.get_dvss(self.mock_dc_ref) + self.mock_get_mors.assert_called_once_with( 
+ self.mock_si, vim.DistributedVirtualSwitch, + container_ref=self.mock_dc_ref, property_list=['name'], + traversal_spec=self.mock_traversal_spec) + + def test_get_no_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref) + self.assertEqual(ret, []) + + def test_get_all_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref, get_all_dvss=True) + self.assertEqual(ret, [i['object'] for i in self.mock_items]) + + def test_filtered_all_dvss(self): + ret = vmware.get_dvss(self.mock_dc_ref, + dvs_names=['fake_dvs1', 'fake_dvs3', 'no_dvs']) + self.assertEqual(ret, [self.mock_items[0]['object'], + self.mock_items[2]['object']]) From c0040aaa1a457b0d8a09b1ad8a25c46fea4e37bc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:18:17 -0400 Subject: [PATCH 132/348] Added salt.utils.vmware.get_network_folder that retrieves the network folder --- salt/utils/vmware.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 91f86b4b827..fb621a3dcd4 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1020,6 +1020,30 @@ def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False): return items +def get_network_folder(dc_ref): + ''' + Retrieves the network folder of a datacenter + ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Retrieving network folder in datacenter ' + '\'{0}\''.format(dc_name)) + service_instance = get_service_instance_from_managed_object(dc_ref) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='networkFolder', + skip=False, + type=vim.Datacenter) + entries = get_mors_with_properties(service_instance, + vim.Folder, + container_ref=dc_ref, + property_list=['name'], + traversal_spec=traversal_spec) + if not entries: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Network folder in datacenter \'{0}\' wasn\'t retrieved' + ''.format(dc_name)) + return entries[0]['object'] + + def list_objects(service_instance, vim_object, 
properties=None): ''' Returns a simple list of objects from a given service instance. From 4f09bf5e880bfd55b3b24fdffa690723fa2554b7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:20:17 -0400 Subject: [PATCH 133/348] Added tests for salt.utils.vmware.get_network_folder --- tests/unit/utils/vmware/test_dvs.py | 67 +++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 31f87d5b139..548a2e8909a 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -106,3 +106,70 @@ class GetDvssTestCase(TestCase): dvs_names=['fake_dvs1', 'fake_dvs3', 'no_dvs']) self.assertEqual(ret, [self.mock_items[0]['object'], self.mock_items[2]['object']]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetNetworkFolderTestCase(TestCase): + def setUp(self): + self.mock_si = MagicMock() + self.mock_dc_ref = MagicMock() + self.mock_traversal_spec = MagicMock() + self.mock_entries = [{'object': MagicMock(), + 'name': 'fake_netw_folder'}] + self.mock_get_mors = MagicMock(return_value=self.mock_entries) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dc')), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec)), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec', + 'mock_entries', 'mock_get_mors'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with 
patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_network_folder(self.mock_dc_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_network_folder(self.mock_dc_ref) + mock_traversal_spec.assert_called_once_with( + path='networkFolder', skip=False, type=vim.Datacenter) + + def test_get_mors_with_properties(self): + vmware.get_network_folder(self.mock_dc_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, vim.Folder, container_ref=self.mock_dc_ref, + property_list=['name'], traversal_spec=self.mock_traversal_spec) + + def test_get_no_network_folder(self): + with patch('salt.utils.vmware.get_mors_with_properties', + MagicMock(return_value=[])): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + vmware.get_network_folder(self.mock_dc_ref) + self.assertEqual(excinfo.exception.strerror, + 'Network folder in datacenter \'fake_dc\' wasn\'t ' + 'retrieved') + + def test_get_network_folder(self): + ret = vmware.get_network_folder(self.mock_dc_ref) + self.assertEqual(ret, self.mock_entries[0]['object']) From 793acab99fb6416922589c6ca3e4b9b2744b13eb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:21:33 -0400 Subject: [PATCH 134/348] Added for salt.utils.vmware.create_dvs --- salt/utils/vmware.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index fb621a3dcd4..6055cf5ce27 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1044,6 +1044,46 @@ def get_network_folder(dc_ref): return entries[0]['object'] +def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): + ''' + Creates a distributed virtual switches (DVS) in a datacenter. 
+ Returns the reference to the newly created distributed virtual switch. + + dc_ref + The parent datacenter reference. + + dvs_name + The name of the DVS to create. + + dvs_create_spec + The DVS spec (vim.DVSCreateSpec) to use when creating the DVS. + Default is None. + ''' + dc_name = get_managed_object_name(dc_ref) + log.trace('Creating DVS \'{0}\' in datacenter ' + '\'{1}\''.format(dvs_name, dc_name)) + if not dvs_create_spec: + dvs_create_spec = vim.DVSCreateSpec() + if not dvs_create_spec.configSpec: + dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() + dvs_create_spec.configSpec.name = dvs_name + netw_folder_ref = get_network_folder(dc_ref) + try: + task = netw_folder_ref.CreateDVS_Task(dvs_create_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, dvs_name, str(task.__class__)) + + def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. 
From d31d98c2d39723186dac777485b5a833b00e0ea7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:23:00 -0400 Subject: [PATCH 135/348] Added tests for salt.utils.vmware.create_dvs --- tests/unit/utils/vmware/test_dvs.py | 102 ++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 548a2e8909a..da49c91f8c5 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -173,3 +173,105 @@ class GetNetworkFolderTestCase(TestCase): def test_get_network_folder(self): ret = vmware.get_network_folder(self.mock_dc_ref) self.assertEqual(ret, self.mock_entries[0]['object']) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class CreateDvsTestCase(TestCase): + def setUp(self): + self.mock_dc_ref = MagicMock() + self.mock_dvs_create_spec = MagicMock() + self.mock_task = MagicMock(spec=FakeTaskClass) + self.mock_netw_folder = \ + MagicMock(CreateDVS_Task=MagicMock( + return_value=self.mock_task)) + self.mock_wait_for_task = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dc')), + ('salt.utils.vmware.get_network_folder', + MagicMock(return_value=self.mock_netw_folder)), + ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_dc_ref', 'mock_dvs_create_spec', + 'mock_task', 'mock_netw_folder', 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def 
test_no_dvs_create_spec(self): + mock_spec = MagicMock(configSpec=None) + mock_config_spec = MagicMock() + mock_dvs_create_spec = MagicMock(return_value=mock_spec) + mock_vmware_dvs_config_spec = \ + MagicMock(return_value=mock_config_spec) + with patch('salt.utils.vmware.vim.DVSCreateSpec', + mock_dvs_create_spec): + with patch('salt.utils.vmware.vim.VMwareDVSConfigSpec', + mock_vmware_dvs_config_spec): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_dvs_create_spec.assert_called_once_with() + mock_vmware_dvs_config_spec.assert_called_once_with() + self.assertEqual(mock_spec.configSpec, mock_config_spec) + self.assertEqual(mock_config_spec.name, 'fake_dvs') + self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(mock_spec) + + def test_get_network_folder(self): + mock_get_network_folder = MagicMock() + with patch('salt.utils.vmware.get_network_folder', + mock_get_network_folder): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs') + mock_get_network_folder.assert_called_once_with(self.mock_dc_ref) + + def test_create_dvs_task_passed_in_spec(self): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.mock_netw_folder.CreateDVS_Task.assert_called_once_with( + self.mock_dvs_create_spec) + + def test_create_dvs_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_create_dvs_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_dvs_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.create_dvs(self.mock_dc_ref, 'fake_dvs', + dvs_create_spec=self.mock_dvs_create_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_dvs', + '') From ce6e8c8522d8205ac5bd914ff32fc98f2244d826 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:24:09 -0400 Subject: [PATCH 136/348] Added salt.utils.vmware.update_dvs --- salt/utils/vmware.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 6055cf5ce27..96da8309e39 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1084,6 +1084,35 @@ def create_dvs(dc_ref, dvs_name, dvs_create_spec=None): wait_for_task(task, dvs_name, str(task.__class__)) +def update_dvs(dvs_ref, dvs_config_spec): + ''' + Updates a distributed virtual switch with the config_spec. + + dvs_ref + The DVS reference. + + dvs_config_spec + The updated config spec (vim.VMwareDVSConfigSpec) to be applied to + the DVS. 
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Applies a config spec to an existing distributed virtual switch.

    dvs_ref
        Reference to the DVS to reconfigure.

    dvs_config_spec
        The vim.VMwareDVSConfigSpec carrying the desired changes.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'{0}\''.format(dvs_name))
    try:
        reconf_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        # Surface the missing privilege so the operator knows what to grant.
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the reconfigure task as finished.
    wait_for_task(reconf_task, dvs_name, str(reconf_task.__class__))
patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_dvs_ref', 'mock_task', 'mock_dvs_spec', + 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_reconfigure_dvs_task(self): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.mock_dvs_ref.ReconfigureDvs_Task.assert_called_once_with( + self.mock_dvs_spec) + + def test_reconfigure_dvs_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_reconfigure_dvs_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_reconfigure_dvs_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_dvs', + '') From 77a815dbed46522d0d13839c53ef14d616d375b5 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:27:36 -0400 Subject: [PATCH 138/348] Added salt.utils.vmware.set_dvs_network_resource_management_enabled --- salt/utils/vmware.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 96da8309e39..ac5bcbb6d3b 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1113,6 +1113,34 @@ def update_dvs(dvs_ref, dvs_config_spec): wait_for_task(task, dvs_name, str(task.__class__)) +def set_dvs_network_resource_management_enabled(dvs_ref, enabled): + ''' + Sets whether NIOC is enabled on a DVS. + + dvs_ref + The DVS reference. + + enabled + Flag specifying whether NIOC is enabled. 
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        Reference to the DVS to modify.

    enabled
        Boolean flag: True turns NIOC on, False turns it off.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to {0} on '
              'dvs \'{1}\''.format(enabled, dvs_name))
    try:
        # This vSphere API call is synchronous; no task to wait on.
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
('mock_dvs_ref', 'mock_enabled'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_enable_network_resource_management(self): + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + self.mock_dvs_ref.EnableNetworkResourceManagement.assert_called_once_with( + enable=self.mock_enabled) + + def test_enable_network_resource_management_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dvs_ref.EnableNetworkResourceManagement = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_enable_network_resource_management_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_dvs_ref.EnableNetworkResourceManagement = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + + def test_enable_network_resource_management_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_dvs_ref.EnableNetworkResourceManagement = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.set_dvs_network_resource_management_enabled( + self.mock_dvs_ref, self.mock_enabled) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From 16b71d8ab1975f1aeaa11afe2a6576e46671a977 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:30:59 -0400 Subject: [PATCH 140/348] Added salt.utils.vmware.get_dvportgroups to retrieve distributed virtual portgroups --- salt/utils/vmware.py | 54 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index ac5bcbb6d3b..ee671eeb1ae 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1141,6 +1141,60 @@ def set_dvs_network_resource_management_enabled(dvs_ref, enabled): raise salt.exceptions.VMwareRuntimeError(exc.msg) +def get_dvportgroups(parent_ref, portgroup_names=None, + get_all_portgroups=False): + ''' + Returns distributed virtual porgroups (dvportgroups). + The parent object can be either a datacenter or a dvs. + + parent_ref + The parent object reference. Can be either a datacenter or a dvs. + + portgroup_names + The names of the dvss to return. Default is None. + + get_all_portgroups + Return all portgroups in the parent. Default is False. 
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises salt.exceptions.ArgumentValueError when parent_ref is neither a
    datacenter nor a distributed virtual switch.
    '''
    # Idiom fix: a single isinstance call with a type tuple replaces the
    # original chained isinstance checks.
    if not isinstance(parent_ref, (vim.Datacenter,
                                   vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in {0} \'{1}\', portgroups_names=\'{2}\', '
              'get_all_portgroups={3}'.format(
                  type(parent_ref).__name__, parent_name,
                  ','.join(portgroup_names) if portgroup_names else None,
                  get_all_portgroups))
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # From a datacenter, descend into the network folder and then into
        # its child entities to reach the portgroups.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)

    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
From 82f6ae368880a0453955bdd82bd9d02d458f3505 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:32:30 -0400 Subject: [PATCH 141/348] Added tests for salt.utils.vmware.get_dvportgroups --- tests/unit/utils/vmware/test_dvs.py | 94 +++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index df1f3afd984..da3a3883de9 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -410,3 +410,97 @@ class SetDvsNetworkResourceManagementEnabledTestCase(TestCase): vmware.set_dvs_network_resource_management_enabled( self.mock_dvs_ref, self.mock_enabled) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetDvportgroupsTestCase(TestCase): + def setUp(self): + self.mock_si = MagicMock() + self.mock_dc_ref = MagicMock(spec=vim.Datacenter) + self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch) + self.mock_traversal_spec = MagicMock() + self.mock_items = [{'object': MagicMock(), + 'name': 'fake_pg1'}, + {'object': MagicMock(), + 'name': 'fake_pg2'}, + {'object': MagicMock(), + 'name': 'fake_pg3'}] + self.mock_get_mors = MagicMock(return_value=self.mock_items) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock()), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dc_ref', 'mock_dvs_ref', + 'mock_traversal_spec', 'mock_items', 'mock_get_mors'): + delattr(self, attr) + + def 
test_unsupported_parrent(self): + with self.assertRaises(ArgumentValueError) as excinfo: + vmware.get_dvportgroups(MagicMock()) + self.assertEqual(excinfo.exception.strerror, + 'Parent has to be either a datacenter, or a ' + 'distributed virtual switch') + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_dvportgroups(self.mock_dc_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref) + + def test_traversal_spec_datacenter_parent(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_dvportgroups(self.mock_dc_ref) + mock_traversal_spec.assert_called( + call(path='networkFolder', skip=True, type=vim.Datacenter, + selectSet=['traversal_spec']), + call(path='childEntity', skip=False, type=vim.Folder)) + + def test_traversal_spec_dvs_parent(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_dvportgroups(self.mock_dvs_ref) + mock_traversal_spec.assert_called_once_with( + path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) + + def test_get_mors_with_properties(self): + vmware.get_dvportgroups(self.mock_dvs_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, vim.DistributedVirtualPortgroup, + container_ref=self.mock_dvs_ref, property_list=['name'], + traversal_spec=self.mock_traversal_spec) + + def test_get_no_pgs(self): + ret = vmware.get_dvportgroups(self.mock_dvs_ref) + self.assertEqual(ret, []) + + def test_get_all_pgs(self): + ret = vmware.get_dvportgroups(self.mock_dvs_ref, + get_all_portgroups=True) + self.assertEqual(ret, [i['object'] for i in self.mock_items]) + + def test_filtered_pgs(self): + 
ret = vmware.get_dvss(self.mock_dc_ref, + dvs_names=['fake_pg1', 'fake_pg3', 'no_pg']) + self.assertEqual(ret, [self.mock_items[0]['object'], + self.mock_items[2]['object']]) From 35fa6df4ec116508ff905a7c2f6fc455be339bfb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:33:59 -0400 Subject: [PATCH 142/348] Added salt.utils.vmware.get_uplink_dvportgroup to retrieve the uplink distributed virtual portgroup --- salt/utils/vmware.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index ee671eeb1ae..0c0c42767ef 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1195,6 +1195,35 @@ def get_dvportgroups(parent_ref, portgroup_names=None, return items +def get_uplink_dvportgroup(dvs_ref): + ''' + Returns the uplink distributed virtual portgroup of a distributed virtual + switch (dvs) + + dvs_ref + The dvs reference + ''' + dvs_name = get_managed_object_name(dvs_ref) + log.trace('Retrieving uplink portgroup of dvs \'{0}\''.format(dvs_name)) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='portgroup', + skip=False, + type=vim.DistributedVirtualSwitch) + service_instance = get_service_instance_from_managed_object(dvs_ref) + items = [entry['object'] for entry in + get_mors_with_properties(service_instance, + vim.DistributedVirtualPortgroup, + container_ref=dvs_ref, + property_list=['tag'], + traversal_spec=traversal_spec) + if entry['tag'] and + [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']] + if not items: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name)) + return items[0] + + def list_objects(service_instance, vim_object, properties=None): ''' Returns a simple list of objects from a given service instance. 
From b8bc8fd581f3b3cd7503f113e7c35c7f2f4dda87 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:35:00 -0400 Subject: [PATCH 143/348] Added tests for salt.utils.vmware.get_uplink_dvportgroup --- tests/unit/utils/vmware/test_dvs.py | 69 +++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index da3a3883de9..2388afe94bd 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -504,3 +504,72 @@ class GetDvportgroupsTestCase(TestCase): dvs_names=['fake_pg1', 'fake_pg3', 'no_pg']) self.assertEqual(ret, [self.mock_items[0]['object'], self.mock_items[2]['object']]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetUplinkDvportgroupTestCase(TestCase): + def setUp(self): + self.mock_si = MagicMock() + self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch) + self.mock_traversal_spec = MagicMock() + self.mock_items = [{'object': MagicMock(), + 'tag': [MagicMock(key='fake_tag')]}, + {'object': MagicMock(), + 'tag': [MagicMock(key='SYSTEM/DVS.UPLINKPG')]}] + self.mock_get_mors = MagicMock(return_value=self.mock_items) + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_dvs')), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_dvs_ref', 'mock_traversal_spec', + 'mock_items', 'mock_get_mors'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + 
with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.get_uplink_dvportgroup(self.mock_dvs_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value='traversal_spec') + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + vmware.get_uplink_dvportgroup(self.mock_dvs_ref) + mock_traversal_spec.assert_called_once_with( + path='portgroup', skip=False, type=vim.DistributedVirtualSwitch) + + def test_get_mors_with_properties(self): + vmware.get_uplink_dvportgroup(self.mock_dvs_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, vim.DistributedVirtualPortgroup, + container_ref=self.mock_dvs_ref, property_list=['tag'], + traversal_spec=self.mock_traversal_spec) + + def test_get_no_uplink_pg(self): + with patch('salt.utils.vmware.get_mors_with_properties', + MagicMock(return_value=[])): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + vmware.get_uplink_dvportgroup(self.mock_dvs_ref) + self.assertEqual(excinfo.exception.strerror, + 'Uplink portgroup of DVS \'fake_dvs\' wasn\'t found') + + def test_get_uplink_pg(self): + ret = vmware.get_uplink_dvportgroup(self.mock_dvs_ref) + self.assertEqual(ret, self.mock_items[1]['object']) From 13b4e0e426d6bdf414b9a5515fb7983863a1d0c5 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:36:34 -0400 Subject: [PATCH 144/348] Added salt.utils.vmware.create_dvportgroup to create a distributed virtual portgroup --- salt/utils/vmware.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 0c0c42767ef..7b92e86d8e1 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1224,6 +1224,37 @@ def get_uplink_dvportgroup(dvs_ref): return items[0] +def create_dvportgroup(dvs_ref, spec): + ''' + Creates a 
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup {0} to dvs '
              '\'{1}\''.format(spec.name, dvs_name))
    # Fix: use an explicit positional field ('{0}') instead of '{}' for
    # consistency with the rest of this module (auto-numbered fields are
    # invalid on Python 2.6, which this codebase still supports).
    log.trace('spec = {0}'.format(spec))
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the portgroup creation task completes.
    wait_for_task(task, dvs_name, str(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fix: corrected 'portgrouo' typo in the trace message.
    log.trace('Updating portgroup {0}'.format(pg_name))
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes.
    wait_for_task(task, pg_name, str(task.__class__))
'mock_task', + 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref) + + def test_create_dvporgroup_task(self): + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + self.mock_dvs_ref.CreateDVPortgroup_Task.assert_called_once_with( + self.mock_pg_spec) + + def test_create_dvporgroup_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_create_dvporgroup_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_dvporgroup_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_dvs', + '') From 
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fix: corrected 'portgrouo' typo in the trace message.
    log.trace('Removing portgroup {0}'.format(pg_name))
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(task, pg_name, str(task.__class__))
From d7474f8d30cd57f905fbd549982fae8c9379fa0d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 06:17:29 -0400 Subject: [PATCH 148/348] Added tests for salt.utils.vmware.update_dvportgroup --- tests/unit/utils/vmware/test_dvs.py | 73 +++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 8cfc1e04a50..31459d261cd 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -643,3 +643,76 @@ class CreateDvportgroupTestCase(TestCase): self.mock_wait_for_task.assert_called_once_with( self.mock_task, 'fake_dvs', '') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class UpdateDvportgroupTestCase(TestCase): + def setUp(self): + self.mock_pg_spec = MagicMock() + self.mock_task = MagicMock(spec=FakeTaskClass) + self.mock_pg_ref = \ + MagicMock(ReconfigureDVPortgroup_Task=MagicMock( + return_value=self.mock_task)) + self.mock_wait_for_task = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_pg')), + ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_pg_spec', 'mock_pg_ref', 'mock_task', + 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref) + + def test_reconfigure_dvporgroup_task(self): + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + self.mock_pg_ref.ReconfigureDVPortgroup_Task.assert_called_once_with( + self.mock_pg_spec) + 
+ def test_reconfigure_dvporgroup_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_pg_ref.ReconfigureDVPortgroup_Task = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_reconfigure_dvporgroup_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_pg_ref.ReconfigureDVPortgroup_Task = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_reconfigure_dvporgroup_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_pg_ref.ReconfigureDVPortgroup_Task = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_pg', + '') From d4d6ad99c22a71b34242dcd6e3872f8ac0ca878c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 06:17:53 -0400 Subject: [PATCH 149/348] Added tests for salt.utils.vmware.remove_dvportgroup --- tests/unit/utils/vmware/test_dvs.py | 67 +++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 31459d261cd..6f88484877c 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -716,3 +716,70 @@ class 
UpdateDvportgroupTestCase(TestCase): self.mock_wait_for_task.assert_called_once_with( self.mock_task, 'fake_pg', '') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class RemoveDvportgroupTestCase(TestCase): + def setUp(self): + self.mock_task = MagicMock(spec=FakeTaskClass) + self.mock_pg_ref = \ + MagicMock(Destroy_Task=MagicMock( + return_value=self.mock_task)) + self.mock_wait_for_task = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + MagicMock(return_value='fake_pg')), + ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_pg_ref', 'mock_task', 'mock_wait_for_task'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vmware.remove_dvportgroup(self.mock_pg_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref) + + def test_destroy_task(self): + vmware.remove_dvportgroup(self.mock_pg_ref) + self.mock_pg_ref.Destroy_Task.assert_called_once_with() + + def test_destroy_task_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.remove_dvportgroup(self.mock_pg_ref) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_destroy_treconfigure_dvporgroup_task_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.remove_dvportgroup(self.mock_pg_ref) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_destroy_treconfigure_dvporgroup_task_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vmware.remove_dvportgroup(self.mock_pg_ref) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_wait_for_tasks(self): + vmware.remove_dvportgroup(self.mock_pg_ref) + self.mock_wait_for_task.assert_called_once_with( + self.mock_task, 'fake_pg', + '') From b65c7be7b4d0e6cddc762bd7f93852a9d93e6a8e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:46:07 -0400 Subject: [PATCH 150/348] Added private functions to convert a vim.VMwareDistributedVirtualSwitch into a dict representation --- salt/modules/vsphere.py | 105 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index d6aabb74e4c..b2bb5666b28 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3622,6 +3622,111 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N return ret +def _get_dvs_config_dict(dvs_name, dvs_config): + ''' + Returns the dict representation of the DVS config + + dvs_name + The name of the DVS + + dvs_config + The DVS config + ''' + log.trace('Building the dict of the DVS \'{0}\' config'.format(dvs_name)) + conf_dict = {'name': dvs_name, + 'contact_email': dvs_config.contact.contact, + 'contact_name': dvs_config.contact.name, + 'description': dvs_config.description, + 
'lacp_api_version': dvs_config.lacpApiVersion, + 'network_resource_control_version': + dvs_config.networkResourceControlVersion, + 'network_resource_management_enabled': + dvs_config.networkResourceManagementEnabled, + 'max_mtu': dvs_config.maxMtu} + if isinstance(dvs_config.uplinkPortPolicy, + vim.DVSNameArrayUplinkPortPolicy): + conf_dict.update( + {'uplink_names': dvs_config.uplinkPortPolicy.uplinkPortName}) + return conf_dict + + +def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol): + ''' + Returns the dict representation of the DVS link discovery protocol + + dvs_name + The name of the DVS + + dvs_link_disc_protocol + The DVS link discovery protocol + ''' + log.trace('Building the dict of the DVS \'{0}\' link discovery ' + 'protocol'.format(dvs_name)) + return {'operation': dvs_link_disc_protocol.operation, + 'protocol': dvs_link_disc_protocol.protocol} + + +def _get_dvs_product_info(dvs_name, dvs_product_info): + ''' + Returns the dict representation of the DVS product_info + + dvs_name + The name of the DVS + + dvs_product_info + The DVS product info + ''' + log.trace('Building the dict of the DVS \'{0}\' product ' + 'info'.format(dvs_name)) + return {'name': dvs_product_info.name, + 'vendor': dvs_product_info.vendor, + 'version': dvs_product_info.version} + + +def _get_dvs_capability(dvs_name, dvs_capability): + ''' + Returns the dict representation of the DVS capability + + dvs_name + The name of the DVS + + dvs_capability + The DVS capability + ''' + log.trace('Building the dict of the DVS \'{0}\' capability' + ''.format(dvs_name)) + return {'operation_supported': dvs_capability.dvsOperationSupported, + 'portgroup_operation_supported': + dvs_capability.dvPortGroupOperationSupported, + 'port_operation_supported': dvs_capability.dvPortOperationSupported} + + +def _get_dvs_infrastructure_traffic_resources(dvs_name, + dvs_infra_traffic_ress): + ''' + Returns a list of dict representations of the DVS infrastructure traffic + resources + + 
dvs_name + The name of the DVS + + dvs_infra_traffic_ress + The DVS infrastructure traffic resources + ''' + log.trace('Building the dicts of the DVS \'{0}\' infrastructure traffic ' + 'resources'.format(dvs_name)) + res_dicts = [] + for res in dvs_infra_traffic_ress: + res_dict = {'key': res.key, + 'limit': res.allocationInfo.limit, + 'reservation': res.allocationInfo.reservation} + if res.allocationInfo.shares: + res_dict.update({'num_shares': res.allocationInfo.shares.shares, + 'share_level': res.allocationInfo.shares.level}) + res_dicts.append(res_dict) + return res_dicts + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 3657bad621bfeb74dfc161db99c2e050729dd7ff Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:49:53 -0400 Subject: [PATCH 151/348] Added salt.modules.vsphere.list_dvss to list dict representations of a DVS --- salt/modules/vsphere.py | 66 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index b2bb5666b28..8c4571b919e 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3727,6 +3727,72 @@ def _get_dvs_infrastructure_traffic_resources(dvs_name, return res_dicts +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def list_dvss(datacenter=None, dvs_names=None, service_instance=None): + ''' + Returns a list of distributed virtual switches (DVSs). + The list can be filtered by the datacenter or DVS names. + + datacenter + The datacenter to look for DVSs in. + Default value is None. + + dvs_names + List of DVS names to look for. If None, all DVSs are returned. + Default value is None. + + .. 
code-block:: bash + + salt '*' vsphere.list_dvss + + salt '*' vsphere.list_dvss dvs_names=[dvs1,dvs2] + ''' + ret_dict = [] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + + for dvs in salt.utils.vmware.get_dvss(dc_ref, dvs_names, (not dvs_names)): + dvs_dict = {} + # XXX: Because of how VMware did DVS object inheritance we can\'t + # be more restrictive when retrieving the dvs config, we have to + # retrieve the entire object + props = salt.utils.vmware.get_properties_of_managed_object( + dvs, ['name', 'config', 'capability', 'networkResourcePool']) + dvs_dict = _get_dvs_config_dict(props['name'], props['config']) + # Product info + dvs_dict.update( + {'product_info': + _get_dvs_product_info(props['name'], + props['config'].productInfo)}) + # Link Discovery Protocol + if props['config'].linkDiscoveryProtocolConfig: + dvs_dict.update( + {'link_discovery_protocol': + _get_dvs_link_discovery_protocol( + props['name'], + props['config'].linkDiscoveryProtocolConfig)}) + # Capability + dvs_dict.update({'capability': + _get_dvs_capability(props['name'], + props['capability'])}) + # InfrastructureTrafficResourceConfig - available with vSphere 6.0 + if hasattr(props['config'], 'infrastructureTrafficResourceConfig'): + dvs_dict.update({ + 'infrastructure_traffic_resource_pools': + _get_dvs_infrastructure_traffic_resources( + props['name'], + props['config'].infrastructureTrafficResourceConfig)}) + ret_dict.append(dvs_dict) + return ret_dict + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 9f6981806a4c932d03abe1c464ff1a10ac0cfd72 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 
18:51:41 -0400 Subject: [PATCH 152/348] Added private functions to apply a DVS dict representation to a VMware spec object --- salt/modules/vsphere.py | 129 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 8c4571b919e..03a0afc3219 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3793,6 +3793,135 @@ def list_dvss(datacenter=None, dvs_names=None, service_instance=None): return ret_dict +def _apply_dvs_config(config_spec, config_dict): + ''' + Applies the values of the config dict dictionary to a config spec + (vim.VMwareDVSConfigSpec) + ''' + if config_dict.get('name'): + config_spec.name = config_dict['name'] + if config_dict.get('contact_email') or config_dict.get('contact_name'): + if not config_spec.contact: + config_spec.contact = vim.DVSContactInfo() + config_spec.contact.contact = config_dict.get('contact_email') + config_spec.contact.name = config_dict.get('contact_name') + if config_dict.get('description'): + config_spec.description = config_dict.get('description') + if config_dict.get('max_mtu'): + config_spec.maxMtu = config_dict.get('max_mtu') + if config_dict.get('lacp_api_version'): + config_spec.lacpApiVersion = config_dict.get('lacp_api_version') + if config_dict.get('network_resource_control_version'): + config_spec.networkResourceControlVersion = \ + config_dict.get('network_resource_control_version') + if config_dict.get('uplink_names'): + if not config_spec.uplinkPortPolicy or \ + not isinstance(config_spec.uplinkPortPolicy, + vim.DVSNameArrayUplinkPortPolicy): + + config_spec.uplinkPortPolicy = \ + vim.DVSNameArrayUplinkPortPolicy() + config_spec.uplinkPortPolicy.uplinkPortName = \ + config_dict['uplink_names'] + + +def _apply_dvs_link_discovery_protocol(disc_prot_config, disc_prot_dict): + ''' + Applies the values of the disc_prot_dict dictionary to a link discovery + protocol config object (vim.LinkDiscoveryProtocolConfig) + ''' 
+ disc_prot_config.operation = disc_prot_dict['operation'] + disc_prot_config.protocol = disc_prot_dict['protocol'] + + +def _apply_dvs_product_info(product_info_spec, product_info_dict): + ''' + Applies the values of the product_info_dict dictionary to a product info + spec (vim.DistributedVirtualSwitchProductSpec) + ''' + if product_info_dict.get('name'): + product_info_spec.name = product_info_dict['name'] + if product_info_dict.get('vendor'): + product_info_spec.vendor = product_info_dict['vendor'] + if product_info_dict.get('version'): + product_info_spec.version = product_info_dict['version'] + + +def _apply_dvs_capability(capability_spec, capability_dict): + ''' + Applies the values of the capability_dict dictionary to a DVS capability + object (vim.vim.DVSCapability) + ''' + if 'operation_supported' in capability_dict: + capability_spec.dvsOperationSupported = \ + capability_dict['operation_supported'] + if 'port_operation_supported' in capability_dict: + capability_spec.dvPortOperationSupported = \ + capability_dict['port_operation_supported'] + if 'portgroup_operation_supported' in capability_dict: + capability_spec.dvPortGroupOperationSupported = \ + capability_dict['portgroup_operation_supported'] + + +def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources, + resource_dicts): + ''' + Applies the values of the resource dictionaries to infra traffic resources, + creating the infra traffic resource if required + (vim.DistributedVirtualSwitchProductSpec) + ''' + for res_dict in resource_dicts: + ress = [r for r in infra_traffic_resources if r.key == res_dict['key']] + if ress: + res = ress[0] + else: + res = vim.DvsHostInfrastructureTrafficResource() + res.key = res_dict['key'] + res.allocationInfo = \ + vim.DvsHostInfrastructureTrafficResourceAllocation() + infra_traffic_resources.append(res) + if res_dict.get('limit'): + res.allocationInfo.limit = res_dict['limit'] + if res_dict.get('reservation'): + res.allocationInfo.reservation = 
res_dict['reservation'] + if res_dict.get('num_shares') or res_dict.get('share_level'): + if not res.allocationInfo.shares: + res.allocationInfo.shares = vim.SharesInfo() + if res_dict.get('share_level'): + res.allocationInfo.shares.level = \ + vim.SharesLevel(res_dict['share_level']) + if res_dict.get('num_shares'): + #XXX Even though we always set the number of shares if provided, + #the vCenter will ignore it unless the share level is 'custom'. + res.allocationInfo.shares.shares=res_dict['num_shares'] + + +def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): + ''' + Applies the values of the resource dictionaries to network resource pools, + creating the resource pools if required + (vim.DVSNetworkResourcePoolConfigSpec) + ''' + for res_dict in resource_dicts: + ress = [r for r in network_resource_pools if r.key == res_dict['key']] + if ress: + res = ress[0] + else: + res = vim.DVSNetworkResourcePoolConfigSpec() + res.key = res_dict['key'] + res.allocationInfo = \ + vim.DVSNetworkResourcePoolAllocationInfo() + network_resource_pools.append(res) + if res_dict.get('limit'): + res.allocationInfo.limit = res_dict['limit'] + if res_dict.get('num_shares') and res_dict.get('share_level'): + if not res.allocationInfo.shares: + res.allocationInfo.shares = vim.SharesInfo() + res.allocationInfo.shares.shares=res_dict['num_shares'] + res.allocationInfo.shares.level = \ + vim.SharesLevel(res_dict['share_level']) + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 34a841a669572d41d55d661c335176b3951416b7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 18:55:52 -0400 Subject: [PATCH 153/348] Added salt.modules.vsphere.create_dvs to create a DVS based on a dict representations --- salt/modules/vsphere.py | 72 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 
03a0afc3219..8583f77125c 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3922,6 +3922,78 @@ def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): vim.SharesLevel(res_dict['share_level']) +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def create_dvs(dvs_dict, dvs_name, service_instance=None): + ''' + Creates a distributed virtual switch (DVS). + + Note: The ``dvs_name`` param will override any name set in ``dvs_dict``. + + dvs_dict + Dict representation of the new DVS (exmaple in salt.states.dvs) + + dvs_name + Name of the DVS to be created. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name + ''' + log.trace('Creating dvs \'{0}\' with dict = {1}'.format(dvs_name, + dvs_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + # Make the name of the DVS consistent with the call + dvs_dict['name'] = dvs_name + # Build the config spec from the input + dvs_create_spec = vim.DVSCreateSpec() + dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() + _apply_dvs_config(dvs_create_spec.configSpec, dvs_dict) + if dvs_dict.get('product_info'): + dvs_create_spec.productInfo = vim.DistributedVirtualSwitchProductSpec() + _apply_dvs_product_info(dvs_create_spec.productInfo, + dvs_dict['product_info']) + if dvs_dict.get('capability'): + dvs_create_spec.capability = vim.DVSCapability() + _apply_dvs_capability(dvs_create_spec.capability, + dvs_dict['capability']) + if dvs_dict.get('link_discovery_protocol'): + 
dvs_create_spec.configSpec.linkDiscoveryProtocolConfig = \ + vim.LinkDiscoveryProtocolConfig() + _apply_dvs_link_discovery_protocol( + dvs_create_spec.configSpec.linkDiscoveryProtocolConfig, + dvs_dict['link_discovery_protocol']) + if dvs_dict.get('infrastructure_traffic_resource_pools'): + dvs_create_spec.configSpec.infrastructureTrafficResourceConfig = [] + _apply_dvs_infrastructure_traffic_resources( + dvs_create_spec.configSpec.infrastructureTrafficResourceConfig, + dvs_dict['infrastructure_traffic_resource_pools']) + log.trace('dvs_create_spec = {}'.format(dvs_create_spec)) + salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec) + if 'network_resource_management_enabled' in dvs_dict: + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, + dvs_names=[dvs_name]) + if not dvs_refs: + raise excs.VMwareObjectRetrievalError( + 'DVS \'{0}\' wasn\'t found in datacenter \'{1}\'' + ''.format(dvs_name, datacenter)) + dvs_ref = dvs_refs[0] + salt.utils.vmware.set_dvs_network_resource_management_enabled( + dvs_ref, dvs_dict['network_resource_management_enabled']) + return True + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From c576d3ca959ed900ceeffa9fa694b7af57544d60 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:03:04 -0400 Subject: [PATCH 154/348] Added salt.modules.vsphere.update_dvs to update a DVS based on a dict representations --- salt/modules/vsphere.py | 77 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 8583f77125c..e140195cf1c 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3993,6 +3993,83 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None): dvs_ref, dvs_dict['network_resource_management_enabled']) return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def update_dvs(dvs_dict, dvs, 
service_instance=None): + ''' + Updates a distributed virtual switch (DVS). + + Note: Updating the product info, capability, uplinks of a DVS is not + supported so the corresponding entries in ``dvs_dict`` will be + ignored. + + dvs_dict + Dictionary with the values the DVS should be updated with + (example in salt.states.dvs) + + dvs + Name of the DVS to be updated. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1 + ''' + # Remove ignored properties + log.trace('Updating dvs \'{0}\' with dict = {1}'.format(dvs, dvs_dict)) + for prop in ['product_info', 'capability', 'uplink_names', 'name']: + if prop in dvs_dict: + del dvs_dict[prop] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' wasn\'t found in ' + 'datacenter \'{1}\'' + ''.format(dvs, datacenter)) + dvs_ref = dvs_refs[0] + # Build the config spec from the input + dvs_props = salt.utils.vmware.get_properties_of_managed_object( + dvs_ref, ['config', 'capability']) + dvs_config = vim.VMwareDVSConfigSpec() + # Copy all of the properties in the config of the DVS to a + # DvsConfigSpec + skipped_properties = ['host'] + for prop in dvs_config.__dict__.keys(): + if prop in skipped_properties: + continue + if hasattr(dvs_props['config'], prop): + setattr(dvs_config, prop, getattr(dvs_props['config'], prop)) + _apply_dvs_config(dvs_config, dvs_dict) + if dvs_dict.get('link_discovery_protocol'): + if not dvs_config.linkDiscoveryProtocolConfig: + 
dvs_config.linkDiscoveryProtocolConfig = \ + vim.LinkDiscoveryProtocolConfig() + _apply_dvs_link_discovery_protocol( + dvs_config.linkDiscoveryProtocolConfig, + dvs_dict['link_discovery_protocol']) + if dvs_dict.get('infrastructure_traffic_resource_pools'): + if not dvs_config.infrastructureTrafficResourceConfig: + dvs_config.infrastructureTrafficResourceConfig = [] + _apply_dvs_infrastructure_traffic_resources( + dvs_config.infrastructureTrafficResourceConfig, + dvs_dict['infrastructure_traffic_resource_pools']) + log.trace('dvs_config= {}'.format(dvs_config)) + salt.utils.vmware.update_dvs(dvs_ref, dvs_config_spec=dvs_config) + if 'network_resource_management_enabled' in dvs_dict: + salt.utils.vmware.set_dvs_network_resource_management_enabled( + dvs_ref, dvs_dict['network_resource_management_enabled']) + return True + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') From 5c57e30d3155960946b3bdc5423a3006a030a2bd Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:09:05 -0400 Subject: [PATCH 155/348] Added salt.modules.vsphere.list_dvportgroups to list dict representations of a DVPortgroups --- salt/modules/vsphere.py | 169 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index e140195cf1c..f3833b1a014 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4071,6 +4071,175 @@ def update_dvs(dvs_dict, dvs, service_instance=None): return True +def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config): + ''' + Returns the out shaping policy of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The dafault port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' out shaping ' + 'config'.format(pg_name)) + out_shaping_policy = pg_default_port_config.outShapingPolicy + if not out_shaping_policy: + return {} + return {'average_bandwidth': 
out_shaping_policy.averageBandwidth.value, + 'burst_size': out_shaping_policy.burstSize.value, + 'enabled': out_shaping_policy.enabled.value, + 'peak_bandwidth': out_shaping_policy.peakBandwidth.value} + + +def _get_dvportgroup_security_policy(pg_name, pg_default_port_config): + ''' + Returns the security policy of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The default port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' security policy ' + 'config'.format(pg_name)) + sec_policy = pg_default_port_config.securityPolicy + if not sec_policy: + return {} + return {'allow_promiscuous': sec_policy.allowPromiscuous.value, + 'forged_transmits': sec_policy.forgedTransmits.value, + 'mac_changes': sec_policy.macChanges.value} + + +def _get_dvportgroup_teaming(pg_name, pg_default_port_config): + ''' + Returns the teaming of a distributed virtual portgroup + + pg_name + The name of the portgroup + + pg_default_port_config + The default port config of the portgroup + ''' + log.trace('Retrieving portgroup\'s \'{0}\' teaming ' + 'config'.format(pg_name)) + teaming_policy = pg_default_port_config.uplinkTeamingPolicy + if not teaming_policy: + return {} + ret_dict = {'notify_switches': teaming_policy.notifySwitches.value, + 'policy': teaming_policy.policy.value, + 'reverse_policy': teaming_policy.reversePolicy.value, + 'rolling_order': teaming_policy.rollingOrder.value} + if teaming_policy.failureCriteria: + failure_criteria = teaming_policy.failureCriteria + ret_dict.update({'failure_criteria': { + 'check_beacon': failure_criteria.checkBeacon.value, + 'check_duplex': failure_criteria.checkDuplex.value, + 'check_error_percent': failure_criteria.checkErrorPercent.value, + 'check_speed': failure_criteria.checkSpeed.value, + 'full_duplex': failure_criteria.fullDuplex.value, + 'percentage': failure_criteria.percentage.value, + 'speed': failure_criteria.speed.value}}) + if teaming_policy.uplinkPortOrder: 
+ uplink_order = teaming_policy.uplinkPortOrder + ret_dict.update({'port_order': { + 'active': uplink_order.activeUplinkPort, + 'standby': uplink_order.standbyUplinkPort}}) + return ret_dict + + +def _get_dvportgroup_dict(pg_ref): + ''' + Returns a dictionary with a distributed virutal portgroup data + + + pg_ref + Portgroup reference + ''' + props = salt.utils.vmware.get_properties_of_managed_object( + pg_ref, ['name', 'config.description', 'config.numPorts', + 'config.type', 'config.defaultPortConfig']) + pg_dict = {'name': props['name'], + 'description': props.get('config.description'), + 'num_ports': props['config.numPorts'], + 'type': props['config.type']} + if props['config.defaultPortConfig']: + dpg = props['config.defaultPortConfig'] + if dpg.vlan and \ + isinstance(dpg.vlan, + vim.VmwareDistributedVirtualSwitchVlanIdSpec): + + pg_dict.update({'vlan_id': dpg.vlan.vlanId}) + pg_dict.update({'out_shaping': + _get_dvportgroup_out_shaping( + props['name'], + props['config.defaultPortConfig'])}) + pg_dict.update({'security_policy': + _get_dvportgroup_security_policy( + props['name'], + props['config.defaultPortConfig'])}) + pg_dict.update({'teaming': + _get_dvportgroup_teaming( + props['name'], + props['config.defaultPortConfig'])}) + return pg_dict + + +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None): + ''' + Returns a list of distributed virtual switch portgroups. + The list can be filtered by the portgroup names or by the DVS. + + dvs + Name of the DVS containing the portgroups. + Default value is None. + + portgroup_names + List of portgroup names to look for. If None, all portgroups are + returned. + Default value is None + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + salt '*' vsphere.list_dvporgroups + + salt '*' vsphere.list_dvportgroups dvs=dvs1 + + salt '*' vsphere.list_dvportgroups portgroup_names=[pg1] + + salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1] + ''' + ret_dict = [] + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + if dvs: + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + dvs_ref = dvs_refs[0] + get_all_portgroups = True if not portgroup_names else False + for pg_ref in salt.utils.vmware.get_dvportgroups( + parent_ref=dvs_ref if dvs else dc_ref, + portgroup_names=portgroup_names, + get_all_portgroups=get_all_portgroups): + + ret_dict.append(_get_dvportgroup_dict(pg_ref)) + return ret_dict + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 3753a1048985dadc69dd5fd00b5c8a6cc3fc62a7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:11:22 -0400 Subject: [PATCH 156/348] Added salt.modules.vsphere.list_uplink_dvportgroup to list the dict representation of the uplink portgroup of a DVS --- salt/modules/vsphere.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index f3833b1a014..72a9d5ae0e8 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4240,6 +4240,39 @@ def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None): return ret_dict +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def 
list_uplink_dvportgroup(dvs, service_instance=None): + ''' + Returns the uplink portgroup of a distributed virtual switch. + + dvs + Name of the DVS containing the portgroup. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.list_uplink_dvportgroup dvs=dvs_name + ''' + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0]) + return _get_dvportgroup_dict(uplink_pg_ref) + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From e2fc69585e510e3eaafab8d414608ef09b58e18d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:13:35 -0400 Subject: [PATCH 157/348] Added private functions to apply a DVPortgroup dict representation to a VMware spec object --- salt/modules/vsphere.py | 179 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 72a9d5ae0e8..a84a1c96601 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4273,6 +4273,185 @@ def list_uplink_dvportgroup(dvs, service_instance=None): return _get_dvportgroup_dict(uplink_pg_ref) +def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf): + ''' + Applies the values in out_shaping_conf to an out_shaping object + + pg_name + The name of the portgroup + + out_shaping + The vim.DVSTrafficShapingPolicy to 
apply the config to + + out_shaping_conf + The out shaping config + ''' + log.trace('Building portgroup\'s \'{0}\' out shaping ' + 'policy'.format(pg_name)) + if out_shaping_conf.get('average_bandwidth'): + out_shaping.averageBandwidth = vim.LongPolicy() + out_shaping.averageBandwidth.value = \ + out_shaping_conf['average_bandwidth'] + if out_shaping_conf.get('burst_size'): + out_shaping.burstSize = vim.LongPolicy() + out_shaping.burstSize.value = out_shaping_conf['burst_size'] + if 'enabled' in out_shaping_conf: + out_shaping.enabled = vim.BoolPolicy() + out_shaping.enabled.value = out_shaping_conf['enabled'] + if out_shaping_conf.get('peak_bandwidth'): + out_shaping.peakBandwidth = vim.LongPolicy() + out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth'] + + +def _apply_dvportgroup_security_policy(pg_name, sec_policy, sec_policy_conf): + ''' + Applies the values in sec_policy_conf to a security policy object + + pg_name + The name of the portgroup + + sec_policy + The vim.DVSSecurityPolicy to apply the config to + + sec_policy_conf + The security policy config + ''' + log.trace('Building portgroup\'s \'{0}\' security policy '.format(pg_name)) + if 'allow_promiscuous' in sec_policy_conf: + sec_policy.allowPromiscuous = vim.BoolPolicy() + sec_policy.allowPromiscuous.value = \ + sec_policy_conf['allow_promiscuous'] + if 'forged_transmits' in sec_policy_conf: + sec_policy.forgedTransmits = vim.BoolPolicy() + sec_policy.forgedTransmits.value = sec_policy_conf['forged_transmits'] + if 'mac_changes' in sec_policy_conf: + sec_policy.macChanges = vim.BoolPolicy() + sec_policy.macChanges.value = sec_policy_conf['mac_changes'] + + +def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf): + ''' + Applies the values in teaming_conf to a teaming policy object + + pg_name + The name of the portgroup + + teaming + The vim.VmwareUplinkPortTeamingPolicy to apply the config to + + teaming_conf + The teaming config + ''' + log.trace('Building portgroup\'s 
\'{0}\' teaming'.format(pg_name)) + if 'notify_switches' in teaming_conf: + teaming.notifySwitches = vim.BoolPolicy() + teaming.notifySwitches.value = teaming_conf['notify_switches'] + if 'policy' in teaming_conf: + teaming.policy = vim.StringPolicy() + teaming.policy.value = teaming_conf['policy'] + if 'reverse_policy' in teaming_conf: + teaming.reversePolicy = vim.BoolPolicy() + teaming.reversePolicy.value = teaming_conf['reverse_policy'] + if 'rolling_order' in teaming_conf: + teaming.rollingOrder = vim.BoolPolicy() + teaming.rollingOrder.value = teaming_conf['rolling_order'] + if 'failure_criteria' in teaming_conf: + if not teaming.failureCriteria: + teaming.failureCriteria = vim.DVSFailureCriteria() + failure_criteria_conf = teaming_conf['failure_criteria'] + if 'check_beacon' in failure_criteria_conf: + teaming.failureCriteria.checkBeacon = vim.BoolPolicy() + teaming.failureCriteria.checkBeacon.value = \ + failure_criteria_conf['check_beacon'] + if 'check_duplex' in failure_criteria_conf: + teaming.failureCriteria.checkDuplex = vim.BoolPolicy() + teaming.failureCriteria.checkDuplex.value = \ + failure_criteria_conf['check_duplex'] + if 'check_error_percent' in failure_criteria_conf: + teaming.failureCriteria.checkErrorPercent = vim.BoolPolicy() + teaming.failureCriteria.checkErrorPercent.value = \ + failure_criteria_conf['check_error_percent'] + if 'check_speed' in failure_criteria_conf: + teaming.failureCriteria.checkSpeed = vim.StringPolicy() + teaming.failureCriteria.checkSpeed.value = \ + failure_criteria_conf['check_speed'] + if 'full_duplex' in failure_criteria_conf: + teaming.failureCriteria.fullDuplex = vim.BoolPolicy() + teaming.failureCriteria.fullDuplex.value = \ + failure_criteria_conf['full_duplex'] + if 'percentage' in failure_criteria_conf: + teaming.failureCriteria.percentage = vim.IntPolicy() + teaming.failureCriteria.percentage.value = \ + failure_criteria_conf['percentage'] + if 'speed' in failure_criteria_conf: + 
teaming.failureCriteria.speed = vim.IntPolicy() + teaming.failureCriteria.speed.value = \ + failure_criteria_conf['speed'] + if 'port_order' in teaming_conf: + if not teaming.uplinkPortOrder: + teaming.uplinkPortOrder = vim.VMwareUplinkPortOrderPolicy() + if 'active' in teaming_conf['port_order']: + teaming.uplinkPortOrder.activeUplinkPort = \ + teaming_conf['port_order']['active'] + if 'standby' in teaming_conf['port_order']: + teaming.uplinkPortOrder.standbyUplinkPort = \ + teaming_conf['port_order']['standby'] + + +def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf): + ''' + Applies the values in conf to a distributed portgroup spec + + pg_name + The name of the portgroup + + pg_spec + The vim.DVPortgroupConfigSpec to apply the config to + + pg_conf + The portgroup config + ''' + log.trace('Building portgroup\'s \'{0}\' spec'.format(pg_name)) + if 'name' in pg_conf: + pg_spec.name = pg_conf['name'] + if 'description' in pg_conf: + pg_spec.description = pg_conf['description'] + if 'num_ports' in pg_conf: + pg_spec.numPorts = pg_conf['num_ports'] + if 'type' in pg_conf: + pg_spec.type = pg_conf['type'] + + if not pg_spec.defaultPortConfig: + for prop in ['vlan_id', 'out_shaping', 'security_policy', 'teaming']: + if prop in pg_conf: + pg_spec.defaultPortConfig = vim.VMwareDVSPortSetting() + if 'vlan_id' in pg_conf: + pg_spec.defaultPortConfig.vlan = \ + vim.VmwareDistributedVirtualSwitchVlanIdSpec() + pg_spec.defaultPortConfig.vlan.vlanId = pg_conf['vlan_id'] + if 'out_shaping' in pg_conf: + if not pg_spec.defaultPortConfig.outShapingPolicy: + pg_spec.defaultPortConfig.outShapingPolicy = \ + vim.DVSTrafficShapingPolicy() + _apply_dvportgroup_out_shaping( + pg_name, pg_spec.defaultPortConfig.outShapingPolicy, + pg_conf['out_shaping']) + if 'security_policy' in pg_conf: + if not pg_spec.defaultPortConfig.securityPolicy: + pg_spec.defaultPortConfig.securityPolicy = \ + vim.DVSSecurityPolicy() + _apply_dvportgroup_security_policy( + pg_name, 
pg_spec.defaultPortConfig.securityPolicy, + pg_conf['security_policy']) + if 'teaming' in pg_conf: + if not pg_spec.defaultPortConfig.uplinkTeamingPolicy: + pg_spec.defaultPortConfig.uplinkTeamingPolicy = \ + vim.VmwareUplinkPortTeamingPolicy() + _apply_dvportgroup_teaming( + pg_name, pg_spec.defaultPortConfig.uplinkTeamingPolicy, + pg_conf['teaming']) + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From b38f3255b743288cf86c10afe461533d3121dd42 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:15:33 -0400 Subject: [PATCH 158/348] Added salt.modules.vsphere.create_dvportgroup to create a DVPortgroup based on a dict representations --- salt/modules/vsphere.py | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index a84a1c96601..91f98700368 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4452,6 +4452,57 @@ def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf): pg_conf['teaming']) +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def create_dvportgroup(portgroup_dict, portgroup_name, dvs, + service_instance=None): + ''' + Creates a distributed virtual portgroup. + + Note: The ``portgroup_name`` param will override any name already set + in ``portgroup_dict``. + + portgroup_dict + Dictionary with the config values the portgroup should be created with + (exmaple in salt.states.dvs). + + portgroup_name + Name of the portgroup to be created. + + dvs + Name of the DVS that will contain the portgroup. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.create_dvportgroup portgroup_dict= + portgroup_name=pg1 dvs=dvs1 + ''' + log.trace('Creating portgroup\'{0}\' in dvs \'{1}\' ' + 'with dict = {2}'.format(portgroup_name, dvs, portgroup_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + # Make the name of the dvportgroup consistent with the parameter + portgroup_dict['name'] = portgroup_name + spec = vim.DVPortgroupConfigSpec() + _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict) + salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec) + return True + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 6e6756aa100a1bb0594babfd5388193d634583d1 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:17:50 -0400 Subject: [PATCH 159/348] Added salt.modules.vsphere.update_dvportgroup to update a DVPortgroup based on a dict representations --- salt/modules/vsphere.py | 60 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 91f98700368..3747ccf6b09 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4503,6 +4503,66 @@ def create_dvportgroup(portgroup_dict, portgroup_name, dvs, return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=True): + ''' + Updates a distributed virtual 
portgroup. + + portgroup_dict + Dictionary with the values the portgroup should be updated with + (example in salt.states.dvs). + + portgroup + Name of the portgroup to be updated. + + dvs + Name of the DVS containing the portgroups. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.update_dvportgroup portgroup_dict= + portgroup=pg1 + + salt '*' vsphere.update_dvportgroup portgroup_dict= + portgroup=pg1 dvs=dvs1 + ''' + log.trace('Updating portgroup\'{0}\' in dvs \'{1}\' ' + 'with dict = {2}'.format(portgroup, dvs, portgroup_dict)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0], + portgroup_names=[portgroup]) + if not pg_refs: + raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not ' + 'retrieved'.format(portgroup)) + pg_props = salt.utils.vmware.get_properties_of_managed_object(pg_refs[0], + ['config']) + spec = vim.DVPortgroupConfigSpec() + # Copy existing properties in spec + for prop in ['autoExpand', 'configVersion', 'defaultPortConfig', + 'description', 'name', 'numPorts', 'policy', 'portNameFormat', + 'scope', 'type', 'vendorSpecificConfig']: + setattr(spec, prop, getattr(pg_props['config'], prop)) + _apply_dvportgroup_config(portgroup, spec, portgroup_dict) + salt.utils.vmware.update_dvportgroup(pg_refs[0], spec) + return True + + @depends(HAS_PYVMOMI) + @supports_proxies('esxdatacenter', 'esxcluster') 
@gets_service_instance_via_proxy From 0446c938dd0ea923859c6b811c07710326101890 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:19:21 -0400 Subject: [PATCH 160/348] Added salt.modules.vsphere.remove_dvportgroup to remove a DVPortgroup --- salt/modules/vsphere.py | 43 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 3747ccf6b09..84edc69897b 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4563,6 +4563,49 @@ def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=True): return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'esxcluster') +@gets_service_instance_via_proxy +def remove_dvportgroup(portgroup, dvs, service_instance=None): + ''' + Removes a distributed virtual portgroup. + + portgroup + Name of the portgroup to be removed. + + dvs + Name of the DVS containing the portgroups. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1 + ''' + log.trace('Removing portgroup\'{0}\' in dvs \'{1}\' ' + ''.format(portgroup, dvs)) + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) + if not dvs_refs: + raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' + 'retrieved'.format(dvs)) + pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0], + portgroup_names=[portgroup]) + if not pg_refs: + raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not ' + 'retrieved'.format(portgroup)) + salt.utils.vmware.remove_dvportgroup(pg_refs[0]) + return True + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From c83f471bffc0015238dbfefe1c5af4dfe6879590 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:21:38 -0400 Subject: [PATCH 161/348] Added comments and imports to dvs states --- salt/states/dvs.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 salt/states/dvs.py diff --git a/salt/states/dvs.py b/salt/states/dvs.py new file mode 100644 index 00000000000..d46bde966ff --- /dev/null +++ b/salt/states/dvs.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +''' +Manage VMware distributed virtual switches (DVSs). + +Dependencies +============ + + +- pyVmomi Python Module + + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. 
If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original ESXi State +Module was developed against. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import traceback + +# Import Salt Libs +import salt.exceptions +from salt.utils.dictupdate import update as dict_merge +import salt.utils + +# Get Logging Started +log = logging.getLogger(__name__) + +def __virtual__(): + return True + + +def mod_init(low): + ''' + Init function + ''' + return True From 5b0d84208ad9c9c674c52d2c9ef957d16143c08d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 06:51:19 -0400 Subject: [PATCH 162/348] Added sysdoc in states.dvs --- salt/states/dvs.py | 161 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index d46bde966ff..1423af11604 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -1,6 +1,165 @@ # -*- coding: utf-8 -*- ''' -Manage VMware distributed virtual switches (DVSs). +Manage VMware distributed virtual switches (DVSs) and their distributed virtual +portgroups (DVportgroups). + +Examples +======== + +Several settings can be changed for DVSs and DVporgroups. Here are two examples +covering all of the settings. Fewer settings can be used + +DVS +--- + +.. 
code-block:: python + + 'name': 'dvs1', + 'max_mtu': 1000, + 'uplink_names': [ + 'dvUplink1', + 'dvUplink2', + 'dvUplink3' + ], + 'capability': { + 'portgroup_operation_supported': false, + 'operation_supported': true, + 'port_operation_supported': false + }, + 'lacp_api_version': 'multipleLag', + 'contact_email': 'foo@email.com', + 'product_info': { + 'version': + '6.0.0', + 'vendor': + 'VMware, + Inc.', + 'name': + 'DVS' + }, + 'network_resource_management_enabled': true, + 'contact_name': 'me@email.com', + 'infrastructure_traffic_resource_pools': [ + { + 'reservation': 0, + 'limit': 1000, + 'share_level': 'high', + 'key': 'management', + 'num_shares': 100 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'faultTolerance', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': 32000, + 'share_level': 'normal', + 'key': 'vmotion', + 'num_shares': 50 + }, + { + 'reservation': 10000, + 'limit': -1, + 'share_level': 'normal', + 'key': 'virtualMachine', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'custom', + 'key': 'iSCSI', + 'num_shares': 75 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'nfs', + 'num_shares': 50 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'hbr', + 'num_shares': 50 + }, + { + 'reservation': 8750, + 'limit': 15000, + 'share_level': 'high', + 'key': 'vsan', + 'num_shares': 100 + }, + { + 'reservation': 0, + 'limit': -1, + 'share_level': 'normal', + 'key': 'vdp', + 'num_shares': 50 + } + ], + 'link_discovery_protocol': { + 'operation': + 'listen', + 'protocol': + 'cdp' + }, + 'network_resource_control_version': 'version3', + 'description': 'Managed by Salt. Random settings.' + +Note: The mandatory attribute is: ``name``. + +Portgroup +--------- + +.. 
code-block:: python + 'security_policy': { + 'allow_promiscuous': true, + 'mac_changes': false, + 'forged_transmits': true + }, + 'name': 'vmotion-v702', + 'out_shaping': { + 'enabled': true, + 'average_bandwidth': 1500, + 'burst_size': 4096, + 'peak_bandwidth': 1500 + }, + 'num_ports': 128, + 'teaming': { + 'port_order': { + 'active': [ + 'dvUplink2' + ], + 'standby': [ + 'dvUplink1' + ] + }, + 'notify_switches': false, + 'reverse_policy': true, + 'rolling_order': false, + 'policy': 'failover_explicit', + 'failure_criteria': { + 'check_error_percent': true, + 'full_duplex': false, + 'check_duplex': false, + 'percentage': 50, + 'check_speed': 'minimum', + 'speed': 20, + 'check_beacon': true + } + }, + 'type': 'earlyBinding', + 'vlan_id': 100, + 'description': 'Managed by Salt. Random settings.' + +Note: The mandatory attributes are: ``name``, ``type``. Dependencies ============ From 8e56702598455bb2ea1350dd7065efcb4f6d52f3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:23:14 -0400 Subject: [PATCH 163/348] Added dvs_configured state that configures/adds a DVS --- salt/states/dvs.py | 168 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 1423af11604..39632769b42 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -221,3 +221,171 @@ def mod_init(low): Init function ''' return True + + +def _get_datacenter_name(): + ''' + Returns the datacenter name configured on the proxy + + Supported proxies: esxcluster, esxdatacenter + ''' + + proxy_type = __salt__['vsphere.get_proxy_type']() + details = None + if proxy_type == 'esxcluster': + details = __salt__['esxcluster.get_details']() + elif proxy_type == 'esxdatacenter': + details = __salt__['esxdatacenter.get_details']() + if not details: + raise salt.exceptions.CommandExecutionError( + 'details for proxy type \'{0}\' not loaded'.format(proxy_type)) + return details['datacenter'] + + +def 
dvs_configured(name, dvs): + ''' + Configures a DVS. + + Creates a new DVS, if it doesn't exist in the provided datacenter or + reconfigures it if configured differently. + + dvs + DVS dict representations (see module sysdocs) + ''' + datacenter_name = _get_datacenter_name() + dvs_name = dvs['name'] if dvs.get('name') else name + log.info('Running state {0} for DVS \'{1}\' in datacenter ' + '\'{2}\''.format(name, dvs_name, datacenter_name)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None} + comments = [] + changes = {} + changes_required = False + + try: + #TODO dvs validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name], + service_instance=si) + if not dvss: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create a new DVS ' + '\'{1}\' in datacenter \'{2}\'' + ''.format(name, dvs_name, datacenter_name)) + log.info(comments[-1]) + else: + dvs['name'] = dvs_name + __salt__['vsphere.create_dvs'](dvs_dict=dvs, + dvs_name=dvs_name, + service_instance=si) + comments.append('Created a new DVS \'{0}\' in datacenter ' + '\'{1}\''.format(dvs_name, datacenter_name)) + log.info(comments[-1]) + changes.update({'dvs': {'new': dvs}}) + else: + # DVS already exists. Checking various aspects of the config + props = ['description', 'contact_email', 'contact_name', + 'lacp_api_version', 'link_discovery_protocol', + 'max_mtu', 'network_resource_control_version', + 'network_resource_management_enabled'] + log.trace('DVS \'{0}\' found in datacenter \'{1}\'. 
Checking ' + 'for any updates in ' + '{2}'.format(dvs_name, datacenter_name, props)) + props_to_original_values = {} + props_to_updated_values = {} + current_dvs = dvss[0] + for prop in props: + if prop in dvs and dvs[prop] != current_dvs.get(prop): + props_to_original_values[prop] = current_dvs.get(prop) + props_to_updated_values[prop] = dvs[prop] + + # Simple infrastructure traffic resource control compare doesn't + # work because num_shares is optional if share_level is not custom + # We need to do a dedicated compare for this property + infra_prop = 'infrastructure_traffic_resource_pools' + original_infra_res_pools = [] + updated_infra_res_pools = [] + if infra_prop in dvs: + if not current_dvs.get(infra_prop): + updated_infra_res_pools = dvs[infra_prop] + else: + for idx in range(len(dvs[infra_prop])): + if 'num_shares' not in dvs[infra_prop][idx] and \ + current_dvs[infra_prop][idx]['share_level'] != \ + 'custom' and \ + 'num_shares' in current_dvs[infra_prop][idx]: + + del current_dvs[infra_prop][idx]['num_shares'] + if dvs[infra_prop][idx] != \ + current_dvs[infra_prop][idx]: + + original_infra_res_pools.append( + current_dvs[infra_prop][idx]) + updated_infra_res_pools.append( + dict(dvs[infra_prop][idx])) + if updated_infra_res_pools: + props_to_original_values[ + 'infrastructure_traffic_resource_pools'] = \ + original_infra_res_pools + props_to_updated_values[ + 'infrastructure_traffic_resource_pools'] = \ + updated_infra_res_pools + if props_to_updated_values: + if __opts__['test']: + changes_string = '' + for p in props_to_updated_values.keys(): + if p == 'infrastructure_traffic_resource_pools': + changes_string += \ + '\tinfrastructure_traffic_resource_pools:\n' + for idx in range(len(props_to_updated_values [p])): + d = props_to_updated_values[p][idx] + s = props_to_original_values[p][idx] + changes_string += \ + ('\t\t{0} from \'{1}\' to \'{2}\'\n' + ''.format(d['key'], s, d)) + else: + changes_string += \ + ('\t{0} from \'{1}\' to \'{2}\'\n' + 
''.format(p, props_to_original_values[p], + props_to_updated_values[p])) + comments.append( + 'State dvs_configured will update DVS \'{0}\' ' + 'in datacenter \'{1}\':\n{2}' + ''.format(dvs_name, datacenter_name, changes_string)) + log.info(comments[-1]) + else: + __salt__['vsphere.update_dvs']( + dvs_dict=props_to_updated_values, + dvs=dvs_name, + service_instance=si) + comments.append('Updated DVS \'{0}\' in datacenter \'{1}\'' + ''.format(dvs_name, datacenter_name)) + log.info(comments[-1]) + changes.update({'dvs': {'new': props_to_updated_values, + 'old': props_to_original_values}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': str(exc), + 'result': False if not __opts__['test'] else None}) + return ret + if not comments: + # We have no changes + ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is ' + 'correctly configured. Nothing to be done.' 
+ ''.format(dvs_name, datacenter_name)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret From 903b8a989576c9f4441e7d89c00c33c7edc960cc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:24:52 -0400 Subject: [PATCH 164/348] Added portgroups_configured state that configures/adds/removes DVPortgroups --- salt/states/dvs.py | 222 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 39632769b42..193557ed1aa 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -389,3 +389,225 @@ def dvs_configured(name, dvs): ret.update({'changes': changes, 'result': True}) return ret + + +def _get_diff_dict(dict1, dict2): + ''' + Returns a dictionary with the diffs between two dictionaries + + It will ignore any key that doesn't exist in dict2 + ''' + ret_dict = {} + for p in dict2.keys(): + if p not in dict1: + ret_dict.update({p: {'val1': None, 'val2': dict2[p]}}) + elif dict1[p] != dict2[p]: + if isinstance(dict1[p], dict) and isinstance(dict2[p], dict): + sub_diff_dict = _get_diff_dict(dict1[p], dict2[p]) + if sub_diff_dict: + ret_dict.update({p: sub_diff_dict}) + else: + ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}}) + return ret_dict + + +def _get_val2_dict_from_diff_dict(diff_dict): + ''' + Returns a dictionaries with the values stored in val2 of a diff dict. 
+ ''' + ret_dict = {} + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff difct \'{0}\''.format(diff_dict)) + if 'val2' in diff_dict[p].keys(): + ret_dict.update({p: diff_dict[p]['val2']}) + else: + ret_dict.update( + {p: _get_val2_dict_from_diff_dict(diff_dict[p])}) + return ret_dict + + +def _get_val1_dict_from_diff_dict(diff_dict): + ''' + Returns a dictionaries with the values stored in val1 of a diff dict. + ''' + ret_dict = {} + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff difct \'{0}\''.format(diff_dict)) + if 'val1' in diff_dict[p].keys(): + ret_dict.update({p: diff_dict[p]['val1']}) + else: + ret_dict.update( + {p: _get_val1_dict_from_diff_dict(diff_dict[p])}) + return ret_dict + + +def _get_changes_from_diff_dict(diff_dict): + ''' + Returns a list of string message of the differences in a diff dict. + + Each inner message is tabulated one tab deeper + ''' + changes_strings = [] + for p in diff_dict.keys(): + if not isinstance(diff_dict[p], dict): + raise ValueError('Unexpected diff difct \'{0}\''.format(diff_dict)) + if sorted(diff_dict[p].keys()) == ['val1', 'val2']: + # Some string formatting + from_str = diff_dict[p]['val1'] + if isinstance(diff_dict[p]['val1'], str): + from_str = '\'{0}\''.format(diff_dict[p]['val1']) + elif isinstance(diff_dict[p]['val1'], list): + from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1'])) + to_str = diff_dict[p]['val2'] + if isinstance(diff_dict[p]['val2'], str): + to_str = '\'{0}\''.format(diff_dict[p]['val2']) + elif isinstance(diff_dict[p]['val2'], list): + to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2'])) + changes_strings.append('{0} from {1} to {2}'.format( + p, from_str, to_str)) + else: + sub_changes = _get_changes_from_diff_dict(diff_dict[p]) + if sub_changes: + changes_strings.append('{0}:'.format(p)) + changes_strings.extend(['\t{0}'.format(c) + for c in sub_changes]) + return 
changes_strings + + +def portgroups_configured(name, dvs, portgroups): + ''' + Configures portgroups on a DVS. + + Creates/updates/removes portgroups in a provided DVS + + dvs + Name of the DVS + + portgroups + Portgroup dict representations (see module sysdocs) + ''' + datacenter = _get_datacenter_name() + log.info('Running state {0} on DVS \'{1}\', datacenter ' + '\'{2}\''.format(name, dvs, datacenter)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + + try: + #TODO portroups validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_pgs = __salt__['vsphere.list_dvportgroups']( + dvs=dvs, service_instance=si) + expected_pg_names = [] + for pg in portgroups: + pg_name = pg['name'] + expected_pg_names.append(pg_name) + del pg['name'] + log.info('Checking pg \'{0}\''.format(pg_name)) + filtered_current_pgs = \ + [p for p in current_pgs if p.get('name') == pg_name] + if not filtered_current_pgs: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create a new portgroup ' + '\'{1}\' in DVS \'{2}\', datacenter ' + '\'{3}\''.format(name, pg_name, dvs, + datacenter)) + else: + __salt__['vsphere.create_dvportgroup']( + portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs, + service_instance=si) + comments.append('Created a new portgroup \'{0}\' in DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(pg_name, dvs, datacenter)) + log.info(comments[-1]) + changes.update({pg_name: {'new': pg}}) + else: + # Porgroup already exists. Checking the config + log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter ' + '\'{2}\'. Checking for any updates.' 
+ ''.format(pg_name, dvs, datacenter)) + current_pg = filtered_current_pgs[0] + diff_dict = _get_diff_dict(current_pg, pg) + + if diff_dict: + changes_required=True + if __opts__['test']: + changes_strings = \ + _get_changes_from_diff_dict(diff_dict) + log.trace('changes_strings = ' + '{0}'.format(changes_strings)) + comments.append( + 'State {0} will update portgroup \'{1}\' in ' + 'DVS \'{2}\', datacenter \'{3}\':\n{4}' + ''.format(name, pg_name, dvs, datacenter, + '\n'.join(['\t{0}'.format(c) for c in + changes_strings]))) + else: + __salt__['vsphere.update_dvportgroup']( + portgroup_dict=pg, portgroup=pg_name, dvs=dvs, + service_instance=si) + comments.append('Updated portgroup \'{0}\' in DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(pg_name, dvs, datacenter)) + log.info(comments[-1]) + changes.update( + {pg_name: {'new': + _get_val2_dict_from_diff_dict(diff_dict), + 'old': + _get_val1_dict_from_diff_dict(diff_dict)}}) + # Add the uplink portgroup to the expected pg names + uplink_pg = __salt__['vsphere.list_uplink_dvportgroup']( + dvs=dvs, service_instance=si) + expected_pg_names.append(uplink_pg['name']) + # Remove any extra portgroups + for current_pg in current_pgs: + if current_pg['name'] not in expected_pg_names: + changes_required=True + if __opts__['test']: + comments.append('State {0} will remove ' + 'the portgroup \'{1}\' from DVS \'{2}\', ' + 'datacenter \'{3}\'' + ''.format(name, current_pg['name'], dvs, + datacenter)) + else: + __salt__['vsphere.remove_dvportgroup']( + portgroup=current_pg['name'], dvs=dvs, + service_instance=si) + comments.append('Removed the portgroup \'{0}\' from DVS ' + '\'{1}\', datacenter \'{2}\'' + ''.format(current_pg['name'], dvs, + datacenter)) + log.info(comments[-1]) + changes.update({current_pg['name']: + {'old': current_pg}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + 
__salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter ' + '\'{1}\' exist and are correctly configured. ' + 'Nothing to be done.'.format(dvs, datacenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret From 6b66fd75ae3b865ccf19474abcd7b422f958bff1 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 18 Sep 2017 19:25:49 -0400 Subject: [PATCH 165/348] Added uplink_portgroup_configured state that configures the uplink portgroup of a DVS --- salt/states/dvs.py | 85 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 193557ed1aa..897d5edebfc 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -611,3 +611,88 @@ def portgroups_configured(name, dvs, portgroups): ret.update({'changes': changes, 'result': True}) return ret + + +def uplink_portgroup_configured(name, dvs, uplink_portgroup): + ''' + Configures the uplink portgroup on a DVS. The state assumes there is only + one uplink portgroup. 
+ + dvs + Name of the DVS + + upling_portgroup + Uplink portgroup dict representations (see module sysdocs) + + ''' + datacenter = _get_datacenter_name() + log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\'' + ''.format(name, dvs, datacenter)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + + try: + #TODO portroups validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup']( + dvs=dvs, service_instance=si) + log.trace('current_uplink_portgroup = ' + '{0}'.format(current_uplink_portgroup)) + diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup) + if diff_dict: + changes_required=True + if __opts__['test']: + changes_strings = \ + _get_changes_from_diff_dict(diff_dict) + log.trace('changes_strings = ' + '{0}'.format(changes_strings)) + comments.append( + 'State {0} will update the ' + 'uplink portgroup in DVS \'{1}\', datacenter ' + '\'{2}\':\n{3}' + ''.format(name, dvs, datacenter, + '\n'.join(['\t{0}'.format(c) for c in + changes_strings]))) + else: + __salt__['vsphere.update_dvportgroup']( + portgroup_dict=uplink_portgroup, + portgroup=current_uplink_portgroup['name'], + dvs=dvs, + service_instance=si) + comments.append('Updated the uplink portgroup in DVS ' + '\'{0}\', datacenter \'{1}\'' + ''.format(dvs, datacenter)) + log.info(comments[-1]) + changes.update( + {'uplink_portgroup': + {'new': _get_val2_dict_from_diff_dict(diff_dict), + 'old': _get_val1_dict_from_diff_dict(diff_dict)}}) + __salt__['vsphere.disconnect'](si) + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + 
return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter ' + '\'{1}\' is correctly configured. ' + 'Nothing to be done.'.format(dvs, datacenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret From f811523e80973ff4e3ee90b1db80d6b780247db7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 06:01:25 -0400 Subject: [PATCH 166/348] pylint --- salt/modules/vsphere.py | 9 +++++---- salt/states/dvs.py | 22 +++++++++++----------- salt/utils/vmware.py | 2 +- tests/unit/utils/vmware/test_dvs.py | 3 +-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 84edc69897b..b3a8064153f 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3893,7 +3893,7 @@ def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources, if res_dict.get('num_shares'): #XXX Even though we always set the number of shares if provided, #the vCenter will ignore it unless the share level is 'custom'. 
- res.allocationInfo.shares.shares=res_dict['num_shares'] + res.allocationInfo.shares.shares = res_dict['num_shares'] def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): @@ -3917,7 +3917,7 @@ def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): if res_dict.get('num_shares') and res_dict.get('share_level'): if not res.allocationInfo.shares: res.allocationInfo.shares = vim.SharesInfo() - res.allocationInfo.shares.shares=res_dict['num_shares'] + res.allocationInfo.shares.shares = res_dict['num_shares'] res.allocationInfo.shares.level = \ vim.SharesLevel(res_dict['share_level']) @@ -3985,7 +3985,7 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None): dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs_name]) if not dvs_refs: - raise excs.VMwareObjectRetrievalError( + raise VMwareObjectRetrievalError( 'DVS \'{0}\' wasn\'t found in datacenter \'{1}\'' ''.format(dvs_name, datacenter)) dvs_ref = dvs_refs[0] @@ -3993,6 +3993,7 @@ def create_dvs(dvs_dict, dvs_name, service_instance=None): dvs_ref, dvs_dict['network_resource_management_enabled']) return True + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy @@ -4394,7 +4395,7 @@ def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf): teaming.uplinkPortOrder.activeUplinkPort = \ teaming_conf['port_order']['active'] if 'standby' in teaming_conf['port_order']: - teaming.uplinkPortOrder.standbyUplinkPort = \ + teaming.uplinkPortOrder.standbyUplinkPort = \ teaming_conf['port_order']['standby'] diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 897d5edebfc..b48ab74f87c 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -206,12 +206,12 @@ import traceback # Import Salt Libs import salt.exceptions -from salt.utils.dictupdate import update as dict_merge import salt.utils # Get Logging Started log = logging.getLogger(__name__) + def __virtual__(): return True @@ -285,10 +285,10 @@ def 
dvs_configured(name, dvs): changes.update({'dvs': {'new': dvs}}) else: # DVS already exists. Checking various aspects of the config - props = ['description', 'contact_email', 'contact_name', - 'lacp_api_version', 'link_discovery_protocol', - 'max_mtu', 'network_resource_control_version', - 'network_resource_management_enabled'] + props = ['description', 'contact_email', 'contact_name', + 'lacp_api_version', 'link_discovery_protocol', + 'max_mtu', 'network_resource_control_version', + 'network_resource_management_enabled'] log.trace('DVS \'{0}\' found in datacenter \'{1}\'. Checking ' 'for any updates in ' '{2}'.format(dvs_name, datacenter_name, props)) @@ -334,11 +334,11 @@ def dvs_configured(name, dvs): if props_to_updated_values: if __opts__['test']: changes_string = '' - for p in props_to_updated_values.keys(): + for p in props_to_updated_values: if p == 'infrastructure_traffic_resource_pools': changes_string += \ '\tinfrastructure_traffic_resource_pools:\n' - for idx in range(len(props_to_updated_values [p])): + for idx in range(len(props_to_updated_values[p])): d = props_to_updated_values[p][idx] s = props_to_original_values[p][idx] changes_string += \ @@ -536,7 +536,7 @@ def portgroups_configured(name, dvs, portgroups): diff_dict = _get_diff_dict(current_pg, pg) if diff_dict: - changes_required=True + changes_required = True if __opts__['test']: changes_strings = \ _get_changes_from_diff_dict(diff_dict) @@ -545,7 +545,7 @@ def portgroups_configured(name, dvs, portgroups): comments.append( 'State {0} will update portgroup \'{1}\' in ' 'DVS \'{2}\', datacenter \'{3}\':\n{4}' - ''.format(name, pg_name, dvs, datacenter, + ''.format(name, pg_name, dvs, datacenter, '\n'.join(['\t{0}'.format(c) for c in changes_strings]))) else: @@ -568,7 +568,7 @@ def portgroups_configured(name, dvs, portgroups): # Remove any extra portgroups for current_pg in current_pgs: if current_pg['name'] not in expected_pg_names: - changes_required=True + changes_required = True if 
__opts__['test']: comments.append('State {0} will remove ' 'the portgroup \'{1}\' from DVS \'{2}\', ' @@ -644,7 +644,7 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup): '{0}'.format(current_uplink_portgroup)) diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup) if diff_dict: - changes_required=True + changes_required = True if __opts__['test']: changes_strings = \ _get_changes_from_diff_dict(diff_dict) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 27b728ca691..d54dbced042 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1177,7 +1177,7 @@ def get_dvportgroups(parent_ref, portgroup_names=None, path='childEntity', skip=False, type=vim.Folder)]) - else: # parent is distributed virtual switch + else: # parent is distributed virtual switch traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='portgroup', skip=False, diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 6f88484877c..3f2f493f5a2 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -11,8 +11,7 @@ import logging # Import Salt testing libraries from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ - PropertyMock +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \ ArgumentValueError, VMwareRuntimeError From 8d80dc328a6b0e1f57b57305779f0c97ce006369 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 08:04:07 -0400 Subject: [PATCH 167/348] more pylint --- salt/states/dvs.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index b48ab74f87c..da4ba012091 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -3,6 +3,8 @@ Manage VMware distributed virtual switches (DVSs) and their distributed 
virtual portgroups (DVportgroups). +:codeauthor: :email:`Alexandru Bleotu ` + Examples ======== @@ -206,14 +208,13 @@ import traceback # Import Salt Libs import salt.exceptions -import salt.utils # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): - return True + return 'dvs' def mod_init(low): From c65358d4fa84a806a7e78590ec90acf70f0ada75 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 18:02:02 -0400 Subject: [PATCH 168/348] Imported range from six --- salt/states/dvs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index da4ba012091..eeeae446f51 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -208,6 +208,7 @@ import traceback # Import Salt Libs import salt.exceptions +from salt.ext.six.moves import range # Get Logging Started log = logging.getLogger(__name__) From 3c7c202216124f7a6ff1019f1c8fc0e8ae291a87 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 06:52:18 -0400 Subject: [PATCH 169/348] Fixed assert_has_calls in vmware.utils.dvs tests --- tests/unit/utils/vmware/test_dvs.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/unit/utils/vmware/test_dvs.py b/tests/unit/utils/vmware/test_dvs.py index 3f2f493f5a2..458e240e283 100644 --- a/tests/unit/utils/vmware/test_dvs.py +++ b/tests/unit/utils/vmware/test_dvs.py @@ -80,10 +80,10 @@ class GetDvssTestCase(TestCase): mock_traversal_spec): vmware.get_dvss(self.mock_dc_ref) - mock_traversal_spec.assert_called( - call(path='networkFolder', skip=True, type=vim.Datacenter, - selectSet=['traversal_spec']), - call(path='childEntity', skip=False, type=vim.Folder)) + mock_traversal_spec.assert_has_calls( + [call(path='childEntity', skip=False, type=vim.Folder), + call(path='networkFolder', skip=True, type=vim.Datacenter, + selectSet=['traversal_spec'])]) def test_get_mors_with_properties(self): vmware.get_dvss(self.mock_dc_ref) @@ -467,10 +467,10 @@ class 
GetDvportgroupsTestCase(TestCase): mock_traversal_spec): vmware.get_dvportgroups(self.mock_dc_ref) - mock_traversal_spec.assert_called( - call(path='networkFolder', skip=True, type=vim.Datacenter, - selectSet=['traversal_spec']), - call(path='childEntity', skip=False, type=vim.Folder)) + mock_traversal_spec.assert_has_calls( + [call(path='childEntity', skip=False, type=vim.Folder), + call(path='networkFolder', skip=True, type=vim.Datacenter, + selectSet=['traversal_spec'])]) def test_traversal_spec_dvs_parent(self): mock_traversal_spec = MagicMock(return_value='traversal_spec') From f0a813b12660639b234e7a3d77010253e6f3839d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 15:46:38 -0400 Subject: [PATCH 170/348] Review changes --- salt/modules/vsphere.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index b3a8064153f..bde7c9c98e1 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3749,7 +3749,7 @@ def list_dvss(datacenter=None, dvs_names=None, service_instance=None): salt '*' vsphere.list_dvss dvs_names=[dvs1,dvs2] ''' - ret_dict = [] + ret_list = [] proxy_type = get_proxy_type() if proxy_type == 'esxdatacenter': datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] @@ -3789,8 +3789,8 @@ def list_dvss(datacenter=None, dvs_names=None, service_instance=None): _get_dvs_infrastructure_traffic_resources( props['name'], props['config'].infrastructureTrafficResourceConfig)}) - ret_dict.append(dvs_dict) - return ret_dict + ret_list.append(dvs_dict) + return ret_list def _apply_dvs_config(config_spec, config_dict): @@ -3871,29 +3871,30 @@ def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources, (vim.DistributedVirtualSwitchProductSpec) ''' for res_dict in resource_dicts: - ress = [r for r in infra_traffic_resources if r.key == res_dict['key']] - if ress: - res = ress[0] + filtered_traffic_resources = \ 
+ [r for r in infra_traffic_resources if r.key == res_dict['key']] + if filtered_traffic_resources: + traffic_res = filtered_traffic_resources[0] else: - res = vim.DvsHostInfrastructureTrafficResource() - res.key = res_dict['key'] - res.allocationInfo = \ + traffic_res = vim.DvsHostInfrastructureTrafficResource() + traffic_res.key = res_dict['key'] + traffic_res.allocationInfo = \ vim.DvsHostInfrastructureTrafficResourceAllocation() - infra_traffic_resources.append(res) + infra_traffic_resources.append(traffic_res) if res_dict.get('limit'): - res.allocationInfo.limit = res_dict['limit'] + traffic_res.allocationInfo.limit = res_dict['limit'] if res_dict.get('reservation'): - res.allocationInfo.reservation = res_dict['reservation'] + traffic_res.allocationInfo.reservation = res_dict['reservation'] if res_dict.get('num_shares') or res_dict.get('share_level'): - if not res.allocationInfo.shares: - res.allocationInfo.shares = vim.SharesInfo() + if not traffic_res.allocationInfo.shares: + traffic_res.allocationInfo.shares = vim.SharesInfo() if res_dict.get('share_level'): - res.allocationInfo.shares.level = \ + traffic_res.allocationInfo.shares.level = \ vim.SharesLevel(res_dict['share_level']) if res_dict.get('num_shares'): #XXX Even though we always set the number of shares if provided, #the vCenter will ignore it unless the share level is 'custom'. 
- res.allocationInfo.shares.shares = res_dict['num_shares'] + traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts): From c1d3bda729b5863f87a662d47b40037e65d4bd02 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 22 Sep 2017 14:49:47 -0400 Subject: [PATCH 171/348] Added python/pyvmomi compatibility check to salt.modules.vsphere + removed reference to Python 2.6 --- salt/states/dvs.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/salt/states/dvs.py b/salt/states/dvs.py index eeeae446f51..6b44a84c387 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -182,8 +182,8 @@ PyVmomi can be installed via pip: .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain - versions of Python. If using version 6.0 of pyVmomi, Python 2.6, - Python 2.7.9, or newer must be present. This is due to an upstream dependency + versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9, + or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. @@ -205,16 +205,33 @@ Module was developed against. 
from __future__ import absolute_import import logging import traceback +import sys # Import Salt Libs import salt.exceptions from salt.ext.six.moves import range +# Import Third Party Libs +try: + from pyVmomi import VmomiSupport + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): + if not HAS_PYVMOMI: + return False, 'State module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('State module did not load: Incompatible versions ' + 'of Python and pyVmomi present. See Issue #29537.') return 'dvs' From 03ce4d81b7fb7a47b668d7ef6c3fad12093124eb Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 22 Sep 2017 15:01:11 -0400 Subject: [PATCH 172/348] Reactor Test: Fix incorrect merge conflict resolution --- tests/unit/utils/test_reactor.py | 36 -------------------------------- 1 file changed, 36 deletions(-) diff --git a/tests/unit/utils/test_reactor.py b/tests/unit/utils/test_reactor.py index bfc57095dfc..b0a10d581fd 100644 --- a/tests/unit/utils/test_reactor.py +++ b/tests/unit/utils/test_reactor.py @@ -10,7 +10,6 @@ import yaml import salt.loader import salt.utils -import salt.utils.files import salt.utils.reactor as reactor from tests.support.unit import TestCase, skipIf @@ -380,41 +379,6 @@ WRAPPER_CALLS = { log = logging.getLogger(__name__) -@skipIf(NO_MOCK, NO_MOCK_REASON) -class TestReactorBasic(TestCase, AdaptedConfigurationTestCaseMixin): - def setUp(self): - self.opts = self.get_temp_config('master') - self.tempdir = tempfile.mkdtemp(dir=TMP) - self.sls_name = os.path.join(self.tempdir, 'test.sls') - with salt.utils.files.fopen(self.sls_name, 'w') as fh: - fh.write(''' -update_fileserver: - runner.fileserver.update -''') - - def tearDown(self): - if os.path.isdir(self.tempdir): - 
shutil.rmtree(self.tempdir) - del self.opts - del self.tempdir - del self.sls_name - - def test_basic(self): - reactor_config = [ - {'salt/tagA': ['/srv/reactor/A.sls']}, - {'salt/tagB': ['/srv/reactor/B.sls']}, - {'*': ['/srv/reactor/all.sls']}, - ] - wrap = reactor.ReactWrap(self.opts) - with patch.object(reactor.ReactWrap, 'local', MagicMock(side_effect=_args_sideffect)): - ret = wrap.run({'fun': 'test.ping', - 'state': 'local', - 'order': 1, - 'name': 'foo_action', - '__id__': 'foo_action'}) - raise Exception(ret) - - @skipIf(NO_MOCK, NO_MOCK_REASON) class TestReactor(TestCase, AdaptedConfigurationTestCaseMixin): ''' From 6baadf7a776338229275e5a502478970d7f120db Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Wed, 20 Sep 2017 14:33:33 +0200 Subject: [PATCH 173/348] Introduce process_count_max minion configuration parameter This allows users to limit the number of processes or threads a minion will start in response to published messages, prevents resource exhaustion in case a high number of concurrent jobs is scheduled in a short time. 
--- salt/minion.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/minion.py b/salt/minion.py index d51445be28a..053b5b7fbda 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1333,6 +1333,7 @@ class Minion(MinionBase): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True + @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data @@ -1365,6 +1366,15 @@ class Minion(MinionBase): self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners + + process_count_max = self.opts.get('process_count_max') + if process_count_max > 0: + process_count = len(salt.utils.minion.running(self.opts)) + while process_count >= process_count_max: + log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid'])) + yield tornado.gen.sleep(10) + process_count = len(salt.utils.minion.running(self.opts)) + # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other From 4d181ea5237918130b9aaca611479e57ff2696df Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Wed, 20 Sep 2017 14:35:11 +0200 Subject: [PATCH 174/348] process_count_max: add defaults and documentation --- conf/minion | 6 ++++++ doc/ref/configuration/minion.rst | 17 +++++++++++++++++ salt/config/__init__.py | 4 ++++ 3 files changed, 27 insertions(+) diff --git a/conf/minion b/conf/minion index fa5caf317b9..0cef29a6e15 100644 --- a/conf/minion +++ b/conf/minion @@ -689,6 +689,12 @@ # for a full explanation. #multiprocessing: True +# Limit the maximum amount of processes or threads created by salt-minion. 
+# This is useful to avoid resource exhaustion in case the minion receives more +# publications than it is able to handle, as it limits the number of spawned +# processes or threads. -1 disables the limit. +#process_count_max: 20 + ##### Logging settings ##### ########################################## diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 3438bfca035..5dafffaadd6 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2419,6 +2419,23 @@ executed in a thread. multiprocessing: True +.. conf_minion:: process_count_max + +``process_count_max`` +------- + +.. versionadded:: Oxygen + +Default: ``20`` + +Limit the maximum amount of processes or threads created by ``salt-minion``. +This is useful to avoid resource exhaustion in case the minion receives more +publications than it is able to handle, as it limits the number of spawned +processes or threads. ``-1`` disables the limit. + +.. code-block:: yaml + + process_count_max: 20 .. _minion-logging-settings: diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 6a89e1f4857..fea68eb70ad 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -337,6 +337,9 @@ VALID_OPTS = { # Whether or not processes should be forked when needed. The alternative is to use threading. 
'multiprocessing': bool, + # Maximum number of concurrently active processes at any given point in time + 'process_count_max': int, + # Whether or not the salt minion should run scheduled mine updates 'mine_enabled': bool, @@ -1258,6 +1261,7 @@ DEFAULT_MINION_OPTS = { 'auto_accept': True, 'autosign_timeout': 120, 'multiprocessing': True, + 'process_count_max': 20, 'mine_enabled': True, 'mine_return_job': False, 'mine_interval': 60, From 04ab9a610287416b8674cf920fd00a9da381176a Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Wed, 20 Sep 2017 16:53:09 +0200 Subject: [PATCH 175/348] process_count_max: adapt existing unit tests --- tests/unit/test_minion.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_minion.py b/tests/unit/test_minion.py index e60e08edf30..13704f75804 100644 --- a/tests/unit/test_minion.py +++ b/tests/unit/test_minion.py @@ -69,7 +69,7 @@ class MinionTestCase(TestCase): mock_jid_queue = [123] try: minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop()) - ret = minion._handle_decoded_payload(mock_data) + ret = minion._handle_decoded_payload(mock_data).result() self.assertEqual(minion.jid_queue, mock_jid_queue) self.assertIsNone(ret) finally: @@ -98,7 +98,7 @@ class MinionTestCase(TestCase): # Call the _handle_decoded_payload function and update the mock_jid_queue to include the new # mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't # previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal. 
- minion._handle_decoded_payload(mock_data) + minion._handle_decoded_payload(mock_data).result() mock_jid_queue.append(mock_jid) self.assertEqual(minion.jid_queue, mock_jid_queue) finally: @@ -126,7 +126,7 @@ class MinionTestCase(TestCase): # Call the _handle_decoded_payload function and check that the queue is smaller by one item # and contains the new jid - minion._handle_decoded_payload(mock_data) + minion._handle_decoded_payload(mock_data).result() self.assertEqual(len(minion.jid_queue), 2) self.assertEqual(minion.jid_queue, [456, 789]) finally: From d53550de353c3f64c06381a6b623fca22740532f Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Thu, 21 Sep 2017 10:00:00 +0200 Subject: [PATCH 176/348] process_count_max: add unit test --- tests/unit/test_minion.py | 47 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/unit/test_minion.py b/tests/unit/test_minion.py index 13704f75804..b96d586ddd4 100644 --- a/tests/unit/test_minion.py +++ b/tests/unit/test_minion.py @@ -18,6 +18,7 @@ import salt.utils.event as event from salt.exceptions import SaltSystemExit import salt.syspaths import tornado +from salt.ext.six.moves import range __opts__ = {} @@ -131,3 +132,49 @@ class MinionTestCase(TestCase): self.assertEqual(minion.jid_queue, [456, 789]) finally: minion.destroy() + + def test_process_count_max(self): + ''' + Tests that the _handle_decoded_payload function does not spawn more than the configured amount of processes, + as per process_count_max. 
+ ''' + with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \ + patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \ + patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)), \ + patch('salt.utils.minion.running', MagicMock(return_value=[])), \ + patch('tornado.gen.sleep', MagicMock(return_value=tornado.concurrent.Future())): + process_count_max = 10 + mock_opts = salt.config.DEFAULT_MINION_OPTS + mock_opts['minion_jid_queue_hwm'] = 100 + mock_opts["process_count_max"] = process_count_max + + try: + io_loop = tornado.ioloop.IOLoop() + minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop) + + # mock gen.sleep to throw a special Exception when called, so that we detect it + class SleepCalledEception(Exception): + """Thrown when sleep is called""" + pass + tornado.gen.sleep.return_value.set_exception(SleepCalledEception()) + + # up until process_count_max: gen.sleep does not get called, processes are started normally + for i in range(process_count_max): + mock_data = {'fun': 'foo.bar', + 'jid': i} + io_loop.run_sync(lambda data=mock_data: minion._handle_decoded_payload(data)) + self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count, i + 1) + self.assertEqual(len(minion.jid_queue), i + 1) + salt.utils.minion.running.return_value += [i] + + # above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started + mock_data = {'fun': 'foo.bar', + 'jid': process_count_max + 1} + + self.assertRaises(SleepCalledEception, + lambda: io_loop.run_sync(lambda: minion._handle_decoded_payload(mock_data))) + self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count, + process_count_max) + self.assertEqual(len(minion.jid_queue), process_count_max + 1) + finally: + minion.destroy() From fd4194ade05059325fb9bc1ef4984a10e7700691 Mon Sep 17 00:00:00 2001 From: 
Silvio Moioli Date: Fri, 22 Sep 2017 15:37:43 +0200 Subject: [PATCH 177/348] process_count_max: disable by default --- conf/minion | 4 ++-- doc/ref/configuration/minion.rst | 6 +++--- salt/config/__init__.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/conf/minion b/conf/minion index 0cef29a6e15..2946007e2f4 100644 --- a/conf/minion +++ b/conf/minion @@ -692,8 +692,8 @@ # Limit the maximum amount of processes or threads created by salt-minion. # This is useful to avoid resource exhaustion in case the minion receives more # publications than it is able to handle, as it limits the number of spawned -# processes or threads. -1 disables the limit. -#process_count_max: 20 +# processes or threads. -1 is the default and disables the limit. +#process_count_max: -1 ##### Logging settings ##### diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 5dafffaadd6..e4fe7a44e6e 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2426,16 +2426,16 @@ executed in a thread. .. versionadded:: Oxygen -Default: ``20`` +Default: ``-1`` Limit the maximum amount of processes or threads created by ``salt-minion``. This is useful to avoid resource exhaustion in case the minion receives more publications than it is able to handle, as it limits the number of spawned -processes or threads. ``-1`` disables the limit. +processes or threads. ``-1`` is the default and disables the limit. .. code-block:: yaml - process_count_max: 20 + process_count_max: -1 .. 
_minion-logging-settings: diff --git a/salt/config/__init__.py b/salt/config/__init__.py index fea68eb70ad..5a65b49d5ae 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -1261,7 +1261,7 @@ DEFAULT_MINION_OPTS = { 'auto_accept': True, 'autosign_timeout': 120, 'multiprocessing': True, - 'process_count_max': 20, + 'process_count_max': -1, 'mine_enabled': True, 'mine_return_job': False, 'mine_interval': 60, From 9aecf5f8472ff9973bae6bc8fa07e774ed341014 Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 15:48:41 -0500 Subject: [PATCH 178/348] Remove stderr spam when using salt-cloud with libvirt Install error handler and redirect stderr output to debug log. --- salt/cloud/clouds/libvirt.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index c77b6d6a205..c9fbd1aeb63 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -82,9 +82,6 @@ from salt.exceptions import ( SaltCloudSystemExit ) -# Get logging started -log = logging.getLogger(__name__) - VIRT_STATE_NAME_MAP = {0: 'running', 1: 'running', 2: 'running', @@ -99,6 +96,18 @@ IP_LEARNING_XML = """ __virtualname__ = 'libvirt' +# Set up logging +log = logging.getLogger(__name__) + +def libvirtErrorHandler(ctx, error): + ''' + Redirect stderr prints from libvirt to salt logging. 
+ ''' + log.debug("libvirt error {0}".format(error)) + + +if HAS_LIBVIRT: + libvirt.registerErrorHandler(f=libvirtErrorHandler, ctx=None) def __virtual__(): ''' From 235bec492ef9c5b7818e67fc50fd306465fa44ea Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 12:59:01 -0500 Subject: [PATCH 179/348] salt-cloud + libvirt: Mention Fedora 26 support --- salt/cloud/clouds/libvirt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index c9fbd1aeb63..d147f7d5786 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -41,6 +41,7 @@ Example profile: master_port: 5506 Tested on: +- Fedora 26 (libvirt 3.2.1, qemu 2.9.1) - Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1) - Fedora 23 (libvirt 1.2.18, qemu 2.4.1) - Centos 7 (libvirt 1.2.17, qemu 1.5.3) From 88530c4cb6dc77a51b4a1c11139bcb844c1666e0 Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Fri, 22 Sep 2017 13:55:58 -0500 Subject: [PATCH 180/348] Lint fixes --- salt/cloud/clouds/libvirt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index d147f7d5786..1da5925f8f0 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -100,7 +100,8 @@ __virtualname__ = 'libvirt' # Set up logging log = logging.getLogger(__name__) -def libvirtErrorHandler(ctx, error): + +def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument ''' Redirect stderr prints from libvirt to salt logging. 
''' @@ -108,7 +109,8 @@ def libvirtErrorHandler(ctx, error): if HAS_LIBVIRT: - libvirt.registerErrorHandler(f=libvirtErrorHandler, ctx=None) + libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None) + def __virtual__(): ''' From ae035f6b4d8d28976c8cca1b9e3fa0faf890080d Mon Sep 17 00:00:00 2001 From: Shane Hathaway Date: Fri, 22 Sep 2017 15:05:47 -0600 Subject: [PATCH 181/348] Fixed the 'status.procs' and 'status.pid' functions for openvzhn environments. In openvzhn environments, running the 'ps' grain requires python_shell=True. This may also be true of environments where the 'ps' grain has been customized. --- salt/modules/status.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/status.py b/salt/modules/status.py index edb268267ff..24d593d25f8 100644 --- a/salt/modules/status.py +++ b/salt/modules/status.py @@ -132,7 +132,7 @@ def procs(): uind = 0 pind = 0 cind = 0 - plines = __salt__['cmd.run'](__grains__['ps']).splitlines() + plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines() guide = plines.pop(0).split() if 'USER' in guide: uind = guide.index('USER') @@ -1417,7 +1417,7 @@ def pid(sig): ''' cmd = __grains__['ps'] - output = __salt__['cmd.run_stdout'](cmd) + output = __salt__['cmd.run_stdout'](cmd, python_shell=True) pids = '' for line in output.splitlines(): From 85f7549cb9a0a99c36d3abfddd5c84abcab480d8 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Wed, 20 Sep 2017 15:01:46 -0700 Subject: [PATCH 182/348] Adding the ability to monitoring logins by user & specific time ranges in the btmp & wtmp beacons --- salt/beacons/btmp.py | 122 ++++++++++++++++++++++++++++++++++++++++++- salt/beacons/wtmp.py | 117 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 235 insertions(+), 4 deletions(-) diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index 40b50470d88..f332cde3b03 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -10,8 +10,10 @@ Beacon to fire events at failed login of users # Import python libs from __future__ import absolute_import +import logging import os import struct +import time # Import Salt Libs import salt.utils.files @@ -37,6 +39,15 @@ FIELDS = [ SIZE = struct.calcsize(FMT) LOC_KEY = 'btmp.loc' +log = logging.getLogger(__name__) + +# pylint: disable=import-error +try: + import dateutil.parser as dateutil_parser + _TIME_SUPPORTED = True +except ImportError: + _TIME_SUPPORTED = False + def __virtual__(): if os.path.isfile(BTMP): @@ -44,6 +55,20 @@ def __virtual__(): return False +def _check_time_range(time_range, now): + ''' + Check time range + ''' + if _TIME_SUPPORTED: + _start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple())) + _end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple())) + + return bool(_start <= now <= _end) + else: + log.error('Dateutil is required.') + return False + + def _get_loc(): ''' return the active file location @@ -60,6 +85,45 @@ def validate(config): if not isinstance(config, list): return False, ('Configuration for btmp beacon must ' 'be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'users' in _config: + if not isinstance(_config['users'], dict): + return False, ('User configuration for btmp beacon must ' + 'be a dictionary.') + else: + for user in _config['users']: + if _config['users'][user] and \ + 'time_range' in _config['users'][user]: + 
_time_range = _config['users'][user]['time_range'] + if not isinstance(_time_range, dict): + return False, ('The time_range parameter for ' + 'btmp beacon must ' + 'be a dictionary.') + else: + if not all(k in _time_range for k in ('start', 'end')): + return False, ('The time_range parameter for ' + 'btmp beacon must contain ' + 'start & end options.') + if 'defaults' in _config: + if not isinstance(_config['defaults'], dict): + return False, ('Defaults configuration for btmp beacon must ' + 'be a dictionary.') + else: + if 'time_range' in _config['defaults']: + _time_range = _config['defaults']['time_range'] + if not isinstance(_time_range, dict): + return False, ('The time_range parameter for ' + 'btmp beacon must ' + 'be a dictionary.') + else: + if not all(k in _time_range for k in ('start', 'end')): + return False, ('The time_range parameter for ' + 'btmp beacon must contain ' + 'start & end options.') + return True, 'Valid beacon configuration' @@ -72,8 +136,40 @@ def beacon(config): beacons: btmp: [] + + beacons: + btmp: + - users: + gareth: + - defaults: + time_range: + start: '8am' + end: '4pm' + + beacons: + btmp: + - users: + gareth: + time_range: + start: '8am' + end: '4pm' + - defaults: + time_range: + start: '8am' + end: '4pm' ''' ret = [] + + users = None + defaults = None + + for config_item in config: + if 'users' in config_item: + users = config_item['users'] + + if 'defaults' in config_item: + defaults = config_item['defaults'] + with salt.utils.files.fopen(BTMP, 'rb') as fp_: loc = __context__.get(LOC_KEY, 0) if loc == 0: @@ -83,15 +179,39 @@ def beacon(config): else: fp_.seek(loc) while True: + now = int(time.time()) raw = fp_.read(SIZE) if len(raw) != SIZE: return ret + else: + log.debug(raw) __context__[LOC_KEY] = fp_.tell() pack = struct.unpack(FMT, raw) + log.debug(pack) event = {} for ind, field in enumerate(FIELDS): + log.debug('{} {}'.format(ind, field)) event[field] = pack[ind] if isinstance(event[field], six.string_types): event[field] 
= event[field].strip('\x00') - ret.append(event) + + if users: + if event['user'] in users: + _user = users[event['user']] + if isinstance(_user, dict) and 'time_range' in _user: + if _check_time_range(_user['time_range'], now): + ret.append(event) + else: + if defaults and 'time_range' in defaults: + if _check_time_range(defaults['time_range'], + now): + ret.append(event) + else: + ret.append(event) + else: + if defaults and 'time_range' in defaults: + if _check_time_range(defaults['time_range'], now): + ret.append(event) + else: + ret.append(event) return ret diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index c10a335e0cd..3810a383068 100644 --- a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -10,8 +10,10 @@ Beacon to fire events at login of users as registered in the wtmp file # Import Python libs from __future__ import absolute_import +import logging import os import struct +import time # Import salt libs import salt.utils.files @@ -37,9 +39,15 @@ FIELDS = [ SIZE = struct.calcsize(FMT) LOC_KEY = 'wtmp.loc' -import logging log = logging.getLogger(__name__) +# pylint: disable=import-error +try: + import dateutil.parser as dateutil_parser + _TIME_SUPPORTED = True +except ImportError: + _TIME_SUPPORTED = False + def __virtual__(): if os.path.isfile(WTMP): @@ -47,6 +55,20 @@ def __virtual__(): return False +def _check_time_range(time_range, now): + ''' + Check time range + ''' + if _TIME_SUPPORTED: + _start = int(time.mktime(dateutil_parser.parse(time_range['start']).timetuple())) + _end = int(time.mktime(dateutil_parser.parse(time_range['end']).timetuple())) + + return bool(_start <= now <= _end) + else: + log.error('Dateutil is required.') + return False + + def _get_loc(): ''' return the active file location @@ -62,6 +84,44 @@ def validate(config): # Configuration for wtmp beacon should be a list of dicts if not isinstance(config, list): return False, ('Configuration for wtmp beacon must be a list.') + else: + _config = {} + 
list(map(_config.update, config)) + + if 'users' in _config: + if not isinstance(_config['users'], dict): + return False, ('User configuration for btmp beacon must ' + 'be a dictionary.') + else: + for user in _config['users']: + if _config['users'][user] and \ + 'time_range' in _config['users'][user]: + _time_range = _config['users'][user]['time_range'] + if not isinstance(_time_range, dict): + return False, ('The time_range parameter for ' + 'btmp beacon must ' + 'be a dictionary.') + else: + if not all(k in _time_range for k in ('start', 'end')): + return False, ('The time_range parameter for ' + 'btmp beacon must contain ' + 'start & end options.') + if 'defaults' in _config: + if not isinstance(_config['defaults'], dict): + return False, ('Defaults configuration for btmp beacon must ' + 'be a dictionary.') + else: + if 'time_range' in _config['defaults']: + _time_range = _config['defaults']['time_range'] + if not isinstance(_time_range, dict): + return False, ('The time_range parameter for ' + 'btmp beacon must ' + 'be a dictionary.') + else: + if not all(k in _time_range for k in ('start', 'end')): + return False, ('The time_range parameter for ' + 'btmp beacon must contain ' + 'start & end options.') return True, 'Valid beacon configuration' @@ -74,8 +134,40 @@ def beacon(config): beacons: wtmp: [] - ''' + + beacons: + wtmp: + - users: + gareth: + - defaults: + time_range: + start: '8am' + end: '4pm' + + beacons: + wtmp: + - users: + gareth: + time_range: + start: '8am' + end: '4pm' + - defaults: + time_range: + start: '8am' + end: '4pm' +''' ret = [] + + users = None + defaults = None + + for config_item in config: + if 'users' in config_item: + users = config_item['users'] + + if 'defaults' in config_item: + defaults = config_item['defaults'] + with salt.utils.files.fopen(WTMP, 'rb') as fp_: loc = __context__.get(LOC_KEY, 0) if loc == 0: @@ -85,6 +177,7 @@ def beacon(config): else: fp_.seek(loc) while True: + now = int(time.time()) raw = fp_.read(SIZE) if 
len(raw) != SIZE: return ret @@ -95,5 +188,23 @@ def beacon(config): event[field] = pack[ind] if isinstance(event[field], six.string_types): event[field] = event[field].strip('\x00') - ret.append(event) + if users: + if event['user'] in users: + _user = users[event['user']] + if isinstance(_user, dict) and 'time_range' in _user: + if _check_time_range(_user['time_range'], now): + ret.append(event) + else: + if defaults and 'time_range' in defaults: + if _check_time_range(defaults['time_range'], + now): + ret.append(event) + else: + ret.append(event) + else: + if defaults and 'time_range' in defaults: + if _check_time_range(defaults['time_range'], now): + ret.append(event) + else: + ret.append(event) return ret From 6fe02e3c6c07fd1db9f5fa432c82c444734b4b34 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 22 Sep 2017 11:52:39 -0700 Subject: [PATCH 183/348] Updating btmp & wtmp beacons to work with python3. Adding ability to fire alerts for specific users & specific times. Adding some unit tests for both. 
--- salt/beacons/btmp.py | 10 +-- salt/beacons/wtmp.py | 7 +- tests/unit/beacons/test_wtmp_beacon.py | 119 +++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 6 deletions(-) create mode 100644 tests/unit/beacons/test_wtmp_beacon.py diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index f332cde3b03..e56e4b50872 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -183,17 +183,17 @@ def beacon(config): raw = fp_.read(SIZE) if len(raw) != SIZE: return ret - else: - log.debug(raw) __context__[LOC_KEY] = fp_.tell() pack = struct.unpack(FMT, raw) - log.debug(pack) event = {} for ind, field in enumerate(FIELDS): - log.debug('{} {}'.format(ind, field)) event[field] = pack[ind] if isinstance(event[field], six.string_types): - event[field] = event[field].strip('\x00') + if isinstance(event[field], bytes): + event[field] = event[field].decode() + event[field] = event[field].strip('b\x00') + else: + event[field] = event[field].strip('\x00') if users: if event['user'] in users: diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index 3810a383068..b882e5598f0 100644 --- a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -187,7 +187,12 @@ def beacon(config): for ind, field in enumerate(FIELDS): event[field] = pack[ind] if isinstance(event[field], six.string_types): - event[field] = event[field].strip('\x00') + if isinstance(event[field], bytes): + event[field] = event[field].decode() + event[field] = event[field].strip('b\x00') + else: + event[field] = event[field].strip('\x00') + if users: if event['user'] in users: _user = users[event['user']] diff --git a/tests/unit/beacons/test_wtmp_beacon.py b/tests/unit/beacons/test_wtmp_beacon.py new file mode 100644 index 00000000000..b1edd970966 --- /dev/null +++ b/tests/unit/beacons/test_wtmp_beacon.py @@ -0,0 +1,119 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +import logging +import sys + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from 
tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, mock_open +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.wtmp as wtmp + +if sys.version_info >= (3,): + raw = bytes('\x07\x00\x00\x00H\x18\x00\x00pts/14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00s/14gareth\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13I\xc5YZf\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 'utf-8') + pack = (7, 6216, b'pts/14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b's/14', b'gareth\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 
b'::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 0, 0, 0, 1506101523, 353882, 0, 0, 0, 16777216) +else: + raw = 
b'\x07\x00\x00\x00H\x18\x00\x00pts/14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00s/14gareth\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13I\xc5YZf\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + pack = (7, 6216, 'pts/14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 's/14', 'gareth\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 
'::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 0, 0, 0, 1506101523, 353882, 0, 0, 0, 16777216) + +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class WTMPBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.[s] + ''' + + def setup_loader_modules(self): + return { + wtmp: { + '__context__': {'wtmp.loc': 2}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + ret = wtmp.validate(config) + + self.assertEqual(ret, (False, 'Configuration for wtmp beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = wtmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + def test_no_match(self): + config = [{'users': {'gareth': {'time': {'end': '5pm', + 'start': '3pm'}}}} + ] + + ret = wtmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = wtmp.beacon(config) + self.assertEqual(ret, []) + + def test_match(self): + with 
patch('salt.utils.files.fopen', + mock_open(read_data=raw)): + with patch('struct.unpack', + MagicMock(return_value=pack)): + config = [{'users': {'gareth': {}}}] + + ret = wtmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected = [{'PID': 6216, + 'line': 'pts/14', + 'session': 0, + 'time': 0, + 'exit_status': 0, + 'inittab': 's/14', + 'type': 7, + 'addr': 1506101523, + 'hostname': '::1', + 'user': 'gareth'}] + + ret = wtmp.beacon(config) + log.debug('{}'.format(ret)) + self.assertEqual(ret, _expected) + + def test_match_time(self): + with patch('salt.utils.files.fopen', + mock_open(read_data=raw)): + with patch('time.time', + MagicMock(return_value=1506121200)): + with patch('struct.unpack', + MagicMock(return_value=pack)): + config = [{'users': {'gareth': {'time': {'end': '5pm', + 'start': '3pm'}}}} + ] + + ret = wtmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected = [{'PID': 6216, + 'line': 'pts/14', + 'session': 0, + 'time': 0, + 'exit_status': 0, + 'inittab': 's/14', + 'type': 7, + 'addr': 1506101523, + 'hostname': '::1', + 'user': 'gareth'}] + + ret = wtmp.beacon(config) + self.assertEqual(ret, _expected) From ca3f77f81e5aa105bfc5531fb45c95214d501926 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 22 Sep 2017 11:57:34 -0700 Subject: [PATCH 184/348] Adding test_btmp_beacon. 
--- tests/unit/beacons/test_btmp_beacon.py | 117 +++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 tests/unit/beacons/test_btmp_beacon.py diff --git a/tests/unit/beacons/test_btmp_beacon.py b/tests/unit/beacons/test_btmp_beacon.py new file mode 100644 index 00000000000..708dae94547 --- /dev/null +++ b/tests/unit/beacons/test_btmp_beacon.py @@ -0,0 +1,117 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +import logging +import sys + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, mock_open +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.btmp as btmp + +if sys.version_info >= (3,): + raw = bytes('\x06\x00\x00\x00Nt\x00\x00ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdd\xc7\xc2Y\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 'utf-8') + pack = (6, 29774, b'ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'\x00\x00\x00\x00', b'garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 0, 0, 0, 1505937373, 0, 0, 0, 0, 16777216) +else: + raw = 
b'\x06\x00\x00\x00Nt\x00\x00ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdd\xc7\xc2Y\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + pack = (6, 29774, 'ssh:notty\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', '\x00\x00\x00\x00', 'garet\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 
'::1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 0, 0, 0, 1505937373, 0, 0, 0, 0, 16777216) +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class BTMPBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.[s] + ''' + + def setup_loader_modules(self): + return { + btmp: { + '__context__': {'btmp.loc': 2}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + ret = btmp.validate(config) + + self.assertEqual(ret, (False, 'Configuration for btmp beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = btmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + def test_no_match(self): + config = [{'users': {'gareth': {'time': {'end': '5pm', + 'start': '3pm'}}}} + ] + + ret = btmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = btmp.beacon(config) + self.assertEqual(ret, []) + + def test_match(self): + with 
patch('salt.utils.files.fopen', + mock_open(read_data=raw)): + with patch('struct.unpack', + MagicMock(return_value=pack)): + config = [{'users': {'garet': {}}}] + + ret = btmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected = [{'addr': 1505937373, + 'exit_status': 0, + 'inittab': '', + 'hostname': '::1', + 'PID': 29774, + 'session': 0, + 'user': + 'garet', + 'time': 0, + 'line': 'ssh:notty', + 'type': 6}] + ret = btmp.beacon(config) + self.assertEqual(ret, _expected) + + def test_match_time(self): + with patch('salt.utils.files.fopen', + mock_open(read_data=raw)): + with patch('time.time', + MagicMock(return_value=1506121200)): + with patch('struct.unpack', + MagicMock(return_value=pack)): + config = [{'users': {'garet': {'time': {'end': '5pm', + 'start': '3pm'}}}} + ] + + ret = btmp.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected = [{'addr': 1505937373, + 'exit_status': 0, + 'inittab': '', + 'hostname': '::1', + 'PID': 29774, + 'session': 0, + 'user': + 'garet', + 'time': 0, + 'line': 'ssh:notty', + 'type': 6}] + ret = btmp.beacon(config) + self.assertEqual(ret, _expected) From 2c0bc3596396334f637e19857eda12672c3a8fb2 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 22 Sep 2017 12:48:57 -0700 Subject: [PATCH 185/348] Fixing lint errors. 
--- salt/beacons/btmp.py | 1 + salt/beacons/wtmp.py | 1 + 2 files changed, 2 insertions(+) diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index e56e4b50872..f539d6032ac 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -20,6 +20,7 @@ import salt.utils.files # Import 3rd-party libs from salt.ext import six +from salt.ext.six.moves import map __virtualname__ = 'btmp' BTMP = '/var/log/btmp' diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index b882e5598f0..2ccf1730b4c 100644 --- a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -20,6 +20,7 @@ import salt.utils.files # Import 3rd-party libs from salt.ext import six +from salt.ext.six.moves import map __virtualname__ = 'wtmp' WTMP = '/var/log/wtmp' From fb0b987d0654d836000913ee7974b9d5922197e7 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 22 Sep 2017 14:20:24 -0700 Subject: [PATCH 186/348] Adding some lines to disable the lint errors on import for salt.ext.six.moves. --- salt/beacons/btmp.py | 4 +++- salt/beacons/inotify.py | 2 ++ salt/beacons/wtmp.py | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index f539d6032ac..9c8aca4e0f6 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -19,8 +19,10 @@ import time import salt.utils.files # Import 3rd-party libs -from salt.ext import six +import salt.ext.six +# pylint: disable=import-error from salt.ext.six.moves import map +# pylint: enable=import-error __virtualname__ = 'btmp' BTMP = '/var/log/btmp' diff --git a/salt/beacons/inotify.py b/salt/beacons/inotify.py index ee1dfa4f785..7e5f4df863c 100644 --- a/salt/beacons/inotify.py +++ b/salt/beacons/inotify.py @@ -23,7 +23,9 @@ import re # Import salt libs import salt.ext.six +# pylint: disable=import-error from salt.ext.six.moves import map +# pylint: enable=import-error # Import third party libs try: diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index 2ccf1730b4c..65a88df28ba 100644 --- 
a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -20,7 +20,9 @@ import salt.utils.files # Import 3rd-party libs from salt.ext import six +# pylint: disable=import-error from salt.ext.six.moves import map +# pylint: enable=import-error __virtualname__ = 'wtmp' WTMP = '/var/log/wtmp' From db25b6500b2337c9abde32c34b4766ea62d16dc9 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 22 Sep 2017 14:27:55 -0700 Subject: [PATCH 187/348] Fixing one more import. --- salt/beacons/btmp.py | 2 +- salt/beacons/wtmp.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index 9c8aca4e0f6..fe7f329025f 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -191,7 +191,7 @@ def beacon(config): event = {} for ind, field in enumerate(FIELDS): event[field] = pack[ind] - if isinstance(event[field], six.string_types): + if isinstance(event[field], salt.ext.six.string_types): if isinstance(event[field], bytes): event[field] = event[field].decode() event[field] = event[field].strip('b\x00') diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index 65a88df28ba..4cb3a0f4fc8 100644 --- a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -19,7 +19,7 @@ import time import salt.utils.files # Import 3rd-party libs -from salt.ext import six +import salt.ext.six # pylint: disable=import-error from salt.ext.six.moves import map # pylint: enable=import-error @@ -189,7 +189,7 @@ def beacon(config): event = {} for ind, field in enumerate(FIELDS): event[field] = pack[ind] - if isinstance(event[field], six.string_types): + if isinstance(event[field], salt.ext.six.string_types): if isinstance(event[field], bytes): event[field] = event[field].decode() event[field] = event[field].strip('b\x00') From 6b574ec5dac61addbc7f67d75f73f342b95ca53d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 22 Sep 2017 17:07:53 -0400 Subject: [PATCH 188/348] Return sorted added/removed/changed/unchanged keys in 
RecursiveDictDiffer so result is deterministic --- salt/utils/dictdiffer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index b0077420834..abe8bfc1c54 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -267,7 +267,7 @@ class RecursiveDictDiffer(DictDiffer): keys.append('{0}{1}'.format(prefix, key)) return keys - return _added(self._diffs, prefix='') + return sorted(_added(self._diffs, prefix='')) def removed(self): ''' @@ -290,7 +290,7 @@ class RecursiveDictDiffer(DictDiffer): prefix='{0}{1}.'.format(prefix, key))) return keys - return _removed(self._diffs, prefix='') + return sorted(_removed(self._diffs, prefix='')) def changed(self): ''' @@ -338,7 +338,7 @@ class RecursiveDictDiffer(DictDiffer): return keys - return _changed(self._diffs, prefix='') + return sorted(_changed(self._diffs, prefix='')) def unchanged(self): ''' @@ -363,7 +363,7 @@ class RecursiveDictDiffer(DictDiffer): prefix='{0}{1}.'.format(prefix, key))) return keys - return _unchanged(self.current_dict, self._diffs, prefix='') + return sorted(_unchanged(self.current_dict, self._diffs, prefix='')) @property def diffs(self): From 847debab7a5567a47e7a33e02173dcb9176f5f31 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 22 Sep 2017 17:09:01 -0400 Subject: [PATCH 189/348] Fix failing storage and listdiffer tests --- tests/unit/utils/test_dictdiffer.py | 2 +- tests/unit/utils/vmware/test_storage.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py index 2c6243bbd85..23fa5955ebc 100644 --- a/tests/unit/utils/test_dictdiffer.py +++ b/tests/unit/utils/test_dictdiffer.py @@ -49,7 +49,7 @@ class RecursiveDictDifferTestCase(TestCase): def test_changed_without_ignore_unset_values(self): self.recursive_diff.ignore_unset_values = False self.assertEqual(self.recursive_diff.changed(), - ['a.c', 
'a.e', 'a.g', 'a.f', 'h', 'i']) + ['a.c', 'a.e', 'a.f', 'a.g', 'h', 'i']) def test_unchanged(self): self.assertEqual(self.recursive_diff.unchanged(), diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 43434225ae3..8f9a069149b 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -264,14 +264,14 @@ class GetDatastoresTestCase(TestCase): mock_reference, get_all_datastores=True) - mock_traversal_spec_init.assert_called([ + mock_traversal_spec_init.assert_has_calls([ + call(path='datastore', + skip=False, + type=vim.Datacenter), call(path='childEntity', selectSet=['traversal'], skip=False, - type=vim.Folder), - call(path='datastore', - skip=False, - type=vim.Datacenter)]) + type=vim.Folder)]) def test_unsupported_reference_type(self): class FakeClass(object): @@ -379,7 +379,7 @@ class RenameDatastoreTestCase(TestCase): with self.assertRaises(VMwareApiError) as excinfo: salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') - self.assertEqual(excinfo.exception.message, 'vim_fault') + self.assertEqual(excinfo.exception.strerror, 'vim_fault') def test_rename_datastore_raise_runtime_fault(self): exc = vmodl.RuntimeFault() @@ -388,7 +388,7 @@ class RenameDatastoreTestCase(TestCase): with self.assertRaises(VMwareRuntimeError) as excinfo: salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') - self.assertEqual(excinfo.exception.message, 'runtime_fault') + self.assertEqual(excinfo.exception.strerror, 'runtime_fault') def test_rename_datastore(self): salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') From c7a652784afe8dfc04f6a47e4abb6c0508ad57e0 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sat, 23 Sep 2017 13:56:50 +1000 Subject: [PATCH 190/348] remove blank line at end of file --- salt/utils/files.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/utils/files.py b/salt/utils/files.py index be4077583f8..657ff82b048 100644 
--- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -328,4 +328,3 @@ def remove(path): except OSError as exc: if exc.errno != errno.ENOENT: raise - From 96c1ef48e62807b71853cb72c2dcc5a8ebef6448 Mon Sep 17 00:00:00 2001 From: Wedge Jarrad Date: Sat, 23 Sep 2017 17:38:51 -0700 Subject: [PATCH 191/348] Ignore retcode on call to grep in selinux.py module Fixes #43711 Returning an exit code of 1 is normal operation of grep when it does not find a match. This will happen every time this function is called by fcontext_policy_present to detirmine whether a selinux policy exists before creating it. Ignoring the retcode will prevent it from emitting an error when this happens. --- salt/modules/selinux.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index 208eee03f5a..aecadd7a147 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -463,7 +463,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype) cmd = 'semanage fcontext -l | egrep ' + \ "'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs) - current_entry_text = __salt__['cmd.shell'](cmd) + current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True) if current_entry_text == '': return None ret = {} From 7ba690afaa1d4d8fe54aac0843ee4eba2e8f641c Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Sun, 24 Sep 2017 12:49:11 +0300 Subject: [PATCH 192/348] added link to citrix SDK download --- salt/cloud/clouds/xen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 558c7cacb61..d1eeb95acef 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -7,6 +7,7 @@ XenServer Cloud Driver The XenServer driver is designed to work with a Citrix XenServer. 
Requires XenServer SDK +(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ ) Place a copy of the XenAPI.py in the Python site-packages folder. From a327ee96148826d2fa355aec0fc4170ae020d055 Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Sun, 24 Sep 2017 17:34:38 +0300 Subject: [PATCH 193/348] fix lint errors --- salt/cloud/clouds/xen.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index d1eeb95acef..759aa5ebd80 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -162,8 +162,7 @@ def _get_session(): user, password, api_version, originator) except XenAPI.Failure as ex: ''' - if the server on the url is not the pool master, the pool master's - address will be rturned in the exception message + get the pool master's address from the XenAPI raised exception ''' pool_master_addr = str(ex.__dict__['details'][1]) slash_parts = url.split('/') @@ -193,10 +192,10 @@ def list_nodes(): ret = {} for vm in vms: record = session.xenapi.VM.get_record(vm) - if not(record['is_a_template']) and not(record['is_control_domain']): + if not record['is_a_template'] and not record['is_control_domain']: try: base_template_name = record['other_config']['base_template_name'] - except Exception as KeyError: + except Exception: base_template_name = None log.debug('VM {}, doesnt have base_template_name attribute'.format( record['name_label'])) @@ -316,7 +315,7 @@ def list_nodes_full(session=None): # deal with cases where the VM doesn't have 'base_template_name' attribute try: base_template_name = record['other_config']['base_template_name'] - except Exception as KeyError: + except Exception: base_template_name = None log.debug('VM {}, doesnt have base_template_name attribute'.format( record['name_label'])) @@ -481,7 +480,7 @@ def show_instance(name, session=None, call=None): if not record['is_a_template'] and not record['is_control_domain']: try: 
base_template_name = record['other_config']['base_template_name'] - except Exception as KeyError: + except Exception: base_template_name = None log.debug('VM {}, doesnt have base_template_name attribute'.format( record['name_label'])) From 78137c0860f9f70e4325e74caa4b5241638d805e Mon Sep 17 00:00:00 2001 From: Sebastien Huber Date: Mon, 25 Sep 2017 10:00:52 +0200 Subject: [PATCH 194/348] Corrected custom port handling This pillar was only able to connect to a Postgres DB running on the default port (5432) This commit extend this to a custom port --- salt/pillar/postgres.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/pillar/postgres.py b/salt/pillar/postgres.py index 58cd0e32985..7b6300989a6 100644 --- a/salt/pillar/postgres.py +++ b/salt/pillar/postgres.py @@ -90,7 +90,8 @@ class POSTGRESExtPillar(SqlBaseExtPillar): conn = psycopg2.connect(host=_options['host'], user=_options['user'], password=_options['pass'], - dbname=_options['db']) + dbname=_options['db'], + port=_options['port']) cursor = conn.cursor() try: yield cursor From 2c80ea54f4c363ffc4fbcd2344b9f8568bb78c30 Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Mon, 25 Sep 2017 12:06:48 +0300 Subject: [PATCH 195/348] more lint fixes --- salt/cloud/clouds/xen.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 759aa5ebd80..a57bb65fa1b 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -162,7 +162,7 @@ def _get_session(): user, password, api_version, originator) except XenAPI.Failure as ex: ''' - get the pool master's address from the XenAPI raised exception + get the pool master address from the XenAPI raised exception ''' pool_master_addr = str(ex.__dict__['details'][1]) slash_parts = url.split('/') From 19da1000b4ac9716c3743c8c72640ca74e302512 Mon Sep 17 00:00:00 2001 From: Heghedus Razvan Date: Fri, 22 Sep 2017 15:57:59 +0300 Subject: [PATCH 196/348] test_nilrt_ip: Fix 
set_static_all test The nameservers needs to be specified only by ip address. Signed-off-by: Heghedus Razvan --- tests/integration/modules/test_nilrt_ip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/modules/test_nilrt_ip.py b/tests/integration/modules/test_nilrt_ip.py index 1412cffb2d2..5c2fbc0bfba 100644 --- a/tests/integration/modules/test_nilrt_ip.py +++ b/tests/integration/modules/test_nilrt_ip.py @@ -98,13 +98,13 @@ class Nilrt_ipModuleTest(ModuleCase): def test_static_all(self): interfaces = self.__interfaces() for interface in interfaces: - result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 my.dns.com']) + result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 8.8.8.8']) self.assertTrue(result) info = self.run_function('ip.get_interfaces_details') for interface in info['interfaces']: self.assertIn('8.8.4.4', interface['ipv4']['dns']) - self.assertIn('my.dns.com', interface['ipv4']['dns']) + self.assertIn('8.8.8.8', interface['ipv4']['dns']) self.assertEqual(interface['ipv4']['requestmode'], 'static') self.assertEqual(interface['ipv4']['address'], '192.168.10.4') self.assertEqual(interface['ipv4']['netmask'], '255.255.255.0') From 06e68bfa4fb2aad6f1b50ad334ba6b775ba18dfd Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Mon, 25 Sep 2017 14:52:40 +0300 Subject: [PATCH 197/348] lint errors fixed --- salt/cloud/clouds/xen.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index a57bb65fa1b..7359796c202 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -161,9 +161,6 @@ def _get_session(): session.xenapi.login_with_password( user, password, api_version, originator) except XenAPI.Failure as ex: - ''' - get the pool master address from the XenAPI raised exception - ''' pool_master_addr = 
str(ex.__dict__['details'][1]) slash_parts = url.split('/') new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr From 3abba0b999c087ad4d6c757e4df1d5b9573e32ea Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Mon, 25 Sep 2017 06:40:31 -0700 Subject: [PATCH 198/348] Update documentation in Pure Storage [purefa] module Add external array authentication methods. Changed version added to be Oxygen --- salt/modules/purefa.py | 60 ++++++++++++++++++++++++++---------------- 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index 14beb37bef2..aeb4104ee7d 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -27,6 +27,20 @@ Installation Prerequisites pip install purestorage +- Configure Pure Storage FlashArray authentication. Use one of the following + three methods. + + 1) From the minion config + .. code-block:: yaml + + pure_tags: + fa: + san_ip: management vip or hostname for the FlashArray + api_token: A valid api token for the FlashArray being managed + + 2) From environment (PUREFA_IP and PUREFA_API) + 3) From the pillar (PUREFA_IP and PUREFA_API) + :maintainer: Simon Dodsley (simon@purestorage.com) :maturity: new :requires: purestorage @@ -195,7 +209,7 @@ def snap_create(name, suffix=None): Will return False is volume selected to snap does not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume to snapshot @@ -231,7 +245,7 @@ def snap_delete(name, suffix=None, eradicate=False): Will return False if selected snapshot does not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -273,7 +287,7 @@ def snap_eradicate(name, suffix=None): Will retunr False is snapshot is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -306,7 +320,7 @@ def volume_create(name, size=None): Will return False if volume already exists. - .. versionadded:: 2017.7.3 + .. 
versionadded:: Oxygen name : string name of volume (truncated to 63 characters) @@ -344,7 +358,7 @@ def volume_delete(name, eradicate=False): Will return False if volume doesn't exist is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -383,7 +397,7 @@ def volume_eradicate(name): Will return False is volume is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -413,7 +427,7 @@ def volume_extend(name, size): Will return False if new size is less than or equal to existing size. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -451,7 +465,7 @@ def snap_volume_create(name, target, overwrite=False): Will return False if target volume already exists and overwrite is not specified, or selected snapshot doesn't exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume snapshot @@ -497,7 +511,7 @@ def volume_clone(name, target, overwrite=False): Will return False if source volume doesn't exist, or target volume already exists and overwrite not specified. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -541,7 +555,7 @@ def volume_attach(name, host): Host and volume must exist or else will return False. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -574,7 +588,7 @@ def volume_detach(name, host): Will return False if either host or volume do not exist, or if selected volume isn't already connected to the host. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of volume @@ -608,7 +622,7 @@ def host_create(name, iqn=None, wwn=None): Fibre Channel parameters are not in a valid format. See Pure Storage FlashArray documentation. - .. versionadded:: 2017.7.3 + .. 
versionadded:: Oxygen name : string name of host (truncated to 63 characters) @@ -659,7 +673,7 @@ def host_update(name, iqn=None, wwn=None): by another host, or are not in a valid format. See Pure Storage FlashArray documentation. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of host @@ -699,7 +713,7 @@ def host_delete(name): Will return False if the host doesn't exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of host @@ -735,7 +749,7 @@ def hg_create(name, host=None, volume=None): Will return False if hostgroup already exists, or if named host or volume do not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup (truncated to 63 characters) @@ -791,7 +805,7 @@ def hg_update(name, host=None, volume=None): Will return False is hostgroup doesn't exist, or host or volume do not exist. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -837,7 +851,7 @@ def hg_delete(name): Will return False is hostgroup is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -875,7 +889,7 @@ def hg_remove(name, volume=None, host=None): Will return False is hostgroup does not exist, or named host or volume are not in the hostgroup. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup @@ -936,7 +950,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True): hostgroups, hosts or volumes * Named type for protection group does not exist - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1029,7 +1043,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None): * Incorrect type selected for current protection group type * Specified type does not exist - .. versionadded:: 2017.7.3 + .. 
versionadded:: Oxygen name : string name of protection group @@ -1119,7 +1133,7 @@ def pg_delete(name, eradicate=False): Will return False if protection group is already in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1156,7 +1170,7 @@ def pg_eradicate(name): Will return False if protection group is not in a deleted state. - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of protection group @@ -1188,7 +1202,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None): * Protection group does not exist * Specified type is not currently associated with the protection group - .. versionadded:: 2017.7.3 + .. versionadded:: Oxygen name : string name of hostgroup From 72d96ed74b706c432ef37ef46551a6c7c31951f9 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 22 Sep 2017 15:37:12 -0600 Subject: [PATCH 199/348] Add an up_to_date state to win_wua --- salt/modules/win_wua.py | 2 +- salt/states/win_wua.py | 187 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 181 insertions(+), 8 deletions(-) diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py index 5549b3e2bfa..24441d185c4 100644 --- a/salt/modules/win_wua.py +++ b/salt/modules/win_wua.py @@ -110,7 +110,7 @@ def available(software=True, Include software updates in the results (default is True) drivers (bool): - Include driver updates in the results (default is False) + Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. diff --git a/salt/states/win_wua.py b/salt/states/win_wua.py index ab43b656544..fef44abe69f 100644 --- a/salt/states/win_wua.py +++ b/salt/states/win_wua.py @@ -84,10 +84,12 @@ def installed(name, updates=None): Args: - name (str): The identifier of a single update to install. + name (str): + The identifier of a single update to install. - updates (list): A list of identifiers for updates to be installed. 
- Overrides ``name``. Default is None. + updates (list): + A list of identifiers for updates to be installed. Overrides + ``name``. Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method @@ -121,7 +123,7 @@ def installed(name, updates=None): # Install multiple updates install_updates: wua.installed: - - name: + - updates: - KB3194343 - 28cf1b09-2b1a-458c-9bd1-971d1b26b211 ''' @@ -215,10 +217,12 @@ def removed(name, updates=None): Args: - name (str): The identifier of a single update to uninstall. + name (str): + The identifier of a single update to uninstall. - updates (list): A list of identifiers for updates to be removed. - Overrides ``name``. Default is None. + updates (list): + A list of identifiers for updates to be removed. Overrides ``name``. + Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method @@ -329,3 +333,172 @@ def removed(name, updates=None): ret['comment'] = 'Updates removed successfully' return ret + + +def up_to_date(name, + software=True, + drivers=False, + skip_hidden=False, + skip_mandatory=False, + skip_reboot=True, + categories=None, + severities=None,): + ''' + Ensure Microsoft Updates that match the passed criteria are installed. + Updates will be downloaded if needed. + + This state allows you to update a system without specifying a specific + update to apply. All matching updates will be installed. + + Args: + + name (str): + The name has no functional value and is only used as a tracking + reference + + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) + + skip_hidden (bool): + Skip updates that have been hidden. Default is False. + + skip_mandatory (bool): + Skip mandatory updates. Default is False. 
+ + skip_reboot (bool): + Skip updates that require a reboot. Default is True. + + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. + + Categories include the following: + + * Critical Updates + * Definition Updates + * Drivers (make sure you set drivers=True) + * Feature Packs + * Security Updates + * Update Rollups + * Updates + * Update Rollups + * Windows 7 + * Windows 8.1 + * Windows 8.1 drivers + * Windows 8.1 and later drivers + * Windows Defender + + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. + + Severities include the following: + + * Critical + * Important + + + Returns: + dict: A dictionary containing the results of the update + + CLI Example: + + .. code-block:: yaml + + # Update the system using the state defaults + update_system: + wua.up_to_date + + # Update the drivers + update_drivers: + wua.up_to_date: + - software: False + - drivers: True + - skip_reboot: False + + # Apply all critical updates + update_critical: + wua.up_to_date: + - severities: + - Critical + ''' + ret = {'name': name, + 'changes': {}, + 'result': True, + 'comment': ''} + + wua = salt.utils.win_update.WindowsUpdateAgent() + + available_updates = wua.available( + skip_hidden=skip_hidden, skip_installed=True, + skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, + software=software, drivers=drivers, categories=categories, + severities=severities) + + # No updates found + if available_updates.count() == 0: + ret['comment'] = 'No updates found' + return ret + + updates = list(available_updates.list().keys()) + + # Search for updates + install_list = wua.search(updates) + + # List of updates to download + download = salt.utils.win_update.Updates() + for item in install_list.updates: + if not salt.utils.is_true(item.IsDownloaded): + download.updates.Add(item) + + # List of updates to install + install = salt.utils.win_update.Updates() + 
for item in install_list.updates: + if not salt.utils.is_true(item.IsInstalled): + install.updates.Add(item) + + # Return comment of changes if test. + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Updates will be installed:' + for update in install.updates: + ret['comment'] += '\n' + ret['comment'] += ': '.join( + [update.Identity.UpdateID, update.Title]) + return ret + + # Download updates + wua.download(download) + + # Install updates + wua.install(install) + + # Refresh windows update info + wua.refresh() + + post_info = wua.updates().list() + + # Verify the installation + for item in install.list(): + if not salt.utils.is_true(post_info[item]['Installed']): + ret['changes']['failed'] = { + item: {'Title': post_info[item]['Title'][:40] + '...', + 'KBs': post_info[item]['KBs']} + } + ret['result'] = False + else: + ret['changes']['installed'] = { + item: {'Title': post_info[item]['Title'][:40] + '...', + 'NeedsReboot': post_info[item]['NeedsReboot'], + 'KBs': post_info[item]['KBs']} + } + + if ret['changes'].get('failed', False): + ret['comment'] = 'Updates failed' + else: + ret['comment'] = 'Updates installed successfully' + + return ret From be554c898b679aaad66acf036854af4d7c97c58e Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 25 Sep 2017 09:21:41 -0600 Subject: [PATCH 200/348] Rename new state to `uptodate` --- salt/states/win_wua.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/states/win_wua.py b/salt/states/win_wua.py index fef44abe69f..798853d5ca7 100644 --- a/salt/states/win_wua.py +++ b/salt/states/win_wua.py @@ -335,14 +335,14 @@ def removed(name, updates=None): return ret -def up_to_date(name, - software=True, - drivers=False, - skip_hidden=False, - skip_mandatory=False, - skip_reboot=True, - categories=None, - severities=None,): +def uptodate(name, + software=True, + drivers=False, + skip_hidden=False, + skip_mandatory=False, + skip_reboot=True, + categories=None, + severities=None,): 
''' Ensure Microsoft Updates that match the passed criteria are installed. Updates will be downloaded if needed. From 85b0a8c401844afe66211b5317e40fc2e9ca00af Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 25 Sep 2017 17:29:27 +0200 Subject: [PATCH 201/348] Improved delete_deployment test for kubernetes module This is a follow up of this PR: https://github.com/saltstack/salt/pull/43235 With the fix in PR 43235, we are polling the status of the deletion via show_deployment. This is now also reflected in the tests with this change. --- tests/unit/modules/test_kubernetes.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index 46ac7601581..4e8f6cd4b5d 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -97,19 +97,20 @@ class KubernetesTestCase(TestCase, LoaderModuleMockMixin): def test_delete_deployments(self): ''' - Tests deployment creation. + Tests deployment deletion :return: ''' with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: - with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): - mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") - mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( - **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': 200}} - ) - self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200}) - self.assertTrue( - kubernetes.kubernetes.client.ExtensionsV1beta1Api(). 
- delete_namespaced_deployment().to_dict.called) + with patch('salt.modules.kubernetes.show_deployment', Mock(return_value=None)): + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"delete_namespaced_deployment.return_value.to_dict.return_value": {'code': ''}} + ) + self.assertEqual(kubernetes.delete_deployment("test"), {'code': 200}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). + delete_namespaced_deployment().to_dict.called) def test_create_deployments(self): ''' From c5cf5e92c1290e3740425ca3b4ed63076826e9df Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 29 Jun 2017 16:39:13 -0600 Subject: [PATCH 202/348] Fix many tests --- salt/modules/file.py | 36 ++-- tests/unit/modules/test_file.py | 333 +++++++++++++++++--------------- 2 files changed, 198 insertions(+), 171 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 21a60dda517..9f8263cab0b 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2179,14 +2179,14 @@ def replace(path, if not_found_content is None: not_found_content = repl if prepend_if_not_found: - new_file.insert(0, not_found_content + b'\n') + new_file.insert(0, not_found_content + os.linesep) else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith(b'\n'): - new_file[-1] += b'\n' - new_file.append(not_found_content + b'\n') + if not new_file[-1].endswith(os.linesep): + new_file[-1] += os.linesep + new_file.append(not_found_content + os.linesep) has_changes = True if not dry_run: try: @@ -2197,7 +2197,7 @@ def replace(path, raise CommandExecutionError("Exception: {0}".format(exc)) # write new content in the file while avoiding partial reads try: - fh_ = salt.utils.atomicfile.atomic_open(path, 'w') + fh_ = 
salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: fh_.write(salt.utils.to_str(line)) finally: @@ -2369,7 +2369,7 @@ def blockreplace(path, try: fi_file = fileinput.input(path, inplace=False, backup=False, - bufsize=1, mode='r') + bufsize=1, mode='rb') for line in fi_file: result = line @@ -2386,12 +2386,12 @@ def blockreplace(path, # Check for multi-line '\n' terminated content as split will # introduce an unwanted additional new line. - if content and content[-1] == '\n': + if content and content[-1] == os.linesep: content = content[:-1] # push new block content in file - for cline in content.split('\n'): - new_file.append(cline + '\n') + for cline in content.split(os.linesep): + new_file.append(cline + os.linesep) done = True @@ -2419,25 +2419,25 @@ def blockreplace(path, if not done: if prepend_if_not_found: # add the markers and content at the beginning of file - new_file.insert(0, marker_end + '\n') + new_file.insert(0, marker_end + os.linesep) if append_newline is True: - new_file.insert(0, content + '\n') + new_file.insert(0, content + os.linesep) else: new_file.insert(0, content) - new_file.insert(0, marker_start + '\n') + new_file.insert(0, marker_start + os.linesep) done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith('\n'): - new_file[-1] += '\n' + if not new_file[-1].endswith(os.linesep): + new_file[-1] += os.linesep # add the markers and content at the end of file - new_file.append(marker_start + '\n') + new_file.append(marker_start + os.linesep) if append_newline is True: - new_file.append(content + '\n') + new_file.append(content + os.linesep) else: new_file.append(content) - new_file.append(marker_end + '\n') + new_file.append(marker_end + os.linesep) done = True else: raise CommandExecutionError( @@ -2468,7 +2468,7 @@ def blockreplace(path, # write new content in the file while avoiding partial reads try: - fh_ = 
salt.utils.atomicfile.atomic_open(path, 'w') + fh_ = salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: fh_.write(line) finally: diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 1c7dbe13eb4..0a7ef227423 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -10,7 +10,7 @@ import textwrap # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.paths import TMP -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch # Import Salt libs @@ -89,45 +89,57 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): 'repl': 'baz=\\g', 'append_if_not_found': True, } - base = 'foo=1\nbar=2' - expected = '{base}\n{repl}\n'.format(base=base, **args) + base = os.linesep.join(['foo=1', 'bar=2']) + # File ending with a newline, no match - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write(base + '\n') + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + tfile.write(base + os.linesep) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.replace(tfile.name, **args) + expected = os.linesep.join([base, 'baz=\\g']) + os.linesep + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # File not ending with a newline, no match - with tempfile.NamedTemporaryFile('w+') as tfile: + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: tfile.write(base) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.replace(tfile.name, **args) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # A newline should not be added in 
empty files - with tempfile.NamedTemporaryFile('w+') as tfile: - filemod.replace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), args['repl'] + '\n') + tfile = tempfile.NamedTemporaryFile('w+b', delete=False) + tfile.close() + filemod.replace(tfile.name, **args) + expected = args['repl'] + os.linesep + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # Using not_found_content, rather than repl - with tempfile.NamedTemporaryFile('w+') as tfile: - args['not_found_content'] = 'baz=3' - expected = '{base}\n{not_found_content}\n'.format(base=base, **args) + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: tfile.write(base) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + args['not_found_content'] = 'baz=3' + expected = os.linesep.join([base, 'baz=3']) + os.linesep + filemod.replace(tfile.name, **args) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # not appending if matches - with tempfile.NamedTemporaryFile('w+') as tfile: + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: base = 'foo=1\n#baz=42\nbar=2\n' - expected = 'foo=1\nbaz=42\nbar=2\n' + base = os.linesep.join(['foo=1', 'baz=42', 'bar=2']) tfile.write(base) tfile.flush() - filemod.replace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + expected = base + filemod.replace(tfile.name, **args) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) def test_backup(self): fext = '.bak' @@ -246,23 +258,24 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): del self.tfile def test_replace_multiline(self): - new_multiline_content = ( - "Who's that then?\nWell, how'd you become king," - "then?\nWe 
found them. I'm not a witch.\nWe shall" - "say 'Ni' again to you, if you do not appease us." - ) + new_multiline_content = os.linesep.join([ + "Who's that then?", + "Well, how'd you become king, then?", + "We found them. I'm not a witch.", + "We shall say 'Ni' again to you, if you do not appease us." + ]) filemod.blockreplace(self.tfile.name, '#-- START BLOCK 1', '#-- END BLOCK 1', new_multiline_content, backup=False) - with salt.utils.fopen(self.tfile.name, 'r') as fp: + with salt.utils.fopen(self.tfile.name, 'rb') as fp: filecontent = fp.read() - self.assertIn('#-- START BLOCK 1' - + "\n" + new_multiline_content - + "\n" - + '#-- END BLOCK 1', filecontent) + self.assertIn( + os.linesep.join([ + '#-- START BLOCK 1', new_multiline_content, '#-- END BLOCK 1']), + filecontent) self.assertNotIn('old content part 1', filecontent) self.assertNotIn('old content part 2', filecontent) @@ -291,10 +304,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): backup=False, append_if_not_found=True) - with salt.utils.fopen(self.tfile.name, 'r') as fp: - self.assertIn('#-- START BLOCK 2' - + "\n" + new_content - + '#-- END BLOCK 2', fp.read()) + with salt.utils.fopen(self.tfile.name, 'rb') as fp: + self.assertIn( + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)]), + fp.read()) def test_replace_append_newline_at_eof(self): ''' @@ -308,27 +323,33 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): 'content': 'baz', 'append_if_not_found': True, } - block = '{marker_start}\n{content}{marker_end}\n'.format(**args) - expected = base + '\n' + block + block = os.linesep.join(['#start', 'baz#stop']) + os.linesep # File ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write(base + '\n') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write(base + os.linesep) tfile.flush() - filemod.blockreplace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as 
tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.blockreplace(tfile.name, **args) + expected = os.linesep.join([base, block]) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # File not ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: tfile.write(base) tfile.flush() - filemod.blockreplace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), expected) + filemod.blockreplace(tfile.name, **args) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + os.remove(tfile.name) + # A newline should not be added in empty files - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - filemod.blockreplace(tfile.name, **args) - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), block) + tfile = tempfile.NamedTemporaryFile(mode='w+b', delete=False) + tfile.close() + filemod.blockreplace(tfile.name, **args) + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), block) + os.remove(tfile.name) def test_replace_prepend(self): new_content = "Well, I didn't vote for you." 
@@ -343,10 +364,11 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): prepend_if_not_found=False, backup=False ) - with salt.utils.fopen(self.tfile.name, 'r') as fp: + with salt.utils.fopen(self.tfile.name, 'rb') as fp: self.assertNotIn( - '#-- START BLOCK 2' + "\n" - + new_content + '#-- END BLOCK 2', + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)]), fp.read()) filemod.blockreplace(self.tfile.name, @@ -355,12 +377,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): backup=False, prepend_if_not_found=True) - with salt.utils.fopen(self.tfile.name, 'r') as fp: + with salt.utils.fopen(self.tfile.name, 'rb') as fp: self.assertTrue( fp.read().startswith( - '#-- START BLOCK 2' - + "\n" + new_content - + '#-- END BLOCK 2')) + os.linesep.join([ + '#-- START BLOCK 2', + '{0}#-- END BLOCK 2'.format(new_content)]))) def test_replace_partial_marked_lines(self): filemod.blockreplace(self.tfile.name, @@ -477,6 +499,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): } } + @skipIf(salt.utils.is_windows(), 'SED is not available on Windows') def test_sed_limit_escaped(self): with tempfile.NamedTemporaryFile(mode='w+') as tfile: tfile.write(SED_CONTENT) @@ -501,31 +524,34 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): newlines at end of file. 
''' # File ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - tfile.write('foo\n') + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + tfile.write('foo' + os.linesep) tfile.flush() - filemod.append(tfile.name, 'bar') - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'foo\nbar\n') + filemod.append(tfile.name, 'bar') + expected = os.linesep.join(['foo', 'bar']) + os.linesep + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + # File not ending with a newline - with tempfile.NamedTemporaryFile(mode='w+') as tfile: + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: tfile.write('foo') tfile.flush() + filemod.append(tfile.name, 'bar') + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), expected) + + # A newline should be added in empty files + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: filemod.append(tfile.name, 'bar') - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'foo\nbar\n') - # A newline should not be added in empty files - with tempfile.NamedTemporaryFile(mode='w+') as tfile: - filemod.append(tfile.name, 'bar') - with salt.utils.fopen(tfile.name) as tfile2: - self.assertEqual(tfile2.read(), 'bar\n') + with salt.utils.fopen(tfile.name) as tfile2: + self.assertEqual(tfile2.read(), 'bar' + os.linesep) def test_extract_hash(self): ''' Check various hash file formats. 
''' # With file name - with tempfile.NamedTemporaryFile(mode='w+') as tfile: + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: tfile.write( 'rc.conf ef6e82e4006dee563d98ada2a2a80a27\n' 'ead48423703509d37c4a90e6a0d53e143b6fc268 example.tar.gz\n' @@ -534,94 +560,94 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): ) tfile.flush() - result = filemod.extract_hash(tfile.name, '', '/rc.conf') - self.assertEqual(result, { - 'hsum': 'ef6e82e4006dee563d98ada2a2a80a27', - 'hash_type': 'md5' - }) + result = filemod.extract_hash(tfile.name, '', '/rc.conf') + self.assertEqual(result, { + 'hsum': 'ef6e82e4006dee563d98ada2a2a80a27', + 'hash_type': 'md5' + }) - result = filemod.extract_hash(tfile.name, '', '/example.tar.gz') - self.assertEqual(result, { + result = filemod.extract_hash(tfile.name, '', '/example.tar.gz') + self.assertEqual(result, { + 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', + 'hash_type': 'sha1' + }) + + # All the checksums in this test file are sha1 sums. We run this + # loop three times. The first pass tests auto-detection of hash + # type by length of the hash. The second tests matching a specific + # type. The third tests a failed attempt to match a specific type, + # since sha256 was requested but sha1 is what is in the file. + for hash_type in ('', 'sha1', 'sha256'): + # Test the source_hash_name argument. Even though there are + # matches in the source_hash file for both the file_name and + # source params, they should be ignored in favor of the + # source_hash_name. 
+ file_name = '/example.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = './subdir/example.tar.gz' + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { + 'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) + + # Test both a file_name and source but no source_hash_name. + # Even though there are matches for both file_name and + # source_hash_name, file_name should be preferred. + file_name = '/example.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = None + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', 'hash_type': 'sha1' - }) + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) - # All the checksums in this test file are sha1 sums. We run this - # loop three times. The first pass tests auto-detection of hash - # type by length of the hash. The second tests matching a specific - # type. The third tests a failed attempt to match a specific type, - # since sha256 was requested but sha1 is what is in the file. - for hash_type in ('', 'sha1', 'sha256'): - # Test the source_hash_name argument. Even though there are - # matches in the source_hash file for both the file_name and - # source params, they should be ignored in favor of the - # source_hash_name. 
- file_name = '/example.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = './subdir/example.tar.gz' - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) - - # Test both a file_name and source but no source_hash_name. - # Even though there are matches for both file_name and - # source_hash_name, file_name should be preferred. - file_name = '/example.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = None - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) - - # Test both a file_name and source but no source_hash_name. - # Since there is no match for the file_name, the source is - # matched. - file_name = '/somefile.tar.gz' - source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' - source_hash_name = None - result = filemod.extract_hash( - tfile.name, - hash_type, - file_name, - source, - source_hash_name) - expected = { - 'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) + # Test both a file_name and source but no source_hash_name. + # Since there is no match for the file_name, the source is + # matched. 
+ file_name = '/somefile.tar.gz' + source = 'https://mydomain.tld/foo.tar.bz2?key1=val1&key2=val2' + source_hash_name = None + result = filemod.extract_hash( + tfile.name, + hash_type, + file_name, + source, + source_hash_name) + expected = { + 'hsum': 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) # Hash only, no file name (Maven repo checksum format) # Since there is no name match, the first checksum in the file will # always be returned, never the second. - with tempfile.NamedTemporaryFile(mode='w+') as tfile: + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: tfile.write('ead48423703509d37c4a90e6a0d53e143b6fc268\n' 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n') tfile.flush() - for hash_type in ('', 'sha1', 'sha256'): - result = filemod.extract_hash(tfile.name, hash_type, '/testfile') - expected = { - 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', - 'hash_type': 'sha1' - } if hash_type != 'sha256' else None - self.assertEqual(result, expected) + for hash_type in ('', 'sha1', 'sha256'): + result = filemod.extract_hash(tfile.name, hash_type, '/testfile') + expected = { + 'hsum': 'ead48423703509d37c4a90e6a0d53e143b6fc268', + 'hash_type': 'sha1' + } if hash_type != 'sha256' else None + self.assertEqual(result, expected) def test_user_to_uid_int(self): ''' @@ -774,6 +800,7 @@ class FileBasicsTestCase(TestCase, LoaderModuleMockMixin): self.addCleanup(os.remove, self.myfile) self.addCleanup(delattr, self, 'myfile') + @skipIf(salt.utils.is_windows(), 'os.symlink is not available on Windows') def test_symlink_already_in_desired_state(self): os.symlink(self.tfile.name, self.directory + '/a_link') self.addCleanup(os.remove, self.directory + '/a_link') From 9fe83a34a55fc799ee005d958b7ff2bcd07f271d Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 29 Jun 2017 16:57:01 -0600 Subject: [PATCH 203/348] Remove old variable declaration --- 
tests/unit/modules/test_file.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 0a7ef227423..f987c5f8a1c 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -132,7 +132,6 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): # not appending if matches with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: - base = 'foo=1\n#baz=42\nbar=2\n' base = os.linesep.join(['foo=1', 'baz=42', 'bar=2']) tfile.write(base) tfile.flush() From 543610570cbf9b6647e954e1ca5ddd98766f509f Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 31 Jul 2017 17:48:48 -0600 Subject: [PATCH 204/348] Fix bytestring issues, fix errored tests --- salt/modules/file.py | 62 +++++++++++++++++++++++---------- tests/unit/modules/test_file.py | 4 +++ 2 files changed, 48 insertions(+), 18 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 9f8263cab0b..8e0d4edbf1d 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2179,14 +2179,14 @@ def replace(path, if not_found_content is None: not_found_content = repl if prepend_if_not_found: - new_file.insert(0, not_found_content + os.linesep) + new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep)) else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith(os.linesep): - new_file[-1] += os.linesep - new_file.append(not_found_content + os.linesep) + if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)): + new_file[-1] += salt.utils.to_bytes(os.linesep) + new_file.append(not_found_content + salt.utils.to_bytes(os.linesep)) has_changes = True if not dry_run: try: @@ -2386,12 +2386,12 @@ def blockreplace(path, # Check for multi-line '\n' terminated content as split will # introduce an unwanted additional new line. 
- if content and content[-1] == os.linesep: + if content and content[-1] == salt.utils.to_bytes(os.linesep): content = content[:-1] # push new block content in file - for cline in content.split(os.linesep): - new_file.append(cline + os.linesep) + for cline in content.split(salt.utils.to_bytes(os.linesep)): + new_file.append(cline + salt.utils.to_bytes(os.linesep)) done = True @@ -2419,25 +2419,25 @@ def blockreplace(path, if not done: if prepend_if_not_found: # add the markers and content at the beginning of file - new_file.insert(0, marker_end + os.linesep) + new_file.insert(0, marker_end + salt.utils.to_bytes(os.linesep)) if append_newline is True: - new_file.insert(0, content + os.linesep) + new_file.insert(0, content + salt.utils.to_bytes(os.linesep)) else: new_file.insert(0, content) - new_file.insert(0, marker_start + os.linesep) + new_file.insert(0, marker_start + salt.utils.to_bytes(os.linesep)) done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith(os.linesep): + if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)): new_file[-1] += os.linesep # add the markers and content at the end of file - new_file.append(marker_start + os.linesep) + new_file.append(marker_start + salt.utils.to_bytes(os.linesep)) if append_newline is True: - new_file.append(content + os.linesep) + new_file.append(content + salt.utils.to_bytes(os.linesep)) else: new_file.append(content) - new_file.append(marker_end + os.linesep) + new_file.append(marker_end + salt.utils.to_bytes(os.linesep)) done = True else: raise CommandExecutionError( @@ -3585,6 +3585,7 @@ def source_list(source, source_hash, saltenv): if contextkey in __context__: return __context__[contextkey] + # get the master file list if isinstance(source, list): mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)] @@ -3609,6 +3610,14 @@ def source_list(source, source_hash, saltenv): single_src = next(iter(single)) 
single_hash = single[single_src] if single[single_src] else source_hash urlparsed_single_src = _urlparse(single_src) + # Fix this for Windows + if salt.utils.is_windows(): + # urlparse doesn't handle a local Windows path without the + # protocol indicator (file://). The scheme will be the + # drive letter instead of the protocol. So, we'll add the + # protocol and re-parse + if urlparsed_single_src.scheme.lower() in string.ascii_lowercase: + urlparsed_single_src = _urlparse('file://' + single_src) proto = urlparsed_single_src.scheme if proto == 'salt': path, senv = salt.utils.url.parse(single_src) @@ -3620,10 +3629,15 @@ def source_list(source, source_hash, saltenv): elif proto.startswith('http') or proto == 'ftp': ret = (single_src, single_hash) break - elif proto == 'file' and os.path.exists(urlparsed_single_src.path): + elif proto == 'file' and ( + os.path.exists(urlparsed_single_src.netloc) or + os.path.exists(urlparsed_single_src.path) or + os.path.exists(os.path.join( + urlparsed_single_src.netloc, + urlparsed_single_src.path))): ret = (single_src, single_hash) break - elif single_src.startswith('/') and os.path.exists(single_src): + elif single_src.startswith(os.linesep) and os.path.exists(single_src): ret = (single_src, single_hash) break elif isinstance(single, six.string_types): @@ -3634,14 +3648,26 @@ def source_list(source, source_hash, saltenv): ret = (single, source_hash) break urlparsed_src = _urlparse(single) + if salt.utils.is_windows(): + # urlparse doesn't handle a local Windows path without the + # protocol indicator (file://). The scheme will be the + # drive letter instead of the protocol. 
So, we'll add the + # protocol and re-parse + if urlparsed_src.scheme.lower() in string.ascii_lowercase: + urlparsed_src = _urlparse('file://' + single) proto = urlparsed_src.scheme - if proto == 'file' and os.path.exists(urlparsed_src.path): + if proto == 'file' and ( + os.path.exists(urlparsed_src.netloc) or + os.path.exists(urlparsed_src.path) or + os.path.exists(os.path.join( + urlparsed_src.netloc, + urlparsed_src.path))): ret = (single, source_hash) break elif proto.startswith('http') or proto == 'ftp': ret = (single, source_hash) break - elif single.startswith('/') and os.path.exists(single): + elif single.startswith(os.linesep) and os.path.exists(single): ret = (single, source_hash) break if ret is None: diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index f987c5f8a1c..65f20d0cdc9 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -862,6 +862,10 @@ class FileBasicsTestCase(TestCase, LoaderModuleMockMixin): def test_source_list_for_list_returns_local_file_slash_from_dict(self): with patch.dict(filemod.__salt__, {'cp.list_master': MagicMock(return_value=[]), 'cp.list_master_dirs': MagicMock(return_value=[])}): + print('*' * 68) + print(self.myfile) + print(os.path.exists(self.myfile)) + print('*' * 68) ret = filemod.source_list( [{self.myfile: ''}], 'filehash', 'base') self.assertEqual(list(ret), [self.myfile, 'filehash']) From 716e99c4530a3fbd49c41836db5f54be951fe92d Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 23 Aug 2017 17:11:18 -0600 Subject: [PATCH 205/348] Fix py3 bytestring problems --- salt/modules/file.py | 27 +++++++++--------- tests/unit/modules/test_file.py | 49 ++++++++++++++++----------------- 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 8e0d4edbf1d..c5f27d2ec50 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2179,7 +2179,7 @@ def replace(path, if not_found_content is None: 
not_found_content = repl if prepend_if_not_found: - new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep)) + new_file.insert(0, not_found_content + os.linesep) else: # append_if_not_found # Make sure we have a newline at the end of the file @@ -2199,7 +2199,7 @@ def replace(path, try: fh_ = salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: - fh_.write(salt.utils.to_str(line)) + fh_.write(salt.utils.to_bytes(line)) finally: fh_.close() @@ -2372,6 +2372,7 @@ def blockreplace(path, bufsize=1, mode='rb') for line in fi_file: + line = salt.utils.to_str(line) result = line if marker_start in line: @@ -2386,12 +2387,12 @@ def blockreplace(path, # Check for multi-line '\n' terminated content as split will # introduce an unwanted additional new line. - if content and content[-1] == salt.utils.to_bytes(os.linesep): + if content and content[-1] == os.linesep: content = content[:-1] # push new block content in file - for cline in content.split(salt.utils.to_bytes(os.linesep)): - new_file.append(cline + salt.utils.to_bytes(os.linesep)) + for cline in content.split(os.linesep): + new_file.append(cline + os.linesep) done = True @@ -2419,25 +2420,25 @@ def blockreplace(path, if not done: if prepend_if_not_found: # add the markers and content at the beginning of file - new_file.insert(0, marker_end + salt.utils.to_bytes(os.linesep)) + new_file.insert(0, marker_end + os.linesep) if append_newline is True: - new_file.insert(0, content + salt.utils.to_bytes(os.linesep)) + new_file.insert(0, content + os.linesep) else: new_file.insert(0, content) - new_file.insert(0, marker_start + salt.utils.to_bytes(os.linesep)) + new_file.insert(0, marker_start + os.linesep) done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): - if not new_file[-1].endswith(salt.utils.to_bytes(os.linesep)): + if not new_file[-1].endswith(os.linesep): new_file[-1] += os.linesep # add the markers and content at the end of 
file - new_file.append(marker_start + salt.utils.to_bytes(os.linesep)) + new_file.append(marker_start + os.linesep) if append_newline is True: - new_file.append(content + salt.utils.to_bytes(os.linesep)) + new_file.append(content + os.linesep) else: new_file.append(content) - new_file.append(marker_end + salt.utils.to_bytes(os.linesep)) + new_file.append(marker_end + os.linesep) done = True else: raise CommandExecutionError( @@ -2470,7 +2471,7 @@ def blockreplace(path, try: fh_ = salt.utils.atomicfile.atomic_open(path, 'wb') for line in new_file: - fh_.write(line) + fh_.write(salt.utils.to_bytes(line)) finally: fh_.close() diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 65f20d0cdc9..2b844294875 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -93,7 +93,7 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): # File ending with a newline, no match with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: - tfile.write(base + os.linesep) + tfile.write(salt.utils.to_bytes(base + os.linesep)) tfile.flush() filemod.replace(tfile.name, **args) expected = os.linesep.join([base, 'baz=\\g']) + os.linesep @@ -103,7 +103,7 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): # File not ending with a newline, no match with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: - tfile.write(base) + tfile.write(salt.utils.to_bytes(base)) tfile.flush() filemod.replace(tfile.name, **args) with salt.utils.fopen(tfile.name) as tfile2: @@ -121,7 +121,7 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): # Using not_found_content, rather than repl with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: - tfile.write(base) + tfile.write(salt.utils.to_bytes(base)) tfile.flush() args['not_found_content'] = 'baz=3' expected = os.linesep.join([base, 'baz=3']) + os.linesep @@ -133,7 +133,7 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): # not appending if 
matches with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: base = os.linesep.join(['foo=1', 'baz=42', 'bar=2']) - tfile.write(base) + tfile.write(salt.utils.to_bytes(base)) tfile.flush() expected = base filemod.replace(tfile.name, **args) @@ -271,12 +271,12 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): with salt.utils.fopen(self.tfile.name, 'rb') as fp: filecontent = fp.read() - self.assertIn( + self.assertIn(salt.utils.to_bytes( os.linesep.join([ - '#-- START BLOCK 1', new_multiline_content, '#-- END BLOCK 1']), + '#-- START BLOCK 1', new_multiline_content, '#-- END BLOCK 1'])), filecontent) - self.assertNotIn('old content part 1', filecontent) - self.assertNotIn('old content part 2', filecontent) + self.assertNotIn(b'old content part 1', filecontent) + self.assertNotIn(b'old content part 2', filecontent) def test_replace_append(self): new_content = "Well, I didn't vote for you." @@ -304,10 +304,10 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): append_if_not_found=True) with salt.utils.fopen(self.tfile.name, 'rb') as fp: - self.assertIn( + self.assertIn(salt.utils.to_bytes( os.linesep.join([ '#-- START BLOCK 2', - '{0}#-- END BLOCK 2'.format(new_content)]), + '{0}#-- END BLOCK 2'.format(new_content)])), fp.read()) def test_replace_append_newline_at_eof(self): @@ -325,7 +325,7 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): block = os.linesep.join(['#start', 'baz#stop']) + os.linesep # File ending with a newline with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write(base + os.linesep) + tfile.write(salt.utils.to_bytes(base + os.linesep)) tfile.flush() filemod.blockreplace(tfile.name, **args) expected = os.linesep.join([base, block]) @@ -335,7 +335,7 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): # File not ending with a newline with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write(base) + 
tfile.write(salt.utils.to_bytes(base)) tfile.flush() filemod.blockreplace(tfile.name, **args) with salt.utils.fopen(tfile.name) as tfile2: @@ -364,10 +364,10 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): backup=False ) with salt.utils.fopen(self.tfile.name, 'rb') as fp: - self.assertNotIn( + self.assertNotIn(salt.utils.to_bytes( os.linesep.join([ '#-- START BLOCK 2', - '{0}#-- END BLOCK 2'.format(new_content)]), + '{0}#-- END BLOCK 2'.format(new_content)])), fp.read()) filemod.blockreplace(self.tfile.name, @@ -378,10 +378,10 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): with salt.utils.fopen(self.tfile.name, 'rb') as fp: self.assertTrue( - fp.read().startswith( + fp.read().startswith(salt.utils.to_bytes( os.linesep.join([ '#-- START BLOCK 2', - '{0}#-- END BLOCK 2'.format(new_content)]))) + '{0}#-- END BLOCK 2'.format(new_content)])))) def test_replace_partial_marked_lines(self): filemod.blockreplace(self.tfile.name, @@ -524,7 +524,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): ''' # File ending with a newline with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write('foo' + os.linesep) + tfile.write(salt.utils.to_bytes('foo' + os.linesep)) tfile.flush() filemod.append(tfile.name, 'bar') expected = os.linesep.join(['foo', 'bar']) + os.linesep @@ -533,7 +533,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): # File not ending with a newline with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write('foo') + tfile.write(salt.utils.to_bytes('foo')) tfile.flush() filemod.append(tfile.name, 'bar') with salt.utils.fopen(tfile.name) as tfile2: @@ -551,12 +551,12 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): ''' # With file name with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write( + tfile.write(salt.utils.to_bytes( 'rc.conf ef6e82e4006dee563d98ada2a2a80a27\n' 'ead48423703509d37c4a90e6a0d53e143b6fc268 
example.tar.gz\n' 'fe05bcdcdc4928012781a5f1a2a77cbb5398e106 ./subdir/example.tar.gz\n' 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b foo.tar.bz2\n' - ) + )) tfile.flush() result = filemod.extract_hash(tfile.name, '', '/rc.conf') @@ -636,8 +636,9 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): # Since there is no name match, the first checksum in the file will # always be returned, never the second. with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: - tfile.write('ead48423703509d37c4a90e6a0d53e143b6fc268\n' - 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n') + tfile.write(salt.utils.to_bytes( + 'ead48423703509d37c4a90e6a0d53e143b6fc268\n' + 'ad782ecdac770fc6eb9a62e44f90873fb97fb26b\n')) tfile.flush() for hash_type in ('', 'sha1', 'sha256'): @@ -862,10 +863,6 @@ class FileBasicsTestCase(TestCase, LoaderModuleMockMixin): def test_source_list_for_list_returns_local_file_slash_from_dict(self): with patch.dict(filemod.__salt__, {'cp.list_master': MagicMock(return_value=[]), 'cp.list_master_dirs': MagicMock(return_value=[])}): - print('*' * 68) - print(self.myfile) - print(os.path.exists(self.myfile)) - print('*' * 68) ret = filemod.source_list( [{self.myfile: ''}], 'filehash', 'base') self.assertEqual(list(ret), [self.myfile, 'filehash']) From d5f27901e324ad6904ea4da1ddc9cd12bf72e543 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 23 Aug 2017 17:16:49 -0600 Subject: [PATCH 206/348] Fix additional bytestring issue --- salt/modules/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index c5f27d2ec50..f6fd3e5a5d4 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2179,7 +2179,7 @@ def replace(path, if not_found_content is None: not_found_content = repl if prepend_if_not_found: - new_file.insert(0, not_found_content + os.linesep) + new_file.insert(0, not_found_content + salt.utils.to_bytes(os.linesep)) else: # append_if_not_found # Make sure we have a newline at the 
end of the file From e20aa5c39b7d7315664094bc832858c3f866a77a Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 24 Aug 2017 15:39:30 -0600 Subject: [PATCH 207/348] Fix line, use os.sep instead of os.linesep --- salt/modules/file.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index f6fd3e5a5d4..ae704d75a5f 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -3586,7 +3586,6 @@ def source_list(source, source_hash, saltenv): if contextkey in __context__: return __context__[contextkey] - # get the master file list if isinstance(source, list): mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)] @@ -3638,7 +3637,7 @@ def source_list(source, source_hash, saltenv): urlparsed_single_src.path))): ret = (single_src, single_hash) break - elif single_src.startswith(os.linesep) and os.path.exists(single_src): + elif single_src.startswith(os.sep) and os.path.exists(single_src): ret = (single_src, single_hash) break elif isinstance(single, six.string_types): @@ -3668,7 +3667,7 @@ def source_list(source, source_hash, saltenv): elif proto.startswith('http') or proto == 'ftp': ret = (single, source_hash) break - elif single.startswith(os.linesep) and os.path.exists(single): + elif single.startswith(os.sep) and os.path.exists(single): ret = (single, source_hash) break if ret is None: From b55172d5dc802ad501924c82ac46aff0819c08ef Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 24 Aug 2017 17:06:10 -0600 Subject: [PATCH 208/348] Split by Windows and Linux style line endings --- salt/modules/file.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index ae704d75a5f..4751476a2ae 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2385,14 +2385,22 @@ def blockreplace(path, # end of block detected in_block = False - # Check for multi-line '\n' terminated content as split will - # introduce an 
unwanted additional new line. - if content and content[-1] == os.linesep: - content = content[:-1] + # Separate the content into lines. Account for Windows + # style line endings using os.linesep, then by linux + # style line endings + split_content = [] + for linesep_line in content.split(os.linesep): + for content_line in linesep_line.split('\n'): + split_content.append(content_line) + + # Trim any trailing new lines to avoid unwanted + # additional new lines + while not split_content[-1]: + split_content.pop() # push new block content in file - for cline in content.split(os.linesep): - new_file.append(cline + os.linesep) + for content_line in split_content: + new_file.append(content_line + os.linesep) done = True From 352fe69e3568558c6de274cd46a3efc20fecde44 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 17:47:01 -0600 Subject: [PATCH 209/348] Clarify the purpose of the for loop --- salt/modules/file.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 4751476a2ae..6e903a5669d 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -2385,9 +2385,11 @@ def blockreplace(path, # end of block detected in_block = False - # Separate the content into lines. Account for Windows - # style line endings using os.linesep, then by linux - # style line endings + # Handle situations where there may be multiple types + # of line endings in the same file. Separate the content + # into lines. 
Account for Windows-style line endings + # using os.linesep, then by linux-style line endings + # using '\n' split_content = [] for linesep_line in content.split(os.linesep): for content_line in linesep_line.split('\n'): From 056f3bb4c09c7b8a9cfb9f12edaa4d1d43b1184e Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 14 Sep 2017 08:42:19 -0600 Subject: [PATCH 210/348] Use with to open temp file --- tests/unit/modules/test_file.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 2b844294875..713a96576a5 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -111,8 +111,8 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): os.remove(tfile.name) # A newline should not be added in empty files - tfile = tempfile.NamedTemporaryFile('w+b', delete=False) - tfile.close() + with tempfile.NamedTemporaryFile('w+b', delete=False) as tfile: + pass filemod.replace(tfile.name, **args) expected = args['repl'] + os.linesep with salt.utils.fopen(tfile.name) as tfile2: @@ -343,8 +343,8 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): os.remove(tfile.name) # A newline should not be added in empty files - tfile = tempfile.NamedTemporaryFile(mode='w+b', delete=False) - tfile.close() + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tfile: + pass filemod.blockreplace(tfile.name, **args) with salt.utils.fopen(tfile.name) as tfile2: self.assertEqual(tfile2.read(), block) From 048e16883f54b27e43f892dc447a369c15c0c2e4 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 28 Jun 2017 17:20:34 -0600 Subject: [PATCH 211/348] Use uppercase KEY --- tests/unit/modules/test_environ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_environ.py b/tests/unit/modules/test_environ.py index 94429420413..889f2e5c80b 100644 --- a/tests/unit/modules/test_environ.py +++ 
b/tests/unit/modules/test_environ.py @@ -83,7 +83,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(os.environ, mock_environ): mock_setval = MagicMock(return_value=None) with patch.object(environ, 'setval', mock_setval): - self.assertEqual(environ.setenv({}, False, True, False)['key'], + self.assertEqual(environ.setenv({}, False, True, False)['KEY'], None) def test_get(self): From d73ef44cf676ccc3a021c0dbab47321792c84360 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 29 Jun 2017 10:54:25 -0600 Subject: [PATCH 212/348] Mock with uppercase KEY --- tests/unit/modules/test_environ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_environ.py b/tests/unit/modules/test_environ.py index 889f2e5c80b..085887bfe47 100644 --- a/tests/unit/modules/test_environ.py +++ b/tests/unit/modules/test_environ.py @@ -70,7 +70,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin): Set multiple salt process environment variables from a dict. Returns a dict. ''' - mock_environ = {'key': 'value'} + mock_environ = {'KEY': 'value'} with patch.dict(os.environ, mock_environ): self.assertFalse(environ.setenv('environ')) From 68e1bd99ebe8ff83ebe1e456af91d284e776c820 Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Mon, 25 Sep 2017 12:10:23 -0400 Subject: [PATCH 213/348] Revert "Extend openscap module command parsing." 
--- salt/modules/openscap.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py index 0dfb911f4a5..20615500123 100644 --- a/salt/modules/openscap.py +++ b/salt/modules/openscap.py @@ -26,7 +26,7 @@ _XCCDF_MAP = { 'cmd_pattern': ( "oscap xccdf eval " "--oval-results --results results.xml --report report.html " - "--profile {0} {1} {2}" + "--profile {0} {1}" ) } } @@ -73,7 +73,6 @@ def xccdf(params): ''' params = shlex.split(params) policy = params[-1] - del params[-1] success = True error = None @@ -90,7 +89,7 @@ def xccdf(params): error = str(err) if success: - cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, " ".join(argv), policy) + cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy) tempdir = tempfile.mkdtemp() proc = Popen( shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir) From 8d6ab66658dc8ddc1f571afe39bc4af08aa40f8e Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Wed, 20 Sep 2017 12:54:48 -0700 Subject: [PATCH 214/348] Add new core grains to display minion storage initiators Support for Linux and Windows platforms to display both the iSCSI IQN and Fibre Channel HBA WWPNs. With the integration of storage modules to allow configuration of 3rd party external storage arrays, these values are needed to enable the full auotmation of storage provisioning to minions. 
Support for Windows, Linux and AIX (iSCSI only) --- salt/grains/core.py | 118 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py index 57142ded3fd..c613c27d64e 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -16,6 +16,7 @@ import os import json import socket import sys +import glob import re import platform import logging @@ -65,6 +66,7 @@ __salt__ = { 'cmd.run_all': salt.modules.cmdmod._run_all_quiet, 'smbios.records': salt.modules.smbios.records, 'smbios.get': salt.modules.smbios.get, + 'cmd.run_ps': salt.modules.cmdmod.powershell, } log = logging.getLogger(__name__) @@ -2472,3 +2474,119 @@ def default_gateway(): except Exception as exc: pass return grains + + +def fc_wwn(): + ''' + Return list of fiber channel HBA WWNs + ''' + grains = {} + grains['fc_wwn'] = False + if salt.utils.platform.is_linux(): + grains['fc_wwn'] = _linux_wwns() + elif salt.utils.platform.is_windows(): + grains['fc_wwn'] = _windows_wwns() + return grains + + +def iscsi_iqn(): + ''' + Return iSCSI IQN + ''' + grains = {} + grains['iscsi_iqn'] = False + if salt.utils.platform.is_linux(): + grains['iscsi_iqn'] = _linux_iqn() + elif salt.utils.platform.is_windows(): + grains['iscsi_iqn'] = _windows_iqn() + elif salt.utils.platform.is_aix(): + grains['iscsi_iqn'] = _aix_iqn() + return grains + + +def _linux_iqn(): + ''' + Return iSCSI IQN from a Linux host. + ''' + ret = [] + + initiator = '/etc/iscsi/initiatorname.iscsi' + + if os.path.isfile(initiator): + with salt.utils.files.fopen(initiator, 'r') as _iscsi: + for line in _iscsi: + if line.find('InitiatorName') != -1: + iqn = line.split('=') + ret.extend([iqn[1]]) + return ret + + +def _aix_iqn(): + ''' + Return iSCSI IQN from an AIX host. 
+ ''' + ret = [] + + aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name' + + aixret = __salt__['cmd.run'](aixcmd) + if aixret[0].isalpha(): + iqn = aixret.split() + ret.extend([iqn[1]]) + return ret + + +def _linux_wwns(): + ''' + Return Fibre Channel port WWNs from a Linux host. + ''' + ret = [] + + for fcfile in glob.glob('/sys/class/fc_host/*/port_name'): + with salt.utils.files.fopen(fcfile, 'r') as _wwn: + for line in _wwn: + ret.extend([line[2:]]) + return ret + + +def _windows_iqn(): + ''' + Return iSCSI IQN from a Windows host. + ''' + ret = [] + + wmic = salt.utils.path.which('wmic') + + if not wmic: + return ret + + namespace = r'\\root\WMI' + mspath = 'MSiSCSIInitiator_MethodClass' + get = 'iSCSINodeName' + + cmdret = __salt__['cmd.run_all']( + '{0} /namespace:{1} path {2} get {3} /format:table'.format( + wmic, namespace, mspath, get)) + + for line in cmdret['stdout'].splitlines(): + if line[0].isalpha(): + continue + ret.extend([line]) + + return ret + + +def _windows_wwns(): + ''' + Return Fibre Channel port WWNs from a Windows host. 
+ ''' + ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}' + + ret = [] + + cmdret = __salt__['cmd.run_ps'](ps_cmd) + + for line in cmdret: + ret.append(line) + + return ret From 03c673bb004cef6737df9ff0b3996daf7f2bd19b Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 14 Sep 2017 13:46:54 +0200 Subject: [PATCH 215/348] highstate output: allow '_id' mode for each output mode --- salt/output/highstate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/output/highstate.py b/salt/output/highstate.py index c003cfc32cf..944fac87778 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -246,7 +246,7 @@ def _format_host(host, data): state_output = __opts__.get('state_output', 'full').lower() comps = [sdecode(comp) for comp in tname.split('_|-')] - if state_output == 'mixed_id': + if state_output.endswith('_id'): # Swap in the ID for the name. Refs #35137 comps[2] = comps[1] From bfbca748e2fc829a51e1820985b97464c31889bf Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 14 Sep 2017 13:58:39 +0200 Subject: [PATCH 216/348] highstate output: Document additional output modes --- conf/master | 11 ++++++----- conf/minion | 9 ++++++--- conf/proxy | 9 ++++++--- conf/suse/master | 11 ++++++----- doc/ref/configuration/master.rst | 13 ++++++++----- doc/ref/configuration/minion.rst | 12 ++++++++---- doc/topics/releases/oxygen.rst | 6 ++++++ salt/output/highstate.py | 24 ++++++++++++------------ 8 files changed, 58 insertions(+), 37 deletions(-) diff --git a/conf/master b/conf/master index 08accd85cb4..e39b0b5e3e5 100644 --- a/conf/master +++ b/conf/master @@ -589,11 +589,12 @@ # all data that has a result of True and no changes will be suppressed. 
#state_verbose: True -# The state_output setting changes if the output is the full multi line -# output for each changed state if set to 'full', but if set to 'terse' -# the output will be shortened to a single line. If set to 'mixed', the output -# will be terse unless a state failed, in which case that output will be full. -# If set to 'changes', the output will be full unless the state didn't change. +# The state_output setting controls which results will be output full multi line +# full, terse - each state will be full/terse +# mixed - only states with errors will be full +# changes - states with changes and errors will be full +# full_id, mixed_id, changes_id and terse_id are also allowed; +# when set, the state ID will be used as name in the output #state_output: full # The state_output_diff setting changes whether or not the output from diff --git a/conf/minion b/conf/minion index fa5caf317b9..ffa6b7273fb 100644 --- a/conf/minion +++ b/conf/minion @@ -635,9 +635,12 @@ # all data that has a result of True and no changes will be suppressed. #state_verbose: True -# The state_output setting changes if the output is the full multi line -# output for each changed state if set to 'full', but if set to 'terse' -# the output will be shortened to a single line. +# The state_output setting controls which results will be output full multi line +# full, terse - each state will be full/terse +# mixed - only states with errors will be full +# changes - states with changes and errors will be full +# full_id, mixed_id, changes_id and terse_id are also allowed; +# when set, the state ID will be used as name in the output #state_output: full # The state_output_diff setting changes whether or not the output from diff --git a/conf/proxy b/conf/proxy index f81dc32b5c7..908dd25ba83 100644 --- a/conf/proxy +++ b/conf/proxy @@ -498,9 +498,12 @@ # all data that has a result of True and no changes will be suppressed. 
#state_verbose: True -# The state_output setting changes if the output is the full multi line -# output for each changed state if set to 'full', but if set to 'terse' -# the output will be shortened to a single line. +# The state_output setting controls which results will be output full multi line +# full, terse - each state will be full/terse +# mixed - only states with errors will be full +# changes - states with changes and errors will be full +# full_id, mixed_id, changes_id and terse_id are also allowed; +# when set, the state ID will be used as name in the output #state_output: full # The state_output_diff setting changes whether or not the output from diff --git a/conf/suse/master b/conf/suse/master index aeaa1d88591..cdba8f7dacc 100644 --- a/conf/suse/master +++ b/conf/suse/master @@ -560,11 +560,12 @@ syndic_user: salt # all data that has a result of True and no changes will be suppressed. #state_verbose: True -# The state_output setting changes if the output is the full multi line -# output for each changed state if set to 'full', but if set to 'terse' -# the output will be shortened to a single line. If set to 'mixed', the output -# will be terse unless a state failed, in which case that output will be full. -# If set to 'changes', the output will be full unless the state didn't change. 
+# The state_output setting controls which results will be output full multi line +# full, terse - each state will be full/terse +# mixed - only states with errors will be full +# changes - states with changes and errors will be full +# full_id, mixed_id, changes_id and terse_id are also allowed; +# when set, the state ID will be used as name in the output #state_output: full # The state_output_diff setting changes whether or not the output from diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index 0c6ad6f9199..8dc4f83ca15 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -2011,11 +2011,14 @@ output for states that failed or states that have changes. Default: ``full`` -The state_output setting changes if the output is the full multi line -output for each changed state if set to 'full', but if set to 'terse' -the output will be shortened to a single line. If set to 'mixed', the output -will be terse unless a state failed, in which case that output will be full. -If set to 'changes', the output will be full unless the state didn't change. +The state_output setting controls which results will be output full multi line: + +* ``full``, ``terse`` - each state will be full/terse +* ``mixed`` - only states with errors will be full +* ``changes`` - states with changes and errors will be full + +``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed; +when set, the state ID will be used as name in the output. .. code-block:: yaml diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 3438bfca035..4a440526ad2 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -1664,15 +1664,19 @@ output for states that failed or states that have changes. 
Default: ``full`` -The state_output setting changes if the output is the full multi line -output for each changed state if set to 'full', but if set to 'terse' -the output will be shortened to a single line. +The state_output setting controls which results will be output full multi line: + +* ``full``, ``terse`` - each state will be full/terse +* ``mixed`` - only states with errors will be full +* ``changes`` - states with changes and errors will be full + +``full_id``, ``mixed_id``, ``changes_id`` and ``terse_id`` are also allowed; +when set, the state ID will be used as name in the output. .. code-block:: yaml state_output: full - .. conf_minion:: state_output_diff ``state_output_diff`` diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 4c651bfce95..5c414a71432 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -88,6 +88,12 @@ environments (i.e. ``saltenvs``) have been added: ignore all tags and use branches only, and also to keep SHAs from being made available as saltenvs. +Additional output modes +------------------ + +The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``terse_id``. +Just like ``mixed_id``, these use the state ID as name in the highstate output + Salt Cloud Features ------------------- diff --git a/salt/output/highstate.py b/salt/output/highstate.py index 944fac87778..7f7620557c2 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -16,30 +16,30 @@ state_verbose: instruct the highstate outputter to omit displaying anything in green, this means that nothing with a result of True and no changes will not be printed state_output: - The highstate outputter has six output modes, ``full``, ``terse``, - ``mixed``, ``mixed_id``, ``changes`` and ``filter``. 
- + The highstate outputter has six output modes, + ``full``, ``terse``, ``mixed``, ``changes`` and ``filter`` * The default is set to ``full``, which will display many lines of detailed information for each executed chunk. * If ``terse`` is used, then the output is greatly simplified and shown in only one line. * If ``mixed`` is used, then terse output will be used unless a state failed, in which case full output will be used. - * If ``mixed_id`` is used, then the mixed form will be used, but the value for ``name`` - will be drawn from the state ID. This is useful for cases where the name - value might be very long and hard to read. * If ``changes`` is used, then terse output will be used if there was no error and no changes, otherwise full output will be used. * If ``filter`` is used, then either or both of two different filters can be used: ``exclude`` or ``terse``. - * for ``exclude``, state.highstate expects a list of states to be excluded - (or ``None``) - followed by ``True`` for terse output or ``False`` for regular output. - Because of parsing nuances, if only one of these is used, it must still - contain a comma. For instance: `exclude=True,`. - * for ``terse``, state.highstate expects simply ``True`` or ``False``. + * for ``exclude``, state.highstate expects a list of states to be excluded (or ``None``) + followed by ``True`` for terse output or ``False`` for regular output. + Because of parsing nuances, if only one of these is used, it must still + contain a comma. For instance: `exclude=True,`. + * for ``terse``, state.highstate expects simply ``True`` or ``False``. These can be set as such from the command line, or in the Salt config as `state_output_exclude` or `state_output_terse`, respectively. + The output modes have one modifier: + ``full_id``, ``terse_id``, ``mixed_id``, ``changes_id`` and ``filter_id`` + If ``_id`` is used, then the corresponding form will be used, but the value for ``name`` + will be drawn from the state ID. 
This is useful for cases where the name + value might be very long and hard to read. state_tabular: If `state_output` uses the terse output, set this to `True` for an aligned output format. If you wish to use a custom format, this can be set to a From a2234e45e2d81c76f1e30b8f5ae5344c208c9b57 Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Mon, 25 Sep 2017 18:37:48 +0200 Subject: [PATCH 217/348] Update release note docs regarding _id highstate output modes --- doc/topics/releases/oxygen.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 5c414a71432..5bd9ec8a809 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -92,7 +92,8 @@ Additional output modes ------------------ The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``terse_id``. -Just like ``mixed_id``, these use the state ID as name in the highstate output +Just like ``mixed_id``, these use the state ID as name in the highstate output. +For more information on these output modes, see the docs for the :mod:`Highstate Outputter `. Salt Cloud Features ------------------- From 846af152b27a334e9cf7f9f85d9271aab234cb77 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 25 Sep 2017 12:54:32 -0400 Subject: [PATCH 218/348] Update mocked values in some master/masterapi unit tests The addition of checking for the `auth_list` in PR #43467 requires that the mocked return of `get_auth_list` actually contains something in the list. These mock calls need to be updated so we can check for the SaltInvocationErrors. 
--- tests/unit/daemons/test_masterapi.py | 12 ++++++------ tests/unit/test_master.py | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/unit/daemons/test_masterapi.py b/tests/unit/daemons/test_masterapi.py index 29ea37ecd47..d2f59312279 100644 --- a/tests/unit/daemons/test_masterapi.py +++ b/tests/unit/daemons/test_masterapi.py @@ -63,7 +63,7 @@ class LocalFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.runner(load) self.assertDictEqual(mock_ret, ret) @@ -93,7 +93,7 @@ class LocalFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_runner_eauth_salt_invocation_errpr(self): + def test_runner_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. 
@@ -102,7 +102,7 @@ class LocalFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.runner(load) self.assertDictEqual(mock_ret, ret) @@ -146,7 +146,7 @@ class LocalFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.wheel(load) self.assertDictEqual(mock_ret, ret) @@ -176,7 +176,7 @@ class LocalFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_wheel_eauth_salt_invocation_errpr(self): + def test_wheel_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. 
@@ -185,7 +185,7 @@ class LocalFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.local_funcs.wheel(load) self.assertDictEqual(mock_ret, ret) diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py index c663d2c45ca..b2dc733198c 100644 --- a/tests/unit/test_master.py +++ b/tests/unit/test_master.py @@ -93,7 +93,7 @@ class ClearFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) - def test_runner_eauth_salt_invocation_errpr(self): + def test_runner_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -102,7 +102,7 @@ class ClearFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.runner(clear_load) self.assertDictEqual(mock_ret, ret) @@ -155,7 +155,7 @@ class ClearFuncsTestCase(TestCase): u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.wheel(clear_load) self.assertDictEqual(mock_ret, ret) @@ -185,7 +185,7 @@ class ClearFuncsTestCase(TestCase): 
self.assertDictEqual(mock_ret, ret) - def test_wheel_eauth_salt_invocation_errpr(self): + def test_wheel_eauth_salt_invocation_error(self): ''' Asserts that an EauthAuthenticationError is returned when the user authenticates, but the command is malformed. @@ -194,7 +194,7 @@ class ClearFuncsTestCase(TestCase): mock_ret = {u'error': {u'name': u'SaltInvocationError', u'message': u'A command invocation error occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.wheel(clear_load) self.assertDictEqual(mock_ret, ret) From cdb028b794f82c898e25bdef7a28068772087704 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 13:09:38 -0400 Subject: [PATCH 219/348] Added key sorting to have deterministing string repr of RecursiveDictDiffer objects --- salt/utils/dictdiffer.py | 2 +- tests/unit/utils/test_dictdiffer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index abe8bfc1c54..6dc7799a570 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -217,7 +217,7 @@ class RecursiveDictDiffer(DictDiffer): Each inner difference is tabulated two space deeper ''' changes_strings = [] - for p in diff_dict.keys(): + for p in sorted(diff_dict.keys()): if sorted(diff_dict[p].keys()) == ['new', 'old']: # Some string formatting old_value = diff_dict[p]['old'] diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py index 23fa5955ebc..c2706d72a34 100644 --- a/tests/unit/utils/test_dictdiffer.py +++ b/tests/unit/utils/test_dictdiffer.py @@ -89,7 +89,7 @@ class RecursiveDictDifferTestCase(TestCase): 'a:\n' ' c from 2 to 4\n' ' e from \'old_value\' to \'new_value\'\n' - ' g from nothing to \'new_key\'\n' ' f from \'old_key\' to 
nothing\n' + ' g from nothing to \'new_key\'\n' 'h from nothing to \'new_key\'\n' 'i from nothing to None') From 3c26d4e3be2bb5261fc69f277d9efe80df3429aa Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 13:11:08 -0400 Subject: [PATCH 220/348] Updated all list_differ tests to compare dicts so the key order is not assumed --- tests/unit/utils/test_listdiffer.py | 57 +++++++++++++++-------------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/tests/unit/utils/test_listdiffer.py b/tests/unit/utils/test_listdiffer.py index ae8288c81c9..2df44278e3e 100644 --- a/tests/unit/utils/test_listdiffer.py +++ b/tests/unit/utils/test_listdiffer.py @@ -32,34 +32,43 @@ class ListDictDifferTestCase(TestCase): continue def test_added(self): - self.assertEqual(self.list_diff.added, - [{'key': 5, 'value': 'foo5', 'int_value': 105}]) + self.assertEqual(len(self.list_diff.added), 1) + self.assertDictEqual(self.list_diff.added[0], + {'key': 5, 'value': 'foo5', 'int_value': 105}) def test_removed(self): - self.assertEqual(self.list_diff.removed, - [{'key': 3, 'value': 'foo3', 'int_value': 103}]) + self.assertEqual(len(self.list_diff.removed), 1) + self.assertDictEqual(self.list_diff.removed[0], + {'key': 3, 'value': 'foo3', 'int_value': 103}) def test_diffs(self): - self.assertEqual(self.list_diff.diffs, - [{2: {'int_value': {'new': 112, 'old': 102}}}, - # Added items - {5: {'int_value': {'new': 105, 'old': NONE}, - 'key': {'new': 5, 'old': NONE}, - 'value': {'new': 'foo5', 'old': NONE}}}, - # Removed items - {3: {'int_value': {'new': NONE, 'old': 103}, - 'key': {'new': NONE, 'old': 3}, - 'value': {'new': NONE, 'old': 'foo3'}}}]) + self.assertEqual(len(self.list_diff.diffs), 3) + self.assertDictEqual(self.list_diff.diffs[0], + {2: {'int_value': {'new': 112, 'old': 102}}}) + self.assertDictEqual(self.list_diff.diffs[1], + # Added items + {5: {'int_value': {'new': 105, 'old': NONE}, + 'key': {'new': 5, 'old': NONE}, + 'value': {'new': 'foo5', 'old': 
NONE}}}) + self.assertDictEqual(self.list_diff.diffs[2], + # Removed items + {3: {'int_value': {'new': NONE, 'old': 103}, + 'key': {'new': NONE, 'old': 3}, + 'value': {'new': NONE, 'old': 'foo3'}}}) def test_new_values(self): - self.assertEqual(self.list_diff.new_values, - [{'key': 2, 'int_value': 112}, - {'key': 5, 'value': 'foo5', 'int_value': 105}]) + self.assertEqual(len(self.list_diff.new_values), 2) + self.assertDictEqual(self.list_diff.new_values[0], + {'key': 2, 'int_value': 112}) + self.assertDictEqual(self.list_diff.new_values[1], + {'key': 5, 'value': 'foo5', 'int_value': 105}) def test_old_values(self): - self.assertEqual(self.list_diff.old_values, - [{'key': 2, 'int_value': 102}, - {'key': 3, 'value': 'foo3', 'int_value': 103}]) + self.assertEqual(len(self.list_diff.old_values), 2) + self.assertDictEqual(self.list_diff.old_values[0], + {'key': 2, 'int_value': 102}) + self.assertDictEqual(self.list_diff.old_values[1], + {'key': 3, 'value': 'foo3', 'int_value': 103}) def test_changed_all(self): self.assertEqual(self.list_diff.changed(selection='all'), @@ -78,11 +87,3 @@ class ListDictDifferTestCase(TestCase): '\twill be removed\n' '\tidentified by key 5:\n' '\twill be added\n') - - def test_changes_str2(self): - self.assertEqual(self.list_diff.changes_str2, - ' key=2 (updated):\n' - ' int_value from 102 to 112\n' - ' key=3 (removed)\n' - ' key=5 (added): {\'int_value\': 105, \'key\': 5, ' - '\'value\': \'foo5\'}') From f98a555f9819eb40ce881286ced8c478dfe68e18 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 25 Sep 2017 15:37:38 -0400 Subject: [PATCH 221/348] Missed updating one of the master unit test mocks --- tests/unit/test_master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py index b2dc733198c..b12fcb6a93b 100644 --- a/tests/unit/test_master.py +++ b/tests/unit/test_master.py @@ -63,7 +63,7 @@ class ClearFuncsTestCase(TestCase): u'message': u'A command invocation error 
occurred: Check syntax.'}} with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ - patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])): ret = self.clear_funcs.runner(clear_load) self.assertDictEqual(mock_ret, ret) From dc1b36b7e239fd84ad0241b9a7ddd34b338340a6 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 25 Sep 2017 15:06:44 -0600 Subject: [PATCH 222/348] Change expected return for Windows --- tests/unit/beacons/test_status.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tests/unit/beacons/test_status.py b/tests/unit/beacons/test_status.py index fca75763445..4ab3d83a779 100644 --- a/tests/unit/beacons/test_status.py +++ b/tests/unit/beacons/test_status.py @@ -12,6 +12,7 @@ # Python libs from __future__ import absolute_import +import sys # Salt libs import salt.config @@ -45,14 +46,32 @@ class StatusBeaconTestCase(TestCase, LoaderModuleMockMixin): def test_empty_config(self, *args, **kwargs): config = {} ret = status.beacon(config) - self.assertEqual(sorted(list(ret[0]['data'])), sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time'])) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time']) + + self.assertEqual(sorted(list(ret[0]['data'])), expected) def test_deprecated_dict_config(self): config = {'time': ['all']} ret = status.beacon(config) - self.assertEqual(list(ret[0]['data']), ['time']) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = ['time'] + + self.assertEqual(list(ret[0]['data']), expected) def test_list_config(self): config = [{'time': ['all']}] ret = status.beacon(config) - self.assertEqual(list(ret[0]['data']), ['time']) + + if sys.platform.startswith('win'): + expected = [] + else: + expected = ['time'] + + self.assertEqual(list(ret[0]['data']), 
expected) From 922e60fa673d656462b9ff2b11f17f878a834e40 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 25 Sep 2017 15:25:31 -0600 Subject: [PATCH 223/348] Add os agnostic paths --- tests/unit/modules/test_poudriere.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/unit/modules/test_poudriere.py b/tests/unit/modules/test_poudriere.py index 52e8f322e37..9a181b59c5f 100644 --- a/tests/unit/modules/test_poudriere.py +++ b/tests/unit/modules/test_poudriere.py @@ -50,10 +50,12 @@ class PoudriereTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it make jail ``jname`` pkgng aware. ''' - ret1 = 'Could not create or find required directory /tmp/salt' - ret2 = 'Looks like file /tmp/salt/salt-make.conf could not be created' - ret3 = {'changes': 'Created /tmp/salt/salt-make.conf'} - mock = MagicMock(return_value='/tmp/salt') + temp_dir = os.path.join('tmp', 'salt') + conf_file = os.path.join('tmp', 'salt', 'salt-make.conf') + ret1 = 'Could not create or find required directory {0}'.format(temp_dir) + ret2 = 'Looks like file {0} could not be created'.format(conf_file) + ret3 = {'changes': 'Created {0}'.format(conf_file)} + mock = MagicMock(return_value=temp_dir) mock_true = MagicMock(return_value=True) with patch.dict(poudriere.__salt__, {'config.option': mock, 'file.write': mock_true}): From c369e337e4d8b0a6eee894e075cc1ebe688fbcff Mon Sep 17 00:00:00 2001 From: Eric Radman Date: Mon, 25 Sep 2017 16:47:20 -0400 Subject: [PATCH 224/348] Skip ZFS module check on OpenBSD Avoids the following error when running `salt-call` on OpenBSD: [ERROR ] Command '/usr/sbin/rcctl get zfs-fuse' failed with return code: 2 [ERROR ] output: rcctl: service zfs-fuse does not exist --- salt/modules/zfs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index dc42400796c..fd8b291f821 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -77,6 +77,9 @@ def __virtual__(): ) == 0: return 'zfs' + if 
__grains__['kernel'] == 'OpenBSD': + return False + _zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse') if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')): return 'zfs' From e5ebd28ee12fc06465441936e895db03e667b98f Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 12:34:25 -0400 Subject: [PATCH 225/348] Added get_new_service_instance_stub that creates a new service instance stub --- salt/utils/vmware.py | 52 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index d54dbced042..cbfb741dc0f 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- ''' +import sys +import ssl Connection library for VMware .. versionadded:: 2015.8.2 @@ -79,6 +81,8 @@ import atexit import errno import logging import time +import sys +import ssl # Import Salt Libs import salt.exceptions @@ -92,8 +96,9 @@ import salt.utils.stringutils from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: - from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub - from pyVmomi import vim, vmodl + from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \ + SoapStubAdapter + from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -405,6 +410,49 @@ def get_service_instance(host, username=None, password=None, protocol=None, return service_instance +def get_new_service_instance_stub(service_instance, path, ns=None, + version=None): + ''' + Returns a stub that points to a different path, + created from an existing connection. + + service_instance + The Service Instance. + + path + Path of the new stub. + + ns + Namespace of the new stub. + Default value is None + + version + Version of the new stub. + Default value is None. 
+ ''' + #For python 2.7.9 and later, the defaul SSL conext has more strict + #connection handshaking rule. We may need turn of the hostname checking + #and client side cert verification + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + stub = service_instance._stub + hostname = stub.host.split(':')[0] + session_cookie = stub.cookie.split('"')[1] + VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie + new_stub = SoapStubAdapter(host=hostname, + ns=ns, + path=path, + version=version, + poolSize=0, + sslContext=context) + new_stub.cookie = stub.cookie + return new_stub + + def get_service_instance_from_managed_object(mo_ref, name=''): ''' Retrieves the service instance from a managed object. From dd54f8ab15fc5038d356b49cb8b945e04344bff2 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:16:58 -0400 Subject: [PATCH 226/348] Added tests for salt.utils.vmware.get_new_service_instance_stub --- tests/unit/utils/vmware/test_connection.py | 93 +++++++++++++++++++++- 1 file changed, 92 insertions(+), 1 deletion(-) diff --git a/tests/unit/utils/vmware/test_connection.py b/tests/unit/utils/vmware/test_connection.py index 4a95e9b67fc..dd357d48708 100644 --- a/tests/unit/utils/vmware/test_connection.py +++ b/tests/unit/utils/vmware/test_connection.py @@ -13,6 +13,7 @@ import ssl import sys # Import Salt testing libraries +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ PropertyMock @@ -24,7 +25,7 @@ import salt.utils.vmware from salt.ext import six try: - from pyVmomi import vim, vmodl + from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -852,6 +853,96 @@ class IsConnectionToAVCenterTestCase(TestCase): excinfo.exception.strerror) 
+@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetNewServiceInstanceStub(TestCase, LoaderModuleMockMixin): + '''Tests for salt.utils.vmware.get_new_service_instance_stub''' + def setup_loader_modules(self): + return {salt.utils.vmware: { + '__virtual__': MagicMock(return_value='vmware'), + 'sys': MagicMock(), + 'ssl': MagicMock()}} + + def setUp(self): + self.mock_stub = MagicMock( + host='fake_host:1000', + cookie='ignore"fake_cookie') + self.mock_si = MagicMock( + _stub=self.mock_stub) + self.mock_ret = MagicMock() + self.mock_new_stub = MagicMock() + self.context_dict = {} + patches = (('salt.utils.vmware.VmomiSupport.GetRequestContext', + MagicMock( + return_value=self.context_dict)), + ('salt.utils.vmware.SoapStubAdapter', + MagicMock(return_value=self.mock_new_stub))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + type(salt.utils.vmware.sys).version_info = \ + PropertyMock(return_value=(2, 7, 9)) + self.mock_context = MagicMock() + self.mock_create_default_context = \ + MagicMock(return_value=self.mock_context) + salt.utils.vmware.ssl.create_default_context = \ + self.mock_create_default_context + + def tearDown(self): + for attr in ('mock_stub', 'mock_si', 'mock_ret', 'mock_new_stub', + 'context_dict', 'mock_context', + 'mock_create_default_context'): + delattr(self, attr) + + def test_ssl_default_context_loaded(self): + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path') + self.mock_create_default_context.assert_called_once_with() + self.assertFalse(self.mock_context.check_hostname) + self.assertEqual(self.mock_context.verify_mode, + salt.utils.vmware.ssl.CERT_NONE) + + def test_ssl_default_context_not_loaded(self): + type(salt.utils.vmware.sys).version_info = \ + PropertyMock(return_value=(2, 7, 8)) + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path') + 
self.assertEqual(self.mock_create_default_context.call_count, 0) + + def test_session_cookie_in_context(self): + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path') + self.assertEqual(self.context_dict['vcSessionCookie'], 'fake_cookie') + + def test_get_new_stub(self): + mock_get_new_stub = MagicMock() + with patch('salt.utils.vmware.SoapStubAdapter', mock_get_new_stub): + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path', 'fake_ns', 'fake_version') + mock_get_new_stub.assert_called_once_with( + host='fake_host', ns='fake_ns', path='fake_path', + version='fake_version', poolSize=0, sslContext=self.mock_context) + + def test_get_new_stub_2_7_8_python(self): + type(salt.utils.vmware.sys).version_info = \ + PropertyMock(return_value=(2, 7, 8)) + mock_get_new_stub = MagicMock() + with patch('salt.utils.vmware.SoapStubAdapter', mock_get_new_stub): + salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path', 'fake_ns', 'fake_version') + mock_get_new_stub.assert_called_once_with( + host='fake_host', ns='fake_ns', path='fake_path', + version='fake_version', poolSize=0, sslContext=None) + + def test_new_stub_returned(self): + ret = salt.utils.vmware.get_new_service_instance_stub( + self.mock_si, 'fake_path') + self.assertEqual(self.mock_new_stub.cookie, 'ignore"fake_cookie') + self.assertEqual(ret, self.mock_new_stub) + + @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') class GetServiceInstanceFromManagedObjectTestCase(TestCase): From 3e8ed5934d97e33a2dd5f1d19841b4e15cb87b16 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:33:12 -0400 Subject: [PATCH 227/348] Added initial sysdoc and imports of salt.utils.pbm --- salt/utils/pbm.py | 70 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 salt/utils/pbm.py diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py new file mode 100644 index 
00000000000..9d9e7bb9898 --- /dev/null +++ b/salt/utils/pbm.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +''' +Library for VMware Storage Policy management (via the pbm endpoint) + +This library is used to manage the various policies available in VMware + +:codeauthor: Alexandru Bleotu + +Dependencies +~~~~~~~~~~~~ + +- pyVmomi Python Module + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +# Import Salt Libs +import salt.utils.vmware +from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ + VMwareObjectRetrievalError + + +try: + from pyVmomi import pbm, vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only load if PyVmomi is installed. 
+ ''' + if HAS_PYVMOMI: + return True + else: + return False, 'Missing dependency: The salt.utils.pbm module ' \ + 'requires the pyvmomi library' From e77b912f2cb65b1901f2a297386b50f55a826dc8 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:34:33 -0400 Subject: [PATCH 228/348] Added salt.utils.pbm.get_profile_manager --- salt/utils/pbm.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index 9d9e7bb9898..aec53411122 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -68,3 +68,28 @@ def __virtual__(): else: return False, 'Missing dependency: The salt.utils.pbm module ' \ 'requires the pyvmomi library' + + +def get_profile_manager(service_instance): + ''' + Returns a profile manager + + service_instance + Service instance to the host or vCenter + ''' + stub = salt.utils.vmware.get_new_service_instance_stub( + service_instance, ns='pbm/2.0', path='/pbm/sdk') + pbm_si = pbm.ServiceInstance('ServiceInstance', stub) + try: + profile_manager = pbm_si.RetrieveContent().profileManager + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + return profile_manager From 6b2ddffb4c7a0585ddbce6d4c9fad8c7c150fc97 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:35:04 -0400 Subject: [PATCH 229/348] Added tests for salt.utils.pbm.get_profile_manager --- tests/unit/utils/test_pbm.py | 105 +++++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 tests/unit/utils/test_pbm.py diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py new file mode 100644 index 00000000000..11256c9b32e --- /dev/null +++ b/tests/unit/utils/test_pbm.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests functions in salt.utils.vsan +''' + +# Import python libraries +from __future__ import absolute_import +import logging + +# Import Salt testing libraries +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ + PropertyMock + +# Import Salt libraries +from salt.exceptions import VMwareApiError, VMwareRuntimeError +import salt.utils.pbm + +try: + from pyVmomi import vim, vmodl, pbm + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +# Get Logging Started +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetProfileManagerTestCase(TestCase): + '''Tests for salt.utils.pbm.get_profile_manager''' + def setUp(self): + self.mock_si = MagicMock() + self.mock_stub = MagicMock() + self.mock_prof_mgr = MagicMock() + self.mock_content = MagicMock() + self.mock_pbm_si = MagicMock( + RetrieveContent=MagicMock(return_value=self.mock_content)) + 
type(self.mock_content).profileManager = \ + PropertyMock(return_value=self.mock_prof_mgr) + patches = ( + ('salt.utils.vmware.get_new_service_instance_stub', + MagicMock(return_value=self.mock_stub)), + ('salt.utils.pbm.pbm.ServiceInstance', + MagicMock(return_value=self.mock_pbm_si))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_stub', 'mock_content', + 'mock_pbm_si', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_new_service_stub(self): + mock_get_new_service_stub = MagicMock() + with patch('salt.utils.vmware.get_new_service_instance_stub', + mock_get_new_service_stub): + salt.utils.pbm.get_profile_manager(self.mock_si) + mock_get_new_service_stub.assert_called_once_with( + self.mock_si, ns='pbm/2.0', path='/pbm/sdk') + + def test_pbm_si(self): + mock_get_pbm_si = MagicMock() + with patch('salt.utils.pbm.pbm.ServiceInstance', + mock_get_pbm_si): + salt.utils.pbm.get_profile_manager(self.mock_si) + mock_get_pbm_si.assert_called_once_with('ServiceInstance', + self.mock_stub) + + def test_return_profile_manager(self): + ret = salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(ret, self.mock_prof_mgr) + + def test_profile_manager_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_profile_manager_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_profile_manager_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_content).profileManager = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_profile_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From c790107d17ba097dcdc71cfc21b6d6cf126665bd Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:42:21 -0400 Subject: [PATCH 230/348] Added salt.utils.pbm.get_placement_solver --- salt/utils/pbm.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index aec53411122..eb2cf268873 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -93,3 +93,28 @@ def get_profile_manager(service_instance): log.exception(exc) raise VMwareRuntimeError(exc.msg) return profile_manager + + +def get_placement_solver(service_instance): + ''' + Returns a placement solver + + service_instance + Service instance to the host or vCenter + ''' + stub = salt.utils.vmware.get_new_service_instance_stub( + service_instance, ns='pbm/2.0', path='/pbm/sdk') + pbm_si = pbm.ServiceInstance('ServiceInstance', stub) + try: + profile_manager = pbm_si.RetrieveContent().placementSolver + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + return profile_manager From 68f48d123ae51a66f61cefe844d39ce1c34af697 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 13:42:56 -0400 Subject: [PATCH 231/348] Added tests for salt.utils.pbm.get_placement_solver --- tests/unit/utils/test_pbm.py | 75 ++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 11256c9b32e..8bdcbaa0752 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -103,3 +103,78 @@ class GetProfileManagerTestCase(TestCase): with self.assertRaises(VMwareRuntimeError) as excinfo: salt.utils.pbm.get_profile_manager(self.mock_si) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetPlacementSolverTestCase(TestCase): + '''Tests for salt.utils.pbm.get_placement_solver''' + def setUp(self): + self.mock_si = MagicMock() + self.mock_stub = MagicMock() + self.mock_prof_mgr = MagicMock() + self.mock_content = MagicMock() + self.mock_pbm_si = MagicMock( + RetrieveContent=MagicMock(return_value=self.mock_content)) + type(self.mock_content).placementSolver = \ + PropertyMock(return_value=self.mock_prof_mgr) + patches = ( + ('salt.utils.vmware.get_new_service_instance_stub', + MagicMock(return_value=self.mock_stub)), + ('salt.utils.pbm.pbm.ServiceInstance', + MagicMock(return_value=self.mock_pbm_si))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_stub', 'mock_content', + 'mock_pbm_si', 'mock_prof_mgr'): + delattr(self, attr) + + def 
test_get_new_service_stub(self): + mock_get_new_service_stub = MagicMock() + with patch('salt.utils.vmware.get_new_service_instance_stub', + mock_get_new_service_stub): + salt.utils.pbm.get_placement_solver(self.mock_si) + mock_get_new_service_stub.assert_called_once_with( + self.mock_si, ns='pbm/2.0', path='/pbm/sdk') + + def test_pbm_si(self): + mock_get_pbm_si = MagicMock() + with patch('salt.utils.pbm.pbm.ServiceInstance', + mock_get_pbm_si): + salt.utils.pbm.get_placement_solver(self.mock_si) + mock_get_pbm_si.assert_called_once_with('ServiceInstance', + self.mock_stub) + + def test_return_profile_manager(self): + ret = salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(ret, self.mock_prof_mgr) + + def test_placement_solver_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_placement_solver_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_placement_solver_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_content).placementSolver = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_placement_solver(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From eac509bab8bf1a6d8f9102b59f6e1daa3618984b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:39:47 -0400 Subject: [PATCH 232/348] Added salt.utils.pbm.get_capability_definitions --- salt/utils/pbm.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index eb2cf268873..5ca85ce4d99 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -118,3 +118,30 @@ def get_placement_solver(service_instance): log.exception(exc) raise VMwareRuntimeError(exc.msg) return profile_manager + + +def get_capability_definitions(profile_manager): + ''' + Returns a list of all capability definitions. + + profile_manager + Reference to the profile manager. + ''' + res_type = pbm.profile.ResourceType( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + try: + cap_categories = profile_manager.FetchCapabilityMetadata(res_type) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + cap_definitions = [] + for cat in cap_categories: + cap_definitions.extend(cat.capabilityMetadata) + return cap_definitions From e980407c54dea63ea35e642bb9ef2ba2d3bdc6a9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:40:41 -0400 Subject: [PATCH 233/348] Added tests for salt.utils.pbm.get_capability_definitions --- tests/unit/utils/test_pbm.py | 71 ++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 8bdcbaa0752..d59ce1afdd8 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -178,3 +178,74 @@ class GetPlacementSolverTestCase(TestCase): with self.assertRaises(VMwareRuntimeError) as excinfo: salt.utils.pbm.get_placement_solver(self.mock_si) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetCapabilityDefinitionsTestCase(TestCase): + '''Tests for salt.utils.pbm.get_capability_definitions''' + def setUp(self): + self.mock_res_type = MagicMock() + self.mock_cap_cats =[MagicMock(capabilityMetadata=['fake_cap_meta1', + 'fake_cap_meta2']), + MagicMock(capabilityMetadata=['fake_cap_meta3'])] + self.mock_prof_mgr = MagicMock( + FetchCapabilityMetadata=MagicMock(return_value=self.mock_cap_cats)) + patches = ( + ('salt.utils.pbm.pbm.profile.ResourceType', + MagicMock(return_value=self.mock_res_type)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_res_type', 'mock_cap_cats', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_res_type(self): + mock_get_res_type 
= MagicMock() + with patch('salt.utils.pbm.pbm.profile.ResourceType', + mock_get_res_type): + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + mock_get_res_type.assert_called_once_with( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + + def test_fetch_capabilities(self): + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.mock_prof_mgr.FetchCapabilityMetadata.assert_callend_once_with( + self.mock_res_type) + + def test_fetch_capabilities_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_fetch_capabilities_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_fetch_capabilities_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.FetchCapabilityMetadata = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_return_cap_definitions(self): + ret = salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) + self.assertEqual(ret, ['fake_cap_meta1', 'fake_cap_meta2', + 'fake_cap_meta3']) From f42de9c66b9e8df28dc10b1d32a197990ec1a849 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:41:41 -0400 Subject: [PATCH 234/348] 
Added salt.utils.pbm.get_policies_by_id --- salt/utils/pbm.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index 5ca85ce4d99..bf589b06c00 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -145,3 +145,27 @@ def get_capability_definitions(profile_manager): for cat in cap_categories: cap_definitions.extend(cat.capabilityMetadata) return cap_definitions + + +def get_policies_by_id(profile_manager, policy_ids): + ''' + Returns a list of policies with the specified ids. + + profile_manager + Reference to the profile manager. + + policy_ids + List of policy ids to retrieve. + ''' + try: + return profile_manager.RetrieveContent(policy_ids) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) From d8e0cbde9ac679c7beb64ea91b892e717dc31523 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:42:29 -0400 Subject: [PATCH 235/348] Added tests for salt.utils.pbm.get_policies_by_id --- tests/unit/utils/test_pbm.py | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index d59ce1afdd8..100a7313bf6 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -249,3 +249,53 @@ class GetCapabilityDefinitionsTestCase(TestCase): ret = salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) self.assertEqual(ret, ['fake_cap_meta1', 'fake_cap_meta2', 'fake_cap_meta3']) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetPoliciesById(TestCase): + '''Tests for salt.utils.pbm.get_policies_by_id''' + def setUp(self): + 
self.policy_ids = MagicMock() + self.mock_policies = MagicMock() + self.mock_prof_mgr = MagicMock( + RetrieveContent=MagicMock(return_value=self.mock_policies)) + + def tearDown(self): + for attr in ('policy_ids', 'mock_policies', 'mock_prof_mgr'): + delattr(self, attr) + + def test_retrieve_policies(self): + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.mock_prof_mgr.RetrieveContent.assert_callend_once_with( + self.policy_ids) + + def test_retrieve_policies_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_retrieve_policies_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_retrieve_policies_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.RetrieveContent = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_return_policies(self): + ret = salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) + self.assertEqual(ret, self.mock_policies) From df16bdb686446867c2f1e79c43bbfdc069232b0e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:43:22 -0400 Subject: [PATCH 236/348] Added 
salt.utils.pbm.get_storage_policies --- salt/utils/pbm.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index bf589b06c00..8bab7144352 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -169,3 +169,42 @@ def get_policies_by_id(profile_manager, policy_ids): except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) + + +def get_storage_policies(profile_manager, policy_names=[], + get_all_policies=False): + ''' + Returns a list of the storage policies, filtered by name. + + profile_manager + Reference to the profile manager. + + policy_names + List of policy names to filter by. + + get_all_policies + Flag specifying to return all policies, regardless of the specified + filter. + ''' + res_type = pbm.profile.ResourceType( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + try: + policy_ids = profile_manager.QueryProfile(res_type) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + log.trace('policy_ids = {0}'.format(policy_ids)) + # More policies are returned so we need to filter again + policies = [p for p in get_policies_by_id(profile_manager, policy_ids) + if p.resourceType.resourceType == + pbm.profile.ResourceTypeEnum.STORAGE] + if get_all_policies: + return policies + return [p for p in policies if p.name in policy_names] From 75764567c44002a3e71a8ca375a37b6c3dad3a09 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 18:44:07 -0400 Subject: [PATCH 237/348] Added tests for salt.utils.pbm.get_storage_policies --- tests/unit/utils/test_pbm.py | 90 ++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 100a7313bf6..829f6c293ae 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -299,3 +299,93 @@ class GetPoliciesById(TestCase): def test_return_policies(self): ret = salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) self.assertEqual(ret, self.mock_policies) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetStoragePoliciesTestCase(TestCase): + '''Tests for salt.utils.pbm.get_storage_policies''' + def setUp(self): + self.mock_res_type = MagicMock() + self.mock_policy_ids = MagicMock() + self.mock_prof_mgr = MagicMock( + QueryProfile=MagicMock(return_value=self.mock_policy_ids)) + # Policies + self.mock_policies=[] + for i in range(4): + mock_obj = MagicMock(resourceType=MagicMock( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE)) + mock_obj.name = 'fake_policy{0}'.format(i) + self.mock_policies.append(mock_obj) + patches = ( + ('salt.utils.pbm.pbm.profile.ResourceType', + 
MagicMock(return_value=self.mock_res_type)), + ('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=self.mock_policies))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_res_type', 'mock_policy_ids', 'mock_policies', + 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_res_type(self): + mock_get_res_type = MagicMock() + with patch('salt.utils.pbm.pbm.profile.ResourceType', + mock_get_res_type): + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_get_res_type.assert_called_once_with( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + + def test_retrieve_policy_ids(self): + mock_retrieve_policy_ids = MagicMock(return_value=self.mock_policy_ids) + self.mock_prof_mgr.QueryProfile = mock_retrieve_policy_ids + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_retrieve_policy_ids.asser_called_once_with(self.mock_res_type) + + def test_retrieve_policy_ids_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_retrieve_policy_ids_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_retrieve_policy_ids_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.QueryProfile = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_get_policies_by_id(self): + mock_get_policies_by_id = MagicMock(return_value=self.mock_policies) + with patch('salt.utils.pbm.get_policies_by_id', + mock_get_policies_by_id): + salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) + mock_get_policies_by_id.assert_called_once_with( + self.mock_prof_mgr, self.mock_policy_ids) + + def test_return_all_policies(self): + ret = salt.utils.pbm.get_storage_policies(self.mock_prof_mgr, + get_all_policies=True) + self.assertEqual(ret, self.mock_policies) + + def test_return_filtered_policies(self): + ret = salt.utils.pbm.get_storage_policies( + self.mock_prof_mgr, policy_names=['fake_policy1', 'fake_policy3']) + self.assertEqual(ret, [self.mock_policies[1], self.mock_policies[3]]) From d3744c80030a1d54c2978fa73fa7ed80eac76f35 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:44:48 -0400 Subject: [PATCH 238/348] Added salt.utils.pbm.create_storage_policy --- salt/utils/pbm.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index 8bab7144352..eb45c96da28 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -208,3 +208,27 @@ def get_storage_policies(profile_manager, 
policy_names=[], if get_all_policies: return policies return [p for p in policies if p.name in policy_names] + + +def create_storage_policy(profile_manager, policy_spec): + ''' + Creates a storage policy. + + profile_manager + Reference to the profile manager. + + policy_spec + Policy update spec. + ''' + try: + profile_manager.Create(policy_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) From c80df65776c9caaafa7c8e900bf9b5b9705dfa10 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:46:47 -0400 Subject: [PATCH 239/348] Fixed tests for salt.utils.pbm.get_policies_by_id --- tests/unit/utils/test_pbm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 829f6c293ae..538448720dc 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -253,7 +253,7 @@ class GetCapabilityDefinitionsTestCase(TestCase): @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') -class GetPoliciesById(TestCase): +class GetPoliciesByIdTestCase(TestCase): '''Tests for salt.utils.pbm.get_policies_by_id''' def setUp(self): self.policy_ids = MagicMock() @@ -344,7 +344,7 @@ class GetStoragePoliciesTestCase(TestCase): mock_retrieve_policy_ids = MagicMock(return_value=self.mock_policy_ids) self.mock_prof_mgr.QueryProfile = mock_retrieve_policy_ids salt.utils.pbm.get_storage_policies(self.mock_prof_mgr) - mock_retrieve_policy_ids.asser_called_once_with(self.mock_res_type) + mock_retrieve_policy_ids.assert_called_once_with(self.mock_res_type) def test_retrieve_policy_ids_raises_no_permissions(self): exc = vim.fault.NoPermission() From 
d43e3421350fb5ef81bb68780004dceda4d26ac8 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:47:36 -0400 Subject: [PATCH 240/348] Added tests for salt.utils.pbm.create_storage_policy --- tests/unit/utils/test_pbm.py | 48 ++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 538448720dc..789d0c56d4d 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -389,3 +389,51 @@ class GetStoragePoliciesTestCase(TestCase): ret = salt.utils.pbm.get_storage_policies( self.mock_prof_mgr, policy_names=['fake_policy1', 'fake_policy3']) self.assertEqual(ret, [self.mock_policies[1], self.mock_policies[3]]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class CreateStoragePolicyTestCase(TestCase): + '''Tests for salt.utils.pbm.create_storage_policy''' + def setUp(self): + self.mock_policy_spec = MagicMock() + self.mock_prof_mgr = MagicMock() + + def tearDown(self): + for attr in ('mock_policy_spec', 'mock_prof_mgr'): + delattr(self, attr) + + def test_create_policy(self): + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.mock_prof_mgr.Create.assert_called_once_with( + self.mock_policy_spec) + + def test_create_policy_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_create_policy_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_policy_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.Create = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, + self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From 9c05f7c7341ee1b1de218c299d04be800e4e10d3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:49:24 -0400 Subject: [PATCH 241/348] Added salt.utils.pbm.update_storage_policy --- salt/utils/pbm.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index eb45c96da28..57d2f598d42 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -232,3 +232,30 @@ def create_storage_policy(profile_manager, policy_spec): except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) + + +def update_storage_policy(profile_manager, policy, policy_spec): + ''' + Updates a storage policy. + + profile_manager + Reference to the profile manager. + + policy + Reference to the policy to be updated. + + policy_spec + Policy update spec. + ''' + try: + profile_manager.Update(policy.profileId, policy_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) From 79419702d934e53a0f621da3bb47328a9928ca62 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:50:14 -0400 Subject: [PATCH 242/348] Added tests for salt.utils.pbm.update_storage_policy --- tests/unit/utils/test_pbm.py | 49 ++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 789d0c56d4d..f398f5a4ea4 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -437,3 +437,52 @@ class CreateStoragePolicyTestCase(TestCase): salt.utils.pbm.create_storage_policy(self.mock_prof_mgr, self.mock_policy_spec) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class UpdateStoragePolicyTestCase(TestCase): + '''Tests for salt.utils.pbm.update_storage_policy''' + def setUp(self): + self.mock_policy_spec = MagicMock() + self.mock_policy = MagicMock() + self.mock_prof_mgr = MagicMock() + + def tearDown(self): + for attr in ('mock_policy_spec', 'mock_policy', 'mock_prof_mgr'): + delattr(self, attr) + + def test_create_policy(self): + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.mock_prof_mgr.Update.assert_called_once_with( + self.mock_policy.profileId, self.mock_policy_spec) + + def test_create_policy_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + 
self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_create_policy_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_create_policy_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.Update = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.update_storage_policy( + self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From 61c226c086e370e00546adf77f8e3c1039d7772c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:51:07 -0400 Subject: [PATCH 243/348] Added salt.utils.pbm.get_default_storage_policy_of_datastore --- salt/utils/pbm.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index 57d2f598d42..cb6474be852 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -259,3 +259,36 @@ def update_storage_policy(profile_manager, policy, policy_spec): except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) + + +def get_default_storage_policy_of_datastore(profile_manager, datastore): + ''' + Returns the default storage policy reference assigned to a datastore. + + profile_manager + Reference to the profile manager. + + datastore + Reference to the datastore. 
+ ''' + # Retrieve all datastores visible + hub = pbm.placement.PlacementHub( + hubId=datastore._moId, hubType='Datastore') + log.trace('placement_hub = {0}'.format(hub)) + try: + policy_id = profile_manager.QueryDefaultRequirementProfile(hub) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + policy_refs = get_policies_by_id(profile_manager, [policy_id]) + if not policy_refs: + raise VMwareObjectRetrievalError('Storage policy with id \'{0}\' was ' + 'not found'.format(policy_id)) + return policy_refs[0] From 5dbbac182d86bbd51cb22dcad89ae495a5730f57 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 19:51:38 -0400 Subject: [PATCH 244/348] Added tests for salt.utils.pbm.get_default_storage_policy_of_datastore --- tests/unit/utils/test_pbm.py | 106 ++++++++++++++++++++++++++++++++++- 1 file changed, 105 insertions(+), 1 deletion(-) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index f398f5a4ea4..b8803c475f0 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -16,7 +16,8 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ PropertyMock # Import Salt libraries -from salt.exceptions import VMwareApiError, VMwareRuntimeError +from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ + VMwareObjectRetrievalError import salt.utils.pbm try: @@ -486,3 +487,106 @@ class UpdateStoragePolicyTestCase(TestCase): salt.utils.pbm.update_storage_policy( self.mock_prof_mgr, self.mock_policy, self.mock_policy_spec) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class 
GetDefaultStoragePolicyOfDatastoreTestCase(TestCase): + '''Tests for salt.utils.pbm.get_default_storage_policy_of_datastore''' + def setUp(self): + self.mock_ds = MagicMock(_moId='fake_ds_moid') + self.mock_hub = MagicMock() + self.mock_policy_id = 'fake_policy_id' + self.mock_prof_mgr = MagicMock( + QueryDefaultRequirementProfile=MagicMock( + return_value=self.mock_policy_id)) + self.mock_policy_refs = [MagicMock()] + patches = ( + ('salt.utils.pbm.pbm.placement.PlacementHub', + MagicMock(return_value=self.mock_hub)), + ('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=self.mock_policy_refs))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ds', 'mock_hub', 'mock_policy_id', 'mock_prof_mgr', + 'mock_policy_refs'): + delattr(self, attr) + + def test_get_placement_hub(self): + mock_get_placement_hub = MagicMock() + with patch('salt.utils.pbm.pbm.placement.PlacementHub', + mock_get_placement_hub): + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_get_placement_hub.assert_called_once_with( + hubId='fake_ds_moid', hubType='Datastore') + + def test_query_default_requirement_profile(self): + mock_query_prof = MagicMock(return_value=self.mock_policy_id) + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + mock_query_prof + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_query_prof.assert_called_once_with(self.mock_hub) + + def test_query_default_requirement_profile_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Not enough 
permissions. Required privilege: ' + 'Fake privilege') + + def test_query_default_requirement_profile_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_query_default_requirement_profile_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.QueryDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_get_policies_by_id(self): + mock_get_policies_by_id = MagicMock() + with patch('salt.utils.pbm.get_policies_by_id', + mock_get_policies_by_id): + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + mock_get_policies_by_id.assert_called_once_with( + self.mock_prof_mgr, [self.mock_policy_id]) + + def test_no_policy_refs(self): + mock_get_policies_by_id = MagicMock() + with patch('salt.utils.pbm.get_policies_by_id', + MagicMock(return_value=None)): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Storage policy with id \'fake_policy_id\' was not ' + 'found') + + def test_return_policy_ref(self): + mock_get_policies_by_id = MagicMock() + ret = salt.utils.pbm.get_default_storage_policy_of_datastore( + self.mock_prof_mgr, self.mock_ds) + self.assertEqual(ret, self.mock_policy_refs[0]) From 20fca4be441df1e0794f312a5e752c20534f6e0a Mon Sep 17 00:00:00 2001 From: 
Alexandru Bleotu Date: Tue, 19 Sep 2017 20:05:05 -0400 Subject: [PATCH 245/348] Added salt.utils.pbm.assign_default_storage_policy_to_datastore --- salt/utils/pbm.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index cb6474be852..17b25acecaa 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -292,3 +292,35 @@ def get_default_storage_policy_of_datastore(profile_manager, datastore): raise VMwareObjectRetrievalError('Storage policy with id \'{0}\' was ' 'not found'.format(policy_id)) return policy_refs[0] + + +def assign_default_storage_policy_to_datastore(profile_manager, policy, + datastore): + ''' + Assigns a storage policy as the default policy to a datastore. + + profile_manager + Reference to the profile manager. + + policy + Reference to the policy to assigned. + + datastore + Reference to the datastore. + ''' + placement_hub = pbm.placement.PlacementHub( + hubId=datastore._moId, hubType='Datastore') + log.trace('placement_hub = {0}'.format(placement_hub)) + try: + profile_manager.AssignDefaultRequirementProfile(policy.profileId, + [placement_hub]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) From a3047ad3071c4c64d0e18035207c2bbf8d188519 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 19 Sep 2017 20:05:43 -0400 Subject: [PATCH 246/348] Added tests for salt.utils.pbm.assign_default_storage_policy_to_datastore --- tests/unit/utils/test_pbm.py | 72 ++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index b8803c475f0..4e08229e261 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -590,3 +590,75 @@ class GetDefaultStoragePolicyOfDatastoreTestCase(TestCase): ret = salt.utils.pbm.get_default_storage_policy_of_datastore( self.mock_prof_mgr, self.mock_ds) self.assertEqual(ret, self.mock_policy_refs[0]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class AssignDefaultStoragePolicyToDatastoreTestCase(TestCase): + '''Tests for salt.utils.pbm.assign_default_storage_policy_to_datastore''' + def setUp(self): + self.mock_ds = MagicMock(_moId='fake_ds_moid') + self.mock_policy = MagicMock() + self.mock_hub = MagicMock() + self.mock_prof_mgr = MagicMock() + patches = ( + ('salt.utils.pbm.pbm.placement.PlacementHub', + MagicMock(return_value=self.mock_hub)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ds', 'mock_hub', 'mock_policy', 'mock_prof_mgr'): + delattr(self, attr) + + def test_get_placement_hub(self): + mock_get_placement_hub = MagicMock() + with patch('salt.utils.pbm.pbm.placement.PlacementHub', + mock_get_placement_hub): + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + 
mock_get_placement_hub.assert_called_once_with( + hubId='fake_ds_moid', hubType='Datastore') + + def test_assign_default_requirement_profile(self): + mock_assign_prof = MagicMock() + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + mock_assign_prof + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + mock_assign_prof.assert_called_once_with( + self.mock_policy.profileId, [self.mock_hub]) + + def test_assign_default_requirement_profile_raises_no_permissions(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_assign_default_requirement_profile_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_assign_default_requirement_profile_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_prof_mgr.AssignDefaultRequirementProfile = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.pbm.assign_default_storage_policy_to_datastore( + self.mock_prof_mgr, self.mock_policy, self.mock_ds) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From 6da3ff5d933aa4631f7d8d5d9faaad582d30ebdb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 
2017 10:03:36 -0400 Subject: [PATCH 247/348] Added salt.modules.vsphere._get_policy_dict that transforms a policy VMware object into a dict representation --- salt/modules/vsphere.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index bde7c9c98e1..84f9a7ace64 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -177,6 +177,7 @@ import salt.utils.http import salt.utils.path import salt.utils.vmware import salt.utils.vsan +import salt.utils.pbm from salt.exceptions import CommandExecutionError, VMwareSaltError, \ ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \ VMwareApiError, InvalidEntityError @@ -193,7 +194,7 @@ except ImportError: HAS_JSONSCHEMA = False try: - from pyVmomi import vim, vmodl, VmomiSupport + from pyVmomi import vim, vmodl, pbm, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -4608,6 +4609,43 @@ def remove_dvportgroup(portgroup, dvs, service_instance=None): return True +def _get_policy_dict(policy): + '''Returns a dictionary representation of a policy''' + profile_dict = {'name': policy.name, + 'description': policy.description, + 'resource_type': policy.resourceType.resourceType} + subprofile_dicts = [] + if isinstance(policy, pbm.profile.CapabilityBasedProfile) and \ + isinstance(policy.constraints, + pbm.profile.SubProfileCapabilityConstraints): + + for subprofile in policy.constraints.subProfiles: + subprofile_dict = {'name': subprofile.name, + 'force_provision': subprofile.forceProvision} + cap_dicts = [] + for cap in subprofile.capability: + cap_dict = {'namespace': cap.id.namespace, + 'id': cap.id.id} + # We assume there is one constraint with one value set + val = cap.constraint[0].propertyInstance[0].value + if isinstance(val, pbm.capability.types.Range): + val_dict = {'type': 'range', + 'min': val.min, + 'max': val.max} + elif isinstance(val, 
pbm.capability.types.DiscreteSet): + val_dict = {'type': 'set', + 'values': val.values} + else: + val_dict = {'type': 'scalar', + 'value': val} + cap_dict['setting'] = val_dict + cap_dicts.append(cap_dict) + subprofile_dict['capabilities'] = cap_dicts + subprofile_dicts.append(subprofile_dict) + profile_dict['subprofiles'] = subprofile_dicts + return profile_dict + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 6bb0111b327134d908e2061471d7732b362b3926 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 10:04:49 -0400 Subject: [PATCH 248/348] Added salt.modules.vsphere.list_storage_policies that retrieves dict representations of storage policies, filtered by name --- salt/modules/vsphere.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 84f9a7ace64..59181fd6349 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4646,6 +4646,36 @@ def _get_policy_dict(policy): return profile_dict +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def list_storage_policies(policy_names=None, service_instance=None): + ''' + Returns a list of storage policies. + + policy_names + Names of policies to list. If None, all policies are listed. + Default is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + salt '*' vsphere.list_storage_policies + + salt '*' vsphere.list_storage_policy policy_names=[policy_name] + ''' + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + if not policy_names: + policies = salt.utils.pbm.get_storage_policies(profile_manager, + get_all_policies=True) + else: + policies = salt.utils.pbm.get_storage_policies(profile_manager, + policy_names) + return [_get_policy_dict(p) for p in policies] + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From f9f84fde9ab8f45b183570665a315b31bd30d3e5 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 10:06:42 -0400 Subject: [PATCH 249/348] Added salt.modules.vsphere.list_default_vsan_policy that retrieves dict representation of the default storage policies --- salt/modules/vsphere.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 59181fd6349..96b2ac037e6 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4676,6 +4676,33 @@ def list_storage_policies(policy_names=None, service_instance=None): return [_get_policy_dict(p) for p in policies] +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def list_default_vsan_policy(service_instance=None): + ''' + Returns the default vsan storage policy. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + salt '*' vsphere.list_storage_policies + + salt '*' vsphere.list_storage_policy policy_names=[policy_name] + ''' + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + policies = salt.utils.pbm.get_storage_policies(profile_manager, + get_all_policies=True) + def_policies = [p for p in policies + if p.systemCreatedProfileType == 'VsanDefaultProfile'] + if not def_policies: + raise excs.VMwareObjectRetrievalError('Default VSAN policy was not ' + 'retrieved') + return _get_policy_dict(def_policies[0]) + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 8275e5710681c3dcc585089014bd5887279ed728 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 10:10:48 -0400 Subject: [PATCH 250/348] Added salt.modules.vsphere._get_capability_definition_dict that transforms a VMware capability definition into a dict representation --- salt/modules/vsphere.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 96b2ac037e6..9655fd39fa8 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4703,6 +4703,17 @@ def list_default_vsan_policy(service_instance=None): return _get_policy_dict(def_policies[0]) +def _get_capability_definition_dict(cap_metadata): + # We assume each capability definition has one property with the same id + # as the capability so we display its type as belonging to the capability + # The object model permits multiple properties + return {'namespace': cap_metadata.id.namespace, + 'id': cap_metadata.id.id, + 'mandatory': cap_metadata.mandatory, + 'description': cap_metadata.summary.summary, + 'type': cap_metadata.propertyMetadata[0].type.typeName} + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From c88c207011821c7c2aad8d80a9eecb8be4befc4c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 
10:12:58 -0400 Subject: [PATCH 251/348] Added salt.modules.vsphere.list_capability_definitions that returns dict representations of VMware capability definition --- salt/modules/vsphere.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 9655fd39fa8..f92c3b6339c 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4714,6 +4714,26 @@ def _get_capability_definition_dict(cap_metadata): 'type': cap_metadata.propertyMetadata[0].type.typeName} +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def list_capability_definitions(service_instance=None): + ''' + Returns a list of the metadata of all capabilities in the vCenter. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + salt '*' vsphere.list_capabilities + ''' + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + ret_list = [_get_capability_definition_dict(c) for c in + salt.utils.pbm.get_capability_definitions(profile_manager)] + return ret_list + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From ee2af6fc9c129539e39412c4b9ff79d6287d822c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 12:26:14 -0400 Subject: [PATCH 252/348] Added salt.modules.vsphere._apply_policy_config that applies a storage dict representations values to a object --- salt/modules/vsphere.py | 49 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index f92c3b6339c..2a0000b8c5b 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4734,6 +4734,55 @@ def list_capability_definitions(service_instance=None): return ret_list +def _apply_policy_config(policy_spec, policy_dict): + '''Applies a policy dictionary to a policy spec''' + 
log.trace('policy_dict = {0}'.format(policy_dict)) + if policy_dict.get('name'): + policy_spec.name = policy_dict['name'] + if policy_dict.get('description'): + policy_spec.description = policy_dict['description'] + if policy_dict.get('subprofiles'): + # Incremental changes to subprofiles and capabilities are not + # supported because they would complicate updates too much + # The whole configuration of all sub-profiles is expected and applied + policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints() + subprofiles = [] + for subprofile_dict in policy_dict['subprofiles']: + subprofile_spec = \ + pbm.profile.SubProfileCapabilityConstraints.SubProfile( + name=subprofile_dict['name']) + cap_specs = [] + if subprofile_dict.get('force_provision'): + subprofile_spec.forceProvision = \ + subprofile_dict['force_provision'] + for cap_dict in subprofile_dict['capabilities']: + prop_inst_spec = pbm.capability.PropertyInstance( + id=cap_dict['id'] + ) + setting_type = cap_dict['setting']['type'] + if setting_type == 'set': + prop_inst_spec.value = pbm.capability.types.DiscreteSet() + prop_inst_spec.value.values = cap_dict['setting']['values'] + elif setting_type == 'range': + prop_inst_spec.value = pbm.capability.types.Range() + prop_inst_spec.value.max = cap_dict['setting']['max'] + prop_inst_spec.value.min = cap_dict['setting']['min'] + elif setting_type == 'scalar': + prop_inst_spec.value = cap_dict['setting']['value'] + cap_spec = pbm.capability.CapabilityInstance( + id=pbm.capability.CapabilityMetadata.UniqueId( + id=cap_dict['id'], + namespace=cap_dict['namespace']), + constraint=[pbm.capability.ConstraintInstance( + propertyInstance=[prop_inst_spec])]) + cap_specs.append(cap_spec) + subprofile_spec.capability = cap_specs + subprofiles.append(subprofile_spec) + policy_spec.constraints.subProfiles = subprofiles + log.trace('updated policy_spec = {0}'.format(policy_spec)) + return policy_spec + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 
'esxcluster') @gets_service_instance_via_proxy From a5ae51f6166267efdf1986b122dd978560e1db68 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 12:36:56 -0400 Subject: [PATCH 253/348] Added salt.modules.vsphere.create_storage_policy --- salt/modules/vsphere.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 2a0000b8c5b..551ecedc7dc 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4783,6 +4783,47 @@ def _apply_policy_config(policy_spec, policy_dict): return policy_spec +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def create_storage_policy(policy_name, policy_dict, service_instance=None): + ''' + Creates a storage policy. + + Supported capability types: scalar, set, range. + + policy_name + Name of the policy to create. + The value of the argument will override any existing name in + ``policy_dict``. + + policy_dict + Dictionary containing the changes to apply to the policy. + (exmaple in salt.states.pbm) + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + salt '*' vsphere.create_storage_policy policy_name='policy name' + policy_dict="$policy_dict" + ''' + log.trace('create storage policy \'{0}\', dict = {1}' + ''.format(policy_name, policy_dict)) + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec() + # Hardcode the storage profile resource type + policy_create_spec.resourceType = pbm.profile.ResourceType( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE) + # Set name argument + policy_dict['name'] = policy_name + log.trace('Setting policy values in policy_update_spec') + _apply_policy_config(policy_create_spec, policy_dict) + salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec) + return {'create_storage_policy': True} + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 41a65bf4140d31a84f395d533eaabbbbc8abb8c2 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 12:37:41 -0400 Subject: [PATCH 254/348] Added salt.modules.vsphere.update_storage_policy --- salt/modules/vsphere.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 551ecedc7dc..481df498a93 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4824,6 +4824,47 @@ def create_storage_policy(policy_name, policy_dict, service_instance=None): return {'create_storage_policy': True} +@depends(HAS_PYVMOMI) +@supports_proxies('esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def update_storage_policy(policy, policy_dict, service_instance=None): + ''' + Updates a storage policy. + + Supported capability types: scalar, set, range. + + policy + Name of the policy to update. + + policy_dict + Dictionary containing the changes to apply to the policy. 
+ (exmaple in salt.states.pbm) + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + salt '*' vsphere.update_storage_policy policy='policy name' + policy_dict="$policy_dict" + ''' + log.trace('updating storage policy, dict = {0}'.format(policy_dict)) + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) + if not policies: + raise excs.VMwareObjectRetrievalError('Policy \'{0}\' was not found' + ''.format(policy)) + policy_ref = policies[0] + policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec() + log.trace('Setting policy values in policy_update_spec') + for prop in ['description', 'constraints']: + setattr(policy_update_spec, prop, getattr(policy_ref, prop)) + _apply_policy_config(policy_update_spec, policy_dict) + salt.utils.pbm.update_storage_policy(profile_manager, policy_ref, + policy_update_spec) + return {'update_storage_policy': True} + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 582919f5513ad25625cf9d81854a68129184dc1d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 12:38:30 -0400 Subject: [PATCH 255/348] Added salt.modules.vsphere.list_default_storage_policy_of_datastore that lists the dict representation of the policy assigned by default to a datastore --- salt/modules/vsphere.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 481df498a93..cb6f6953c6f 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4865,6 +4865,41 @@ def update_storage_policy(policy, policy_dict, service_instance=None): return {'update_storage_policy': True} +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def 
list_default_storage_policy_of_datastore(datastore, service_instance=None): + ''' + Returns a list of datastores assign the the storage policies. + + datastore + Name of the datastore to assign. + The datastore needs to be visible to the VMware entity the proxy + points to. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 + ''' + log.trace('Listing the default storage policy of datastore \'{0}\'' + ''.format(datastore)) + # Find datastore + target_ref = _get_proxy_target(service_instance) + ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, + datastore_names=[datastore]) + if not ds_refs: + raise excs.VMwareObjectRetrievalError('Datastore \'{0}\' was not ' + 'found'.format(datastore)) + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) + policy = salt.utils.pbm.get_default_storage_policy_of_datastore( + profile_manager, ds_refs[0]) + return _get_policy_dict(policy) + + + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy From 0b2b79692a056498a5f1c87b0a1f1bb306e11627 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 18:10:40 -0400 Subject: [PATCH 256/348] Added salt.modules.vsphere.assign_default_storage_policy_to_datastore --- salt/modules/vsphere.py | 45 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index cb6f6953c6f..dce73ffa1a6 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4899,6 +4899,51 @@ def list_default_storage_policy_of_datastore(datastore, service_instance=None): return _get_policy_dict(policy) +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def assign_default_storage_policy_to_datastore(policy, datastore, + 
service_instance=None): + ''' + Assigns a storage policy as the default policy to a datastore. + + policy + Name of the policy to assign. + + datastore + Name of the datastore to assign. + The datastore needs to be visible to the VMware entity the proxy + points to. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + salt '*' vsphere.assign_storage_policy_to_datastore + policy='policy name' datastore=ds1 + ''' + log.trace('Assigning policy {0} to datastore {1}' + ''.format(policy, datastore)) + profile_manager = utils_pbm.get_profile_manager(service_instance) + # Find policy + policies = utils_pbm.get_storage_policies(profile_manager, [policy]) + if not policies: + raise excs.VMwareObjectRetrievalError('Policy \'{0}\' was not found' + ''.format(policy)) + policy_ref = policies[0] + # Find datastore + target_ref = _get_proxy_target(service_instance) + ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, + datastore_names=[datastore]) + if not ds_refs: + raise excs.VMwareObjectRetrievalError('Datastore \'{0}\' was not ' + 'found'.format(datastore)) + ds_ref = ds_refs[0] + utils_pbm.assign_default_storage_policy_to_datastore(profile_manager, + policy_ref, ds_ref) + return {'assign_storage_policy_to_datastore': True} + @depends(HAS_PYVMOMI) @supports_proxies('esxdatacenter', 'esxcluster') From 507910b9560bcfe248f6ed4c6815d4625e62420f Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 20:10:04 -0400 Subject: [PATCH 257/348] Added VCenterProxySchema JSON schema that validates the vcenter proxy --- salt/config/schemas/vcenter.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/config/schemas/vcenter.py b/salt/config/schemas/vcenter.py index 4867923f27a..1d76fb43a51 100644 --- a/salt/config/schemas/vcenter.py +++ b/salt/config/schemas/vcenter.py @@ -14,6 +14,8 @@ from __future__ import absolute_import # Import Salt libs from 
salt.utils.schema import (Schema, + ArrayItem, + IntegerItem, StringItem) @@ -31,3 +33,25 @@ class VCenterEntitySchema(Schema): vcenter = StringItem(title='vCenter', description='Specifies the vcenter hostname', required=True) + + +class VCenterProxySchema(Schema): + ''' + Schema for the configuration for the proxy to connect to a VCenter. + ''' + title = 'VCenter Proxy Connection Schema' + description = 'Schema that describes the connection to a VCenter' + additional_properties = False + proxytype = StringItem(required=True, + enum=['vcenter']) + vcenter = StringItem(required=True, pattern=r'[^\s]+') + mechanism = StringItem(required=True, enum=['userpass', 'sspi']) + username = StringItem() + passwords = ArrayItem(min_items=1, + items=StringItem(), + unique_items=True) + + domain = StringItem() + principal = StringItem(default='host') + protocol = StringItem(default='https') + port = IntegerItem(minimum=1) From 176222b0cf262b0197930d91b9ac4fce07d5e687 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 20:27:19 -0400 Subject: [PATCH 258/348] Added vcenter proxy --- salt/proxy/vcenter.py | 338 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 338 insertions(+) create mode 100644 salt/proxy/vcenter.py diff --git a/salt/proxy/vcenter.py b/salt/proxy/vcenter.py new file mode 100644 index 00000000000..7b9c9f95e30 --- /dev/null +++ b/salt/proxy/vcenter.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +''' +Proxy Minion interface module for managing VMWare vCenters. + +:codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)` +:codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + +Dependencies +============ + +- pyVmomi Python Module + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. 
If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original ESXi State +Module was developed against. + + +Configuration +============= +To use this proxy module, please use on of the following configurations: + + +.. code-block:: yaml + + proxy: + proxytype: vcenter + vcenter: + username: + mechanism: userpass + passwords: + - first_password + - second_password + - third_password + + proxy: + proxytype: vcenter + vcenter: + username: + domain: + mechanism: sspi + principal: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this Proxy Module, set this to +``vcenter``. + +vcenter +^^^^^^^ +The location of the VMware vCenter server (host of ip). Required + +username +^^^^^^^^ +The username used to login to the vcenter, such as ``root``. +Required only for userpass. + +mechanism +^^^^^^^^ +The mechanism used to connect to the vCenter server. Supported values are +``userpass`` and ``sspi``. Required. + +passwords +^^^^^^^^^ +A list of passwords to be used to try and login to the vCenter server. 
At least +one password in this list is required if mechanism is ``userpass`` + +The proxy integration will try the passwords listed in order. + +domain +^^^^^^ +User domain. Required if mechanism is ``sspi`` + +principal +^^^^^^^^ +Kerberos principal. Rquired if mechanism is ``sspi`` + +protocol +^^^^^^^^ +If the vCenter is not using the default protocol, set this value to an +alternate protocol. Default is ``https``. + +port +^^^^ +If the ESXi host is not using the default port, set this value to an +alternate port. Default is ``443``. + + +Salt Proxy +---------- + +After your pillar is in place, you can test the proxy. The proxy can run on +any machine that has network connectivity to your Salt Master and to the +vCenter server in the pillar. SaltStack recommends that the machine running the +salt-proxy process also run a regular minion, though it is not strictly +necessary. + +On the machine that will run the proxy, make sure there is an ``/etc/salt/proxy`` +file with at least the following in it: + +.. code-block:: yaml + + master: + +You can then start the salt-proxy process with: + +.. code-block:: bash + + salt-proxy --proxyid + +You may want to add ``-l debug`` to run the above in the foreground in +debug mode just to make sure everything is OK. + +Next, accept the key for the proxy on your salt-master, just like you +would for a regular minion: + +.. code-block:: bash + + salt-key -a + +You can confirm that the pillar data is in place for the proxy: + +.. code-block:: bash + + salt pillar.items + +And now you should be able to ping the ESXi host to make sure it is +responding: + +.. code-block:: bash + + salt test.ping + +At this point you can execute one-off commands against the vcenter. For +example, you can get if the proxy can actually connect to the vCenter: + +.. code-block:: bash + + salt vsphere.test_vcenter_connection + +Note that you don't need to provide credentials or an ip/hostname. Salt +knows to use the credentials you stored in Pillar. 
+ +It's important to understand how this particular proxy works. +:mod:`Salt.modules.vsphere ` is a +standard Salt execution module. + + If you pull up the docs for it you'll see +that almost every function in the module takes credentials and a targets either +a vcenter or a host. When credentials and a host aren't passed, Salt runs commands +through ``pyVmomi`` against the local machine. If you wanted, you could run +functions from this module on any host where an appropriate version of +``pyVmomi`` is installed, and that host would reach out over the network +and communicate with the ESXi host. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import os + +# Import Salt Libs +import salt.exceptions +from salt.config.schemas.vcenter import VCenterProxySchema +from salt.utils.dictupdate import merge + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['vcenter'] + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Variables are scoped to this module so we can have persistent data +# across calls to fns in here. +DETAILS = {} + + +# Set up logging +log = logging.getLogger(__name__) +# Define the module's virtual name +__virtualname__ = 'vcenter' + + +def __virtual__(): + ''' + Only load if the vsphere execution module is available. + ''' + if HAS_JSONSCHEMA: + return __virtualname__ + + return False, 'The vcenter proxy module did not load.' + + +def init(opts): + ''' + This function gets called when the proxy starts up. + For login the protocol and port are cached. 
+ ''' + log.info('Initting vcenter proxy module in process {0}' + ''.format(os.getpid())) + log.trace('VCenter Proxy Validating vcenter proxy input') + schema = VCenterProxySchema.serialize() + log.trace('schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) + try: + jsonschema.validate(proxy_conf, schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidConfigError(exc) + + # Save mandatory fields in cache + for key in ('vcenter', 'mechanism'): + DETAILS[key] = proxy_conf[key] + + # Additional validation + if DETAILS['mechanism'] == 'userpass': + if 'username' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\' , but no ' + '\'username\' key found in proxy config') + if not 'passwords' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\' , but no ' + '\'passwords\' key found in proxy config') + for key in ('username', 'passwords'): + DETAILS[key] = proxy_conf[key] + else: + if not 'domain' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\' , but no ' + '\'domain\' key found in proxy config') + if not 'principal' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\' , but no ' + '\'principal\' key found in proxy config') + for key in ('domain', 'principal'): + DETAILS[key] = proxy_conf[key] + + # Save optional + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') + + # Test connection + if DETAILS['mechanism'] == 'userpass': + # Get the correct login details + log.info('Retrieving credentials and testing vCenter connection for ' + 'mehchanism \'userpass\'') + try: + username, password = find_credentials() + DETAILS['password'] = password + except salt.exceptions.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + 
return False + return True + + +def ping(): + ''' + Returns True. + + CLI Example: + + .. code-block:: bash + + salt vcenter test.ping + ''' + return True + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. + ''' + log.debug('VCenter proxy shutdown() called...') + + +def find_credentials(): + ''' + Cycle through all the possible credentials and return the first one that + works. + ''' + + # if the username and password were already found don't fo though the + # connection process again + if 'username' in DETAILS and 'password' in DETAILS: + return DETAILS['username'], DETAILS['password'] + + passwords = __pillar__['proxy']['passwords'] + for password in passwords: + DETAILS['password'] = password + if not __salt__['vsphere.test_vcenter_connection'](): + # We are unable to authenticate + continue + # If we have data returned from above, we've successfully authenticated. + return DETAILS['username'], password + # We've reached the end of the list without successfully authenticating. 
+ raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' + 'incorrect credentials.') + + +def get_details(): + ''' + Function that returns the cached details + ''' + return DETAILS From 483fa0d8382ef1a10a656afe41bde10964b373c2 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 20:36:57 -0400 Subject: [PATCH 259/348] Added salt.modules.vcenter shim execution module between the proxy and other execution modules --- salt/modules/vcenter.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 salt/modules/vcenter.py diff --git a/salt/modules/vcenter.py b/salt/modules/vcenter.py new file mode 100644 index 00000000000..bac3c674b49 --- /dev/null +++ b/salt/modules/vcenter.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +''' +Module used to access the vcenter proxy connection methods +''' +from __future__ import absolute_import + +# Import python libs +import logging +import salt.utils + + +log = logging.getLogger(__name__) + +__proxyenabled__ = ['vcenter'] +# Define the module's virtual name +__virtualname__ = 'vcenter' + + +def __virtual__(): + ''' + Only work on proxy + ''' + if salt.utils.is_proxy(): + return __virtualname__ + return False + + +def get_details(): + return __proxy__['vcenter.get_details']() From 94929d541520456583bb549aab6d98b9b84c9142 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 20:38:14 -0400 Subject: [PATCH 260/348] Added support for vcenter proxy in salt.modules.vsphere --- salt/modules/vsphere.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index dce73ffa1a6..d4421ce1de2 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -208,7 +208,7 @@ else: log = logging.getLogger(__name__) __virtualname__ = 'vsphere' -__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter'] +__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] def 
__virtual__(): @@ -255,6 +255,8 @@ def _get_proxy_connection_details(): details = __salt__['esxcluster.get_details']() elif proxytype == 'esxdatacenter': details = __salt__['esxdatacenter.get_details']() + elif proxytype == 'vcenter': + details = __salt__['vcenter.get_details']() else: raise CommandExecutionError('\'{0}\' proxy is not supported' ''.format(proxytype)) @@ -380,7 +382,7 @@ def gets_service_instance_via_proxy(fn): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def get_service_instance_via_proxy(service_instance=None): ''' Returns a service instance to the proxied endpoint (vCenter/ESXi host). @@ -400,7 +402,7 @@ def get_service_instance_via_proxy(service_instance=None): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def disconnect(service_instance): ''' Disconnects from a vCenter or ESXi host @@ -1935,7 +1937,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') @gets_service_instance_via_proxy def test_vcenter_connection(service_instance=None): ''' @@ -4946,7 +4948,7 @@ def assign_default_storage_policy_to_datastore(policy, datastore, @depends(HAS_PYVMOMI) -@supports_proxies('esxdatacenter', 'esxcluster') +@supports_proxies('esxdatacenter', 'esxcluster', 'vcenter') @gets_service_instance_via_proxy def list_datacenters_via_proxy(datacenter_names=None, service_instance=None): ''' @@ -4984,7 +4986,7 @@ def list_datacenters_via_proxy(datacenter_names=None, service_instance=None): @depends(HAS_PYVMOMI) -@supports_proxies('esxdatacenter') +@supports_proxies('esxdatacenter', 'vcenter') @gets_service_instance_via_proxy def create_datacenter(datacenter_name, service_instance=None): 
''' @@ -6439,7 +6441,7 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, @depends(HAS_PYVMOMI) -@supports_proxies('esxcluster', 'esxdatacenter') +@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter') def _get_proxy_target(service_instance): ''' Returns the target object of a proxy. @@ -6467,6 +6469,9 @@ def _get_proxy_target(service_instance): reference = salt.utils.vmware.get_datacenter(service_instance, datacenter) + elif proxy_type == 'vcenter': + # vcenter proxy - the target is the root folder + reference = salt.utils.vmware.get_root_folder(service_instance) log.trace('reference = {0}'.format(reference)) return reference From 58445e927b8295bcd74f4047b619565458d0aeaa Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 21:00:39 -0400 Subject: [PATCH 261/348] Updated all vsphere tests to support the vcenter proxy --- tests/unit/modules/test_vsphere.py | 47 +++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 56669b900e8..9ebad773631 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -639,6 +639,15 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'mechanism': 'fake_mechanism', 'principal': 'fake_principal', 'domain': 'fake_domain'} + self.vcenter_details = {'vcenter': 'fake_vcenter', + 'username': 'fake_username', + 'password': 'fake_password', + 'protocol': 'fake_protocol', + 'port': 'fake_port', + 'mechanism': 'fake_mechanism', + 'principal': 'fake_principal', + 'domain': 'fake_domain'} + def tearDown(self): for attrname in ('esxi_host_details', 'esxi_vcenter_details', @@ -693,6 +702,17 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'fake_protocol', 'fake_port', 'fake_mechanism', 'fake_principal', 'fake_domain'), ret) + def test_vcenter_proxy_details(self): + with 
patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='vcenter')): + with patch.dict(vsphere.__salt__, + {'vcenter.get_details': MagicMock( + return_value=self.vcenter_details)}): + ret = vsphere._get_proxy_connection_details() + self.assertEqual(('fake_vcenter', 'fake_username', 'fake_password', + 'fake_protocol', 'fake_port', 'fake_mechanism', + 'fake_principal', 'fake_domain'), ret) + def test_unsupported_proxy_details(self): with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value='unsupported')): @@ -890,7 +910,7 @@ class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -933,7 +953,7 @@ class DisconnectTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -974,7 +994,7 @@ class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1049,7 +1069,7 @@ class ListDatacentersViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxcluster', 'esxdatacenter'] + supported_proxies = ['esxcluster', 'esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with 
patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1127,7 +1147,7 @@ class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxdatacenter'] + supported_proxies = ['esxdatacenter', 'vcenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1339,12 +1359,15 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): def setUp(self): attrs = (('mock_si', MagicMock()), ('mock_dc', MagicMock()), - ('mock_cl', MagicMock())) + ('mock_cl', MagicMock()), + ('mock_root', MagicMock())) for attr, mock_obj in attrs: setattr(self, attr, mock_obj) self.addCleanup(delattr, self, attr) attrs = (('mock_get_datacenter', MagicMock(return_value=self.mock_dc)), - ('mock_get_cluster', MagicMock(return_value=self.mock_cl))) + ('mock_get_cluster', MagicMock(return_value=self.mock_cl)), + ('mock_get_root_folder', + MagicMock(return_value=self.mock_root))) for attr, mock_obj in attrs: setattr(self, attr, mock_obj) self.addCleanup(delattr, self, attr) @@ -1360,7 +1383,8 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): MagicMock(return_value=(None, None, None, None, None, None, None, None, 'datacenter'))), ('salt.utils.vmware.get_datacenter', self.mock_get_datacenter), - ('salt.utils.vmware.get_cluster', self.mock_get_cluster)) + ('salt.utils.vmware.get_cluster', self.mock_get_cluster), + ('salt.utils.vmware.get_root_folder', self.mock_get_root_folder)) for module, mock_obj in patches: patcher = patch(module, mock_obj) patcher.start() @@ -1409,3 +1433,10 @@ class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): MagicMock(return_value='esxdatacenter')): ret = vsphere._get_proxy_target(self.mock_si) self.assertEqual(ret, self.mock_dc) + + def test_vcenter_proxy_return(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='vcenter')): + ret = 
vsphere._get_proxy_target(self.mock_si) + self.mock_get_root_folder.assert_called_once_with(self.mock_si) + self.assertEqual(ret, self.mock_root) From da39e7ce842d969032cd3184dde1024c450efdab Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 21:05:53 -0400 Subject: [PATCH 262/348] Comments, imports, init function in salt.states.pbm --- salt/states/pbm.py | 134 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 salt/states/pbm.py diff --git a/salt/states/pbm.py b/salt/states/pbm.py new file mode 100644 index 00000000000..3026368f4bc --- /dev/null +++ b/salt/states/pbm.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +''' +Manages VMware storage policies +(called pbm because the vCenter endpoint is /pbm) + +Examples +======== + +Storage policy +-------------- + +.. code-block:: python + +{ + "name": "salt_storage_policy" + "description": "Managed by Salt. Random capability values.", + "resource_type": "STORAGE", + "subprofiles": [ + { + "capabilities": [ + { + "setting": { + "type": "scalar", + "value": 2 + }, + "namespace": "VSAN", + "id": "hostFailuresToTolerate" + }, + { + "setting": { + "type": "scalar", + "value": 2 + }, + "namespace": "VSAN", + "id": "stripeWidth" + }, + { + "setting": { + "type": "scalar", + "value": true + }, + "namespace": "VSAN", + "id": "forceProvisioning" + }, + { + "setting": { + "type": "scalar", + "value": 50 + }, + "namespace": "VSAN", + "id": "proportionalCapacity" + }, + { + "setting": { + "type": "scalar", + "value": 0 + }, + "namespace": "VSAN", + "id": "cacheReservation" + } + ], + "name": "Rule-Set 1: VSAN", + "force_provision": null + } + ], +} + +Dependencies +============ + + +- pyVmomi Python Module + + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. 
If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 +''' + +# Import Python Libs +from __future__ import absolute_import +import sys +import logging +import json +import time +import copy + +# Import Salt Libs +from salt.exceptions import CommandExecutionError, ArgumentValueError +import salt.modules.vsphere as vsphere +from salt.utils import is_proxy +from salt.utils.dictdiffer import recursive_diff +from salt.utils.listdiffer import list_diff + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Get Logging Started +log = logging.getLogger(__name__) +# TODO change with vcenter +ALLOWED_PROXY_TYPES = ['esxcluster', 'vcenter'] +LOGIN_DETAILS = {} + +def __virtual__(): + if HAS_JSONSCHEMA: + return True + return False + + +def mod_init(low): + ''' + Init function + ''' + return True From 9f96c1fcc091452c844165d8b1cd10cc4bdc1914 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 21:07:19 -0400 Subject: [PATCH 263/348] Added salt.states.pbm.default_vsan_policy_configured state that configures the default storage policy --- salt/states/pbm.py | 141 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/salt/states/pbm.py b/salt/states/pbm.py index 3026368f4bc..a30eba6456b 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -132,3 +132,144 @@ def mod_init(low): Init function ''' return True + + +def default_vsan_policy_configured(name, policy): + ''' + Configures the default VSAN policy on a vCenter. + The state assumes there is only one default VSAN policy on a vCenter. 
+ + policy + Dict representation of a policy + ''' + # TODO Refactor when recurse_differ supports list_differ + # It's going to make the whole thing much easier + policy_copy = copy.deepcopy(policy) + proxy_type = __salt__['vsphere.get_proxy_type']() + log.trace('proxy_type = {0}'.format(proxy_type)) + # All allowed proxies have a shim execution module with the same + # name which implementes a get_details function + # All allowed proxies have a vcenter detail + vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter'] + log.info('Running {0} on vCenter ' + '\'{1}\''.format(name, vcenter)) + log.trace('policy = {0}'.format(policy)) + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + comments = [] + changes = {} + changes_required = False + si = None + + try: + #TODO policy schema validation + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_policy = __salt__['vsphere.list_default_vsan_policy'](si) + log.trace('current_policy = {0}'.format(current_policy)) + # Building all diffs between the current and expected policy + # XXX We simplify the comparison by assuming we have at most 1 + # sub_profile + if policy.get('subprofiles'): + if len(policy['subprofiles']) > 1: + raise ArgumentValueError('Multiple sub_profiles ({0}) are not ' + 'supported in the input policy') + subprofile = policy['subprofiles'][0] + current_subprofile = current_policy['subprofiles'][0] + capabilities_differ = list_diff(current_subprofile['capabilities'], + subprofile.get('capabilities', []), + key='id') + del policy['subprofiles'] + if subprofile.get('capabilities'): + del subprofile['capabilities'] + del current_subprofile['capabilities'] + # Get the subprofile diffs without the capability keys + subprofile_differ = recursive_diff(current_subprofile, + dict(subprofile)) + + del current_policy['subprofiles'] + policy_differ = recursive_diff(current_policy, policy) + if policy_differ.diffs or 
capabilities_differ.diffs or \ + subprofile_differ.diffs: + + if 'name' in policy_differ.new_values or \ + 'description' in policy_differ.new_values: + + raise ArgumentValueError( + '\'name\' and \'description\' of the default VSAN policy ' + 'cannot be updated') + changes_required = True + if __opts__['test']: + str_changes = [] + if policy_differ.diffs: + str_changes.extend([change for change in + policy_differ.changes_str.split('\n')]) + if subprofile_differ.diffs or capabilities_differ.diffs: + str_changes.append('subprofiles:') + if subprofile_differ.diffs: + str_changes.extend( + [' {0}'.format(change) for change in + subprofile_differ.changes_str.split('\n')]) + if capabilities_differ.diffs: + str_changes.append(' capabilities:') + str_changes.extend( + [' {0}'.format(change) for change in + capabilities_differ.changes_str2.split('\n')]) + comments.append( + 'State {0} will update the default VSAN policy on ' + 'vCenter \'{1}\':\n{2}' + ''.format(name, vcenter, '\n'.join(str_changes))) + else: + __salt__['vsphere.update_storage_policy']( + policy=current_policy['name'], + policy_dict=policy_copy, + service_instance=si) + comments.append('Updated the default VSAN policy in vCenter ' + '\'{0}\''.format(vcenter)) + log.info(comments[-1]) + + new_values = policy_differ.new_values + new_values['subprofiles'] = [subprofile_differ.new_values] + new_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.new_values + if not new_values['subprofiles'][0]['capabilities']: + del new_values['subprofiles'][0]['capabilities'] + if not new_values['subprofiles'][0]: + del new_values['subprofiles'] + old_values = policy_differ.old_values + old_values['subprofiles'] = [subprofile_differ.old_values] + old_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.old_values + if not old_values['subprofiles'][0]['capabilities']: + del old_values['subprofiles'][0]['capabilities'] + if not old_values['subprofiles'][0]: + del old_values['subprofiles'] + 
changes.update({'default_vsan_policy': + {'new': new_values, + 'old': old_values}}) + log.trace(changes) + __salt__['vsphere.disconnect'](si) + except CommandExecutionError as exc: + log.error('Error: {}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('Default VSAN policy in vCenter ' + '\'{0}\' is correctly configured. ' + 'Nothing to be done.'.format(vcenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': changes, + 'result': None}) + else: + ret.update({'changes': changes, + 'result': True}) + return ret From bb52e0d3318d1026061a1cc2350654ad86bdfb3e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 21:08:42 -0400 Subject: [PATCH 264/348] Added salt.states.pbm.storage_policies_configured state that creates/configures storage policies --- salt/states/pbm.py | 164 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) diff --git a/salt/states/pbm.py b/salt/states/pbm.py index a30eba6456b..54833151954 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -273,3 +273,167 @@ def default_vsan_policy_configured(name, policy): ret.update({'changes': changes, 'result': True}) return ret + + +def storage_policies_configured(name, policies): + ''' + Configures storage policies on a vCenter. 
+ + policies + List of dict representation of the required storage policies + ''' + comments = [] + changes = [] + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + log.trace('policies = {0}'.format(policies)) + si = None + try: + proxy_type = __salt__['vsphere.get_proxy_type']() + log.trace('proxy_type = {0}'.format(proxy_type)) + # All allowed proxies have a shim execution module with the same + # name which implementes a get_details function + # All allowed proxies have a vcenter detail + vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter'] + log.info('Running state \'{0}\' on vCenter ' + '\'{0}\''.format(name, vcenter)) + si = __salt__['vsphere.get_service_instance_via_proxy']() + current_policies = __salt__['vsphere.list_storage_policies']( + policy_names=[policy['name'] for policy in policies], + service_instance=si) + log.trace('current_policies = {0}'.format(current_policies)) + # TODO Refactor when recurse_differ supports list_differ + # It's going to make the whole thing much easier + for policy in policies: + policy_copy = copy.deepcopy(policy) + filtered_policies = [p for p in current_policies + if p['name'] == policy['name']] + current_policy = filtered_policies[0] \ + if filtered_policies else None + + if not current_policy: + changes_required = True + if __opts__['test']: + comments.append('State {0} will create the storage policy ' + '\'{1}\' on vCenter \'{2}\'' + ''.format(name, policy['name'], vcenter)) + else: + __salt__['vsphere.create_storage_policy']( + policy['name'], policy, service_instance=si) + comments.append('Created storage policy \'{0}\' on ' + 'vCenter \'{1}\''.format(policy['name'], + vcenter)) + changes.append({'new': policy, 'old': None}) + log.trace(comments[-1]) + # Continue with next + continue + + # Building all diffs between the current and expected policy + # XXX We simplify the comparison by assuming we have at most 1 + # sub_profile + if 
policy.get('subprofiles'): + if len(policy['subprofiles']) > 1: + raise ArgumentValueError('Multiple sub_profiles ({0}) are not ' + 'supported in the input policy') + subprofile = policy['subprofiles'][0] + current_subprofile = current_policy['subprofiles'][0] + capabilities_differ = list_diff(current_subprofile['capabilities'], + subprofile.get('capabilities', []), + key='id') + del policy['subprofiles'] + if subprofile.get('capabilities'): + del subprofile['capabilities'] + del current_subprofile['capabilities'] + # Get the subprofile diffs without the capability keys + subprofile_differ = recursive_diff(current_subprofile, + dict(subprofile)) + + del current_policy['subprofiles'] + policy_differ = recursive_diff(current_policy, policy) + if policy_differ.diffs or capabilities_differ.diffs or \ + subprofile_differ.diffs: + + changes_required = True + if __opts__['test']: + str_changes = [] + if policy_differ.diffs: + str_changes.extend( + [change for change in + policy_differ.changes_str.split('\n')]) + if subprofile_differ.diffs or \ + capabilities_differ.diffs: + + str_changes.append('subprofiles:') + if subprofile_differ.diffs: + str_changes.extend( + [' {0}'.format(change) for change in + subprofile_differ.changes_str.split('\n')]) + if capabilities_differ.diffs: + str_changes.append(' capabilities:') + str_changes.extend( + [' {0}'.format(change) for change in + capabilities_differ.changes_str2.split('\n')]) + comments.append( + 'State {0} will update the storage policy \'{1}\'' + ' on vCenter \'{2}\':\n{3}' + ''.format(name, policy['name'], vcenter, + '\n'.join( str_changes))) + else: + __salt__['vsphere.update_storage_policy']( + policy=current_policy['name'], + policy_dict=policy_copy, + service_instance=si) + comments.append('Updated the storage policy \'{0}\'' + 'in vCenter \'{1}\'' + ''.format(policy['name'], vcenter)) + log.info(comments[-1]) + + # Build new/old values to report what was changed + new_values = policy_differ.new_values + 
new_values['subprofiles'] = [subprofile_differ.new_values] + new_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.new_values + if not new_values['subprofiles'][0]['capabilities']: + del new_values['subprofiles'][0]['capabilities'] + if not new_values['subprofiles'][0]: + del new_values['subprofiles'] + old_values = policy_differ.old_values + old_values['subprofiles'] = [subprofile_differ.old_values] + old_values['subprofiles'][0]['capabilities'] = \ + capabilities_differ.old_values + if not old_values['subprofiles'][0]['capabilities']: + del old_values['subprofiles'][0]['capabilities'] + if not old_values['subprofiles'][0]: + del old_values['subprofiles'] + changes.append({'new': new_values, + 'old': old_values}) + else: + # No diffs found - no updates required + comments.append('Storage policy \'{0}\' is up to date. ' + 'Nothing to be done.'.format(policy['name'])) + __salt__['vsphere.disconnect'](si) + except CommandExecutionError as exc: + log.error('Error: {0}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + if not __opts__['test']: + ret['result'] = False + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + if not changes_required: + # We have no changes + ret.update({'comment': ('All storage policy in vCenter ' + '\'{0}\' is correctly configured. 
' + 'Nothing to be done.'.format(vcenter)), + 'result': True}) + else: + ret.update({'comment': '\n'.join(comments)}) + if __opts__['test']: + ret.update({'pchanges': {'storage_policies': changes}, + 'result': None}) + else: + ret.update({'changes': {'storage_policies': changes}, + 'result': True}) + return ret From 36fc89c9a2515c2ed4bdee6a375dae239377f403 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 21:09:45 -0400 Subject: [PATCH 265/348] Added salt.states.pbm.default_storage_policy_assigned state that manages default storage policies to datastore assigments --- salt/states/pbm.py | 61 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/salt/states/pbm.py b/salt/states/pbm.py index 54833151954..e77f16f48bc 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -437,3 +437,64 @@ def storage_policies_configured(name, policies): ret.update({'changes': {'storage_policies': changes}, 'result': True}) return ret + + +def default_storage_policy_assigned(name, policy, datastore): + ''' + Assigns a default storage policy to a datastore + + policy + Name of storage policy + + datastore + Name of datastore + ''' + log.info('Running state {0} for policy \'{1}\, datastore \'{2}\'.' + ''.format(name, policy, datastore)) + changes = {} + changes_required = False + ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, + 'pchanges': {}} + si = None + try: + si = __salt__['vsphere.get_service_instance_via_proxy']() + existing_policy = \ + __salt__['vsphere.list_default_storage_policy_of_datastore']( + datastore=datastore, service_instance=si) + if existing_policy['name'] == policy: + comment = ('Storage policy \'{0}\' is already assigned to ' + 'datastore \'{1}\'. Nothing to be done.' 
+ ''.format(policy, datastore)) + else: + changes_required = True + changes = { + 'default_storage_policy': {'old': existing_policy['name'], + 'new': policy}} + if (__opts__['test']): + comment = ('State {0} will assign storage policy \'{1}\' to ' + 'datastore \'{2}\'.').format(name, policy, + datastore) + else: + __salt__['vsphere.assign_default_storage_policy_to_datastore']( + policy=policy, datastore=datastore, service_instance=si) + comment = ('Storage policy \'{0} was assigned to datastore ' + '\'{1}\'.').format(policy, name) + log.info(comment) + except CommandExecutionError as exc: + log.error('Error: {}'.format(exc)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({'comment': exc.strerror, + 'result': False if not __opts__['test'] else None}) + return ret + ret['comment'] = comment + if changes_required: + if __opts__['test']: + ret.update({'result': None, + 'pchanges': changes}) + else: + ret.update({'result': True, + 'changes': changes}) + else: + ret['result'] = True + return ret From b6577e432894ad31cd3781201415b51e0af1c541 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 12:54:23 -0400 Subject: [PATCH 266/348] pylint --- salt/modules/vsphere.py | 26 +++++++++++++------------- salt/proxy/vcenter.py | 8 ++++---- salt/states/pbm.py | 21 +++++---------------- salt/utils/pbm.py | 5 ++++- salt/utils/vmware.py | 2 +- tests/unit/modules/test_vsphere.py | 1 - tests/unit/utils/test_pbm.py | 14 +++++++------- 7 files changed, 34 insertions(+), 43 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index d4421ce1de2..bc59077b1af 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4700,8 +4700,8 @@ def list_default_vsan_policy(service_instance=None): def_policies = [p for p in policies if p.systemCreatedProfileType == 'VsanDefaultProfile'] if not def_policies: - raise excs.VMwareObjectRetrievalError('Default VSAN policy was not ' - 'retrieved') + raise VMwareObjectRetrievalError('Default 
VSAN policy was not ' + 'retrieved') return _get_policy_dict(def_policies[0]) @@ -4854,8 +4854,8 @@ def update_storage_policy(policy, policy_dict, service_instance=None): profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) if not policies: - raise excs.VMwareObjectRetrievalError('Policy \'{0}\' was not found' - ''.format(policy)) + raise VMwareObjectRetrievalError('Policy \'{0}\' was not found' + ''.format(policy)) policy_ref = policies[0] policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec() log.trace('Setting policy values in policy_update_spec') @@ -4893,8 +4893,8 @@ def list_default_storage_policy_of_datastore(datastore, service_instance=None): ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, datastore_names=[datastore]) if not ds_refs: - raise excs.VMwareObjectRetrievalError('Datastore \'{0}\' was not ' - 'found'.format(datastore)) + raise VMwareObjectRetrievalError('Datastore \'{0}\' was not ' + 'found'.format(datastore)) profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policy = salt.utils.pbm.get_default_storage_policy_of_datastore( profile_manager, ds_refs[0]) @@ -4927,12 +4927,12 @@ def assign_default_storage_policy_to_datastore(policy, datastore, ''' log.trace('Assigning policy {0} to datastore {1}' ''.format(policy, datastore)) - profile_manager = utils_pbm.get_profile_manager(service_instance) + profile_manager = salt.utils.pbm.get_profile_manager(service_instance) # Find policy - policies = utils_pbm.get_storage_policies(profile_manager, [policy]) + policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) if not policies: - raise excs.VMwareObjectRetrievalError('Policy \'{0}\' was not found' - ''.format(policy)) + raise VMwareObjectRetrievalError('Policy \'{0}\' was not found' + ''.format(policy)) policy_ref = policies[0] # Find datastore target_ref = 
_get_proxy_target(service_instance) @@ -4942,9 +4942,9 @@ def assign_default_storage_policy_to_datastore(policy, datastore, raise excs.VMwareObjectRetrievalError('Datastore \'{0}\' was not ' 'found'.format(datastore)) ds_ref = ds_refs[0] - utils_pbm.assign_default_storage_policy_to_datastore(profile_manager, - policy_ref, ds_ref) - return {'assign_storage_policy_to_datastore': True} + salt.utils.pbm.assign_default_storage_policy_to_datastore( + profile_manager, policy_ref, ds_ref) + return True @depends(HAS_PYVMOMI) diff --git a/salt/proxy/vcenter.py b/salt/proxy/vcenter.py index 7b9c9f95e30..5c5ad797d19 100644 --- a/salt/proxy/vcenter.py +++ b/salt/proxy/vcenter.py @@ -189,7 +189,7 @@ import os # Import Salt Libs import salt.exceptions -from salt.config.schemas.vcenter import VCenterProxySchema +from salt.config.schemas.vcenter import VCenterProxySchema from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. @@ -250,18 +250,18 @@ def init(opts): raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\' , but no ' '\'username\' key found in proxy config') - if not 'passwords' in proxy_conf: + if 'passwords' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\' , but no ' '\'passwords\' key found in proxy config') for key in ('username', 'passwords'): DETAILS[key] = proxy_conf[key] else: - if not 'domain' in proxy_conf: + if 'domain' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\' , but no ' '\'domain\' key found in proxy config') - if not 'principal' in proxy_conf: + if 'principal' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\' , but no ' '\'principal\' key found in proxy config') diff --git a/salt/states/pbm.py b/salt/states/pbm.py index e77f16f48bc..bf54f620ad0 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -95,32 +95,21 @@ PyVmomi can be installed via pip: 
# Import Python Libs from __future__ import absolute_import -import sys import logging -import json -import time import copy # Import Salt Libs from salt.exceptions import CommandExecutionError, ArgumentValueError -import salt.modules.vsphere as vsphere -from salt.utils import is_proxy from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff -# External libraries -try: - import jsonschema - HAS_JSONSCHEMA = True -except ImportError: - HAS_JSONSCHEMA = False - # Get Logging Started log = logging.getLogger(__name__) # TODO change with vcenter ALLOWED_PROXY_TYPES = ['esxcluster', 'vcenter'] LOGIN_DETAILS = {} + def __virtual__(): if HAS_JSONSCHEMA: return True @@ -297,7 +286,7 @@ def storage_policies_configured(name, policies): # All allowed proxies have a vcenter detail vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter'] log.info('Running state \'{0}\' on vCenter ' - '\'{0}\''.format(name, vcenter)) + '\'{1}\''.format(name, vcenter)) si = __salt__['vsphere.get_service_instance_via_proxy']() current_policies = __salt__['vsphere.list_storage_policies']( policy_names=[policy['name'] for policy in policies], @@ -378,7 +367,7 @@ def storage_policies_configured(name, policies): 'State {0} will update the storage policy \'{1}\'' ' on vCenter \'{2}\':\n{3}' ''.format(name, policy['name'], vcenter, - '\n'.join( str_changes))) + '\n'.join(str_changes))) else: __salt__['vsphere.update_storage_policy']( policy=current_policy['name'], @@ -449,7 +438,7 @@ def default_storage_policy_assigned(name, policy, datastore): datastore Name of datastore ''' - log.info('Running state {0} for policy \'{1}\, datastore \'{2}\'.' + log.info('Running state {0} for policy \'{1}\', datastore \'{2}\'.' 
''.format(name, policy, datastore)) changes = {} changes_required = False @@ -470,7 +459,7 @@ def default_storage_policy_assigned(name, policy, datastore): changes = { 'default_storage_policy': {'old': existing_policy['name'], 'new': policy}} - if (__opts__['test']): + if __opts__['test']: comment = ('State {0} will assign storage policy \'{1}\' to ' 'datastore \'{2}\'.').format(name, policy, datastore) diff --git a/salt/utils/pbm.py b/salt/utils/pbm.py index 17b25acecaa..c7fa43eaa4b 100644 --- a/salt/utils/pbm.py +++ b/salt/utils/pbm.py @@ -171,7 +171,7 @@ def get_policies_by_id(profile_manager, policy_ids): raise VMwareRuntimeError(exc.msg) -def get_storage_policies(profile_manager, policy_names=[], +def get_storage_policies(profile_manager, policy_names=None, get_all_policies=False): ''' Returns a list of the storage policies, filtered by name. @@ -181,6 +181,7 @@ def get_storage_policies(profile_manager, policy_names=[], policy_names List of policy names to filter by. + Default is None. get_all_policies Flag specifying to return all policies, regardless of the specified @@ -207,6 +208,8 @@ def get_storage_policies(profile_manager, policy_names=[], pbm.profile.ResourceTypeEnum.STORAGE] if get_all_policies: return policies + if not policy_names: + policy_names = [] return [p for p in policies if p.name in policy_names] diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index cbfb741dc0f..018bb104175 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -434,7 +434,7 @@ def get_new_service_instance_stub(service_instance, path, ns=None, #connection handshaking rule. 
We may need turn of the hostname checking #and client side cert verification context = None - if sys.version_info[:3] > (2,7,8): + if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 9ebad773631..ed043f27283 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -648,7 +648,6 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'principal': 'fake_principal', 'domain': 'fake_domain'} - def tearDown(self): for attrname in ('esxi_host_details', 'esxi_vcenter_details', 'esxdatacenter_details', 'esxcluster_details'): diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index 4e08229e261..aec9a51da5c 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -10,7 +10,6 @@ from __future__ import absolute_import import logging # Import Salt testing libraries -from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ PropertyMock @@ -18,6 +17,7 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ # Import Salt libraries from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ VMwareObjectRetrievalError +from salt.ext.six.moves import range import salt.utils.pbm try: @@ -187,9 +187,9 @@ class GetCapabilityDefinitionsTestCase(TestCase): '''Tests for salt.utils.pbm.get_capability_definitions''' def setUp(self): self.mock_res_type = MagicMock() - self.mock_cap_cats =[MagicMock(capabilityMetadata=['fake_cap_meta1', - 'fake_cap_meta2']), - MagicMock(capabilityMetadata=['fake_cap_meta3'])] + self.mock_cap_cats = [MagicMock(capabilityMetadata=['fake_cap_meta1', + 'fake_cap_meta2']), + MagicMock(capabilityMetadata=['fake_cap_meta3'])] 
self.mock_prof_mgr = MagicMock( FetchCapabilityMetadata=MagicMock(return_value=self.mock_cap_cats)) patches = ( @@ -312,7 +312,7 @@ class GetStoragePoliciesTestCase(TestCase): self.mock_prof_mgr = MagicMock( QueryProfile=MagicMock(return_value=self.mock_policy_ids)) # Policies - self.mock_policies=[] + self.mock_policies = [] for i in range(4): mock_obj = MagicMock(resourceType=MagicMock( resourceType=pbm.profile.ResourceTypeEnum.STORAGE)) @@ -576,7 +576,7 @@ class GetDefaultStoragePolicyOfDatastoreTestCase(TestCase): def test_no_policy_refs(self): mock_get_policies_by_id = MagicMock() - with path('salt.utils.pbm.get_policies_by_id', + with patch('salt.utils.pbm.get_policies_by_id', MagicMock(return_value=None)): with self.assertRaises(VMwareObjectRetrievalError) as excinfo: salt.utils.pbm.get_default_storage_policy_of_datastore( @@ -585,7 +585,7 @@ class GetDefaultStoragePolicyOfDatastoreTestCase(TestCase): 'Storage policy with id \'fake_policy_id\' was not ' 'found') - def test_no_policy_refs(self): + def test_return_policy_ref(self): mock_get_policies_by_id = MagicMock() ret = salt.utils.pbm.get_default_storage_policy_of_datastore( self.mock_prof_mgr, self.mock_ds) From f484bd52fd863af1ab55d729e9bf4182858cb6a6 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 15:33:00 -0400 Subject: [PATCH 267/348] more pylint --- salt/states/pbm.py | 4 +--- tests/unit/utils/vmware/test_connection.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/states/pbm.py b/salt/states/pbm.py index bf54f620ad0..775b716f446 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -111,9 +111,7 @@ LOGIN_DETAILS = {} def __virtual__(): - if HAS_JSONSCHEMA: - return True - return False + return True def mod_init(low): diff --git a/tests/unit/utils/vmware/test_connection.py b/tests/unit/utils/vmware/test_connection.py index dd357d48708..d8afbb0504c 100644 --- a/tests/unit/utils/vmware/test_connection.py +++ 
b/tests/unit/utils/vmware/test_connection.py @@ -25,7 +25,7 @@ import salt.utils.vmware from salt.ext import six try: - from pyVmomi import vim, vmodl, VmomiSupport + from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False From e1bfe248915d6fc623e3bfb4a96c008821dd760a Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 22 Sep 2017 08:59:33 -0400 Subject: [PATCH 268/348] Removed excs reference from new methods in salt.modules.vsphere --- salt/modules/vsphere.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index bc59077b1af..0c923858042 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4939,8 +4939,8 @@ def assign_default_storage_policy_to_datastore(policy, datastore, ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref, datastore_names=[datastore]) if not ds_refs: - raise excs.VMwareObjectRetrievalError('Datastore \'{0}\' was not ' - 'found'.format(datastore)) + raise VMwareObjectRetrievalError('Datastore \'{0}\' was not ' + 'found'.format(datastore)) ds_ref = ds_refs[0] salt.utils.pbm.assign_default_storage_policy_to_datastore( profile_manager, policy_ref, ds_ref) From 4ff745d2c5d30805222d9d7981aa7302b94c4542 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 22 Sep 2017 15:15:47 -0400 Subject: [PATCH 269/348] Added python/pyvmomi compatibility check to salt.states.pbm + removed reference to Python 2.6 --- salt/states/pbm.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/salt/states/pbm.py b/salt/states/pbm.py index 775b716f446..00945fc65cf 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -97,20 +97,34 @@ PyVmomi can be installed via pip: from __future__ import absolute_import import logging import copy +import sys # Import Salt Libs from salt.exceptions import CommandExecutionError, ArgumentValueError from salt.utils.dictdiffer import recursive_diff 
from salt.utils.listdiffer import list_diff +# External libraries +try: + from pyVmomi import VmomiSupport + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + # Get Logging Started log = logging.getLogger(__name__) -# TODO change with vcenter -ALLOWED_PROXY_TYPES = ['esxcluster', 'vcenter'] -LOGIN_DETAILS = {} def __virtual__(): + if not HAS_PYVMOMI: + return False, 'State module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('State module did not load: Incompatible versions ' + 'of Python and pyVmomi present. See Issue #29537.') return True From ac79f89ffa92bbb2c27c3a1418f6fb63b93838e0 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 04:59:00 -0400 Subject: [PATCH 270/348] Fixed utils.pbm unit tests --- tests/unit/utils/test_pbm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/utils/test_pbm.py b/tests/unit/utils/test_pbm.py index aec9a51da5c..6c2be0f9b58 100644 --- a/tests/unit/utils/test_pbm.py +++ b/tests/unit/utils/test_pbm.py @@ -214,7 +214,7 @@ class GetCapabilityDefinitionsTestCase(TestCase): def test_fetch_capabilities(self): salt.utils.pbm.get_capability_definitions(self.mock_prof_mgr) - self.mock_prof_mgr.FetchCapabilityMetadata.assert_callend_once_with( + self.mock_prof_mgr.FetchCapabilityMetadata.assert_called_once_with( self.mock_res_type) def test_fetch_capabilities_raises_no_permissions(self): @@ -268,7 +268,7 @@ class GetPoliciesByIdTestCase(TestCase): def test_retrieve_policies(self): salt.utils.pbm.get_policies_by_id(self.mock_prof_mgr, self.policy_ids) - self.mock_prof_mgr.RetrieveContent.assert_callend_once_with( + self.mock_prof_mgr.RetrieveContent.assert_called_once_with( self.policy_ids) def test_retrieve_policies_raises_no_permissions(self): From 553335b1c939df6b4cbe6871e8764178a9a50d5e 
Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 26 Sep 2017 07:52:59 -0500 Subject: [PATCH 271/348] Fix incorrect value in docstring --- salt/modules/win_wua.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py index 237fb749246..63409951e27 100644 --- a/salt/modules/win_wua.py +++ b/salt/modules/win_wua.py @@ -110,7 +110,7 @@ def available(software=True, Include software updates in the results (default is True) drivers (bool): - Include driver updates in the results (default is False) + Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. From 7b9c3726771e911c9590e2f399077c93e8870cd6 Mon Sep 17 00:00:00 2001 From: Eric Radman Date: Tue, 26 Sep 2017 09:38:41 -0400 Subject: [PATCH 272/348] Only inspect file attribute if lsattr(1) is installed lsattr/chattr is not installed on many Unix-like platforms by default, including *BSD, Solaris, and minimal Linux distributions such as Alpine. --- salt/modules/file.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 7dfd5ced011..f2ee22655a3 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -4281,7 +4281,8 @@ def extract_hash(hash_fn, def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False): ''' - Check the permissions on files, modify attributes and chown if needed + Check the permissions on files, modify attributes and chown if needed. File + attributes are only verified if lsattr(1) is installed. 
CLI Example: @@ -4293,6 +4294,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) ``follow_symlinks`` option added ''' name = os.path.expanduser(name) + lsattr_cmd = salt.utils.path.which('lsattr') if not ret: ret = {'name': name, @@ -4318,7 +4320,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) perms['lmode'] = salt.utils.normalize_mode(cur['mode']) is_dir = os.path.isdir(name) - if not salt.utils.platform.is_windows() and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd: # List attributes on file perms['lattrs'] = ''.join(lsattr(name)[name]) # Remove attributes on file so changes can be enforced. @@ -4429,7 +4431,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if __opts__['test'] is True and ret['changes']: ret['result'] = None - if not salt.utils.platform.is_windows() and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd: # Replace attributes on file if it had been removed if perms['lattrs']: chattr(name, operator='add', attributes=perms['lattrs']) From 617c5b72acf76c77f48c966d7d3ad07111f9abe9 Mon Sep 17 00:00:00 2001 From: Eric Radman Date: Tue, 26 Sep 2017 15:11:57 -0400 Subject: [PATCH 273/348] Fix DeprecationWarning for use of 'salt.utils.is_windows' --- salt/modules/kubernetes.py | 4 ++-- tests/unit/modules/test_hosts.py | 4 ++-- tests/unit/returners/test_local_cache.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 22575802703..36d7cc4df14 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -83,7 +83,7 @@ def __virtual__(): return False, 'python kubernetes library not found' -if not salt.utils.is_windows(): +if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): @@ -713,7 +713,7 @@ def delete_deployment(name, 
namespace='default', **kwargs): namespace=namespace, body=body) mutable_api_response = api_response.to_dict() - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: diff --git a/tests/unit/modules/test_hosts.py b/tests/unit/modules/test_hosts.py index 56f01f56ab2..7cd76994537 100644 --- a/tests/unit/modules/test_hosts.py +++ b/tests/unit/modules/test_hosts.py @@ -94,7 +94,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): Tests true if the alias is set ''' hosts_file = '/etc/hosts' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' with patch('salt.modules.hosts.__get_hosts_filename', @@ -198,7 +198,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): Tests if specified host entry gets added from the hosts file ''' hosts_file = '/etc/hosts' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' with patch('salt.utils.files.fopen', mock_open()), \ diff --git a/tests/unit/returners/test_local_cache.py b/tests/unit/returners/test_local_cache.py index 741957ffd87..aa7117efb5d 100644 --- a/tests/unit/returners/test_local_cache.py +++ b/tests/unit/returners/test_local_cache.py @@ -97,7 +97,7 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin): local_cache.clean_old_jobs() # Get the name of the JID directory that was created to test against - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): jid_dir_name = jid_dir.rpartition('\\')[2] else: jid_dir_name = jid_dir.rpartition('/')[2] From 8b16300495ce93686f8b835fc7f0db057a85d6bc Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 26 Sep 2017 15:24:58 -0500 Subject: [PATCH 274/348] Fix some regressions in recent legacy git_pillar deprecation These didn't get caught in PR 42823 because of how we invoke the git_pillar 
code. Firstly, the "pillar" argument needed to stay. This is because even though we're not using it, _external_pillar_data() is still passing it now that git_pillar is not specially invoked there. Secondly, since the input comes in as a list, and _external_pillar_data uses single-asterisk expansion, the repos are passed separately when they should be passed as a single list. To fix these issues, I've done the following: 1. Re-introduced the "pillar" argument in git_pillar's ext_pillar function. 2. Changed the "pillar" variable to avoid confusion with the (unused) "pillar" argument being passed in. 3. Instead of git_pillar accepting the repos as a list, the ext_pillar function now uses single-asterisk expansion to make it conform with how _external_pillar_data() invokes it. --- salt/pillar/git_pillar.py | 16 ++++++++-------- tests/support/gitfs.py | 3 ++- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 53e58be0ac1..1c0f7b700fd 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -374,20 +374,20 @@ def __virtual__(): return False -def ext_pillar(minion_id, repo): +def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument ''' Checkout the ext_pillar sources and compile the resulting pillar SLS ''' opts = copy.deepcopy(__opts__) opts['pillar_roots'] = {} opts['__git_pillar'] = True - pillar = salt.utils.gitfs.GitPillar(opts) - pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + git_pillar = salt.utils.gitfs.GitPillar(opts) + git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) if __opts__.get('__role') == 'minion': # If masterless, fetch the remotes. We'll need to remove this once # we make the minion daemon able to run standalone. 
- pillar.fetch_remotes() - pillar.checkout() + git_pillar.fetch_remotes() + git_pillar.checkout() ret = {} merge_strategy = __opts__.get( 'pillar_source_merging_strategy', @@ -397,7 +397,7 @@ def ext_pillar(minion_id, repo): 'pillar_merge_lists', False ) - for pillar_dir, env in six.iteritems(pillar.pillar_dirs): + for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs): # If pillarenv is set, only grab pillars with that match pillarenv if opts['pillarenv'] and env != opts['pillarenv']: log.debug( @@ -406,7 +406,7 @@ def ext_pillar(minion_id, repo): env, pillar_dir, opts['pillarenv'] ) continue - if pillar_dir in pillar.pillar_linked_dirs: + if pillar_dir in git_pillar.pillar_linked_dirs: log.debug( 'git_pillar is skipping processing on %s as it is a ' 'mounted repo', pillar_dir @@ -433,7 +433,7 @@ def ext_pillar(minion_id, repo): # list, so that its top file is sourced from the correct # location and not from another git_pillar remote. pillar_roots.extend( - [d for (d, e) in six.iteritems(pillar.pillar_dirs) + [d for (d, e) in six.iteritems(git_pillar.pillar_dirs) if env == e and d != pillar_dir] ) diff --git a/tests/support/gitfs.py b/tests/support/gitfs.py index 411bfd27ce1..72871476014 100644 --- a/tests/support/gitfs.py +++ b/tests/support/gitfs.py @@ -341,7 +341,8 @@ class GitPillarTestBase(GitTestBase, LoaderModuleMockMixin): with patch.dict(git_pillar.__opts__, ext_pillar_opts): return git_pillar.ext_pillar( 'minion', - ext_pillar_opts['ext_pillar'][0]['git'], + {}, + *ext_pillar_opts['ext_pillar'][0]['git'] ) def make_repo(self, root_dir, user='root'): From 26b23b37bcb0414dc3595edeed17e3bce8c5b58f Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 26 Sep 2017 15:51:22 -0600 Subject: [PATCH 275/348] Skip test if missing binaries --- tests/unit/modules/test_disk.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/modules/test_disk.py b/tests/unit/modules/test_disk.py index 1c5459a5306..7ff2fef60ec 100644 --- a/tests/unit/modules/test_disk.py 
+++ b/tests/unit/modules/test_disk.py @@ -152,6 +152,7 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(disk.__salt__, {'cmd.retcode': mock}): self.assertEqual(disk.format_(device), True) + @skipIf(not salt.utils.which('lsblk') and not salt.utils.which('df'), 'lsblk or df not found') def test_fstype(self): ''' unit tests for disk.fstype From 35505ac966a7956bf41627a31b3df53b8522ed19 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 26 Sep 2017 15:52:04 -0600 Subject: [PATCH 276/348] Honor 80 char limit --- tests/unit/modules/test_disk.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_disk.py b/tests/unit/modules/test_disk.py index 7ff2fef60ec..d5db2530713 100644 --- a/tests/unit/modules/test_disk.py +++ b/tests/unit/modules/test_disk.py @@ -152,7 +152,8 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(disk.__salt__, {'cmd.retcode': mock}): self.assertEqual(disk.format_(device), True) - @skipIf(not salt.utils.which('lsblk') and not salt.utils.which('df'), 'lsblk or df not found') + @skipIf(not salt.utils.which('lsblk') and not salt.utils.which('df'), + 'lsblk or df not found') def test_fstype(self): ''' unit tests for disk.fstype From 5c41268dd74171befb03b0f0343de8047d537ba3 Mon Sep 17 00:00:00 2001 From: Benjamin Schiborr Date: Tue, 26 Sep 2017 15:32:03 -0700 Subject: [PATCH 277/348] Fix return code of puppet module Fixes #43762. Successful puppet return codes are 0 and 2. When return code is 2 salt will fail. puppet.py intercepted that for the json return, however, the salt job will still fail, because it only parses the return code of the actual process. This commit changes the actual process to return 0 for 0 and 2. 
--- salt/modules/puppet.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/salt/modules/puppet.py b/salt/modules/puppet.py index 58b3963c8cd..0462152e037 100644 --- a/salt/modules/puppet.py +++ b/salt/modules/puppet.py @@ -68,9 +68,7 @@ class _Puppet(object): self.vardir = 'C:\\ProgramData\\PuppetLabs\\puppet\\var' self.rundir = 'C:\\ProgramData\\PuppetLabs\\puppet\\run' self.confdir = 'C:\\ProgramData\\PuppetLabs\\puppet\\etc' - self.useshell = True else: - self.useshell = False self.puppet_version = __salt__['cmd.run']('puppet --version') if 'Enterprise' in self.puppet_version: self.vardir = '/var/opt/lib/pe-puppet' @@ -106,7 +104,10 @@ class _Puppet(object): ' --{0} {1}'.format(k, v) for k, v in six.iteritems(self.kwargs)] ) - return '{0} {1}'.format(cmd, args) + # Ensure that the puppet call will return 0 in case of exit code 2 + if salt.utils.platform.is_windows(): + return 'cmd /V:ON /c {0} {1} ^& if !ERRORLEVEL! EQU 2 (EXIT 0) ELSE (EXIT /B)'.format(cmd, args) + return '({0} {1}) || test $? 
-eq 2'.format(cmd, args) def arguments(self, args=None): ''' @@ -169,12 +170,7 @@ def run(*args, **kwargs): puppet.kwargs.update(salt.utils.args.clean_kwargs(**kwargs)) - ret = __salt__['cmd.run_all'](repr(puppet), python_shell=puppet.useshell) - if ret['retcode'] in [0, 2]: - ret['retcode'] = 0 - else: - ret['retcode'] = 1 - + ret = __salt__['cmd.run_all'](repr(puppet), python_shell=True) return ret From 46203c630c8f06b2a8d151ec1fb498fb92b9437f Mon Sep 17 00:00:00 2001 From: assaf shapira Date: Wed, 27 Sep 2017 15:28:46 +0300 Subject: [PATCH 278/348] ignore_ssl returned to _get_session --- salt/cloud/clouds/xen.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/cloud/clouds/xen.py b/salt/cloud/clouds/xen.py index 7359796c202..959688ac847 100644 --- a/salt/cloud/clouds/xen.py +++ b/salt/cloud/clouds/xen.py @@ -151,8 +151,15 @@ def _get_session(): __opts__, search_global=False ) + ignore_ssl = config.get_cloud_config_value( + 'ignore_ssl', + get_configured_provider(), + __opts__, + default=False, + search_global=False + ) try: - session = XenAPI.Session(url) + session = XenAPI.Session(url, ignore_ssl=ignore_ssl) log.debug('url: {} user: {} password: {}, originator: {}'.format( url, user, From 5e4b122b56418e66f2feeedc4cfef777dbdec1c9 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Wed, 27 Sep 2017 06:24:52 -0700 Subject: [PATCH 279/348] Fix ident issue to ensure code block ends correctly --- salt/modules/purefa.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index aeb4104ee7d..8bcf06fbe81 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -30,7 +30,7 @@ Installation Prerequisites - Configure Pure Storage FlashArray authentication. Use one of the following three methods. - 1) From the minion config + 1) From the minion config .. 
code-block:: yaml pure_tags: @@ -38,8 +38,8 @@ Installation Prerequisites san_ip: management vip or hostname for the FlashArray api_token: A valid api token for the FlashArray being managed - 2) From environment (PUREFA_IP and PUREFA_API) - 3) From the pillar (PUREFA_IP and PUREFA_API) + 2) From environment (PUREFA_IP and PUREFA_API) + 3) From the pillar (PUREFA_IP and PUREFA_API) :maintainer: Simon Dodsley (simon@purestorage.com) :maturity: new From 1de6791069552f80812dc4cab4c0ded0762030d3 Mon Sep 17 00:00:00 2001 From: Kees Bos Date: Thu, 21 Sep 2017 08:43:48 +0200 Subject: [PATCH 280/348] Fix git-pillar ext_pillar for __env__ usage The env must be mapped from '__env__' before validation of the env is done. Otherwise it will (naturally) fail, since __env__ in itself will never be a valid branch name. --- salt/pillar/git_pillar.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 1c0f7b700fd..12bab065d81 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -398,6 +398,13 @@ def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument False ) for pillar_dir, env in six.iteritems(git_pillar.pillar_dirs): + # Map env if env == '__env__' before checking the env value + if env == '__env__': + env = opts.get('pillarenv') \ + or opts.get('environment') \ + or opts.get('git_pillar_base') + log.debug('__env__ maps to %s', env) + # If pillarenv is set, only grab pillars with that match pillarenv if opts['pillarenv'] and env != opts['pillarenv']: log.debug( @@ -418,12 +425,6 @@ def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument 'env \'%s\'', pillar_dir, env ) - if env == '__env__': - env = opts.get('pillarenv') \ - or opts.get('environment') \ - or opts.get('git_pillar_base') - log.debug('__env__ maps to %s', env) - pillar_roots = [pillar_dir] if __opts__['git_pillar_includes']: From 5c3109ff071a7e1b18680a9217b0539d2c9ae4e1 
Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 27 Sep 2017 12:55:46 -0400 Subject: [PATCH 281/348] Removed commented imports --- salt/utils/vmware.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 018bb104175..b0552996e32 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- ''' -import sys -import ssl Connection library for VMware .. versionadded:: 2015.8.2 From 19acf9b1496828fc4f5860cd090c1fa233a9d7b7 Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Thu, 28 Sep 2017 09:12:24 +0200 Subject: [PATCH 282/348] Properly merge pillar data obtained from multiple nodegroups for cases where the minion belongs to more than one Fixes #43788 --- salt/pillar/file_tree.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index 323958e2f91..2af4560c49e 100644 --- a/salt/pillar/file_tree.py +++ b/salt/pillar/file_tree.py @@ -343,14 +343,15 @@ def ext_pillar(minion_id, if minion_id in match: ngroup_dir = os.path.join( nodegroups_dir, str(nodegroup)) - ngroup_pillar.update( + ngroup_pillar = salt.utils.dictupdate.merge(ngroup_pillar, _construct_pillar(ngroup_dir, follow_dir_links, keep_newline, render_default, renderer_blacklist, renderer_whitelist, - template) + template), + strategy='recurse' ) else: if debug is True: From 6bd5c236459363afbf41de4b46d2eecb38fdd82e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 16:42:25 -0400 Subject: [PATCH 283/348] Added sspi mechanism support and __pillar__ and config merging to salt.proxy.esxi --- salt/proxy/esxi.py | 172 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 133 insertions(+), 39 deletions(-) diff --git a/salt/proxy/esxi.py b/salt/proxy/esxi.py index 4edd50ac31d..f358a710da0 100644 --- a/salt/proxy/esxi.py +++ b/salt/proxy/esxi.py @@ -273,13 +273,22 @@ for standing up an ESXi host 
from scratch. # Import Python Libs from __future__ import absolute_import import logging +import os # Import Salt Libs from salt.exceptions import SaltSystemExit +from salt.config.schemas.esxi import EsxiProxySchema +from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. __proxyenabled__ = ['esxi'] +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False # Variables are scoped to this module so we can have persistent data # across calls to fns in here. @@ -288,53 +297,122 @@ DETAILS = {} # Set up logging log = logging.getLogger(__file__) - # Define the module's virtual name __virtualname__ = 'esxi' - def __virtual__(): ''' Only load if the ESXi execution module is available. ''' - if 'vsphere.system_info' in __salt__: + if HAS_JSONSCHEMA: return __virtualname__ return False, 'The ESXi Proxy Minion module did not load.' - def init(opts): ''' This function gets called when the proxy starts up. For ESXi devices, the host, login credentials, and, if configured, the protocol and port are cached. 
''' - if 'host' not in opts['proxy']: - log.critical('No \'host\' key found in pillar for this proxy.') - return False - if 'username' not in opts['proxy']: - log.critical('No \'username\' key found in pillar for this proxy.') - return False - if 'passwords' not in opts['proxy']: - log.critical('No \'passwords\' key found in pillar for this proxy.') - return False - - host = opts['proxy']['host'] - - # Get the correct login details + log.debug('Initting esxi proxy module in process \'{}\'' + ''.format(os.getpid())) + log.debug('Validating esxi proxy input') + schema = EsxiProxySchema.serialize() + log.trace('esxi_proxy_schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) try: - username, password = find_credentials(host) - except SaltSystemExit as err: - log.critical('Error: {0}'.format(err)) - return False + jsonschema.validate(proxy_conf, schema) + except jsonschema.exceptions.ValidationError as exc: + raise excs.InvalidProxyInputError(exc) - # Set configuration details - DETAILS['host'] = host - DETAILS['username'] = username - DETAILS['password'] = password - DETAILS['protocol'] = opts['proxy'].get('protocol', 'https') - DETAILS['port'] = opts['proxy'].get('port', '443') - DETAILS['credstore'] = opts['proxy'].get('credstore') + DETAILS['proxytype'] = proxy_conf['proxytype'] + if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf): + log.critical('Neither \'host\' nor \'vcenter\' keys found in pillar ' + 'for this proxy.') + return False + if 'host' in proxy_conf: + # We have started the proxy by connecting directly to the host + if 'username' not in proxy_conf: + log.critical('No \'username\' key found in pillar for this proxy.') + return False + if 'passwords' not in proxy_conf: + log.critical('No \'passwords\' key found in pillar for this proxy.') + return False + host = proxy_conf['host'] + + # Get the correct login details + try: + username, password 
= find_credentials(host) + except excs.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + + # Set configuration details + DETAILS['host'] = host + DETAILS['username'] = username + DETAILS['password'] = password + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') + return True + + if 'vcenter' in proxy_conf: + vcenter = proxy_conf['vcenter'] + if not proxy_conf.get('esxi_host'): + log.critical('No \'esxi_host\' key found in pillar for this proxy.') + DETAILS['esxi_host'] = proxy_conf['esxi_host'] + # We have started the proxy by connecting via the vCenter + if 'mechanism' not in proxy_conf: + log.critical('No \'mechanism\' key found in pillar for this proxy.') + return False + mechanism = proxy_conf['mechanism'] + # Save mandatory fields in cache + for key in ('vcenter', 'mechanism'): + DETAILS[key] = proxy_conf[key] + + if mechanism == 'userpass': + if 'username' not in proxy_conf: + log.critical('No \'username\' key found in pillar for this ' + 'proxy.') + return False + if not 'passwords' in proxy_conf and \ + len(proxy_conf['passwords']) > 0: + + log.critical('Mechanism is set to \'userpass\' , but no ' + '\'passwords\' key found in pillar for this ' + 'proxy.') + return False + for key in ('username', 'passwords'): + DETAILS[key] = proxy_conf[key] + elif mechanism == 'sspi': + if not 'domain' in proxy_conf: + log.critical('Mechanism is set to \'sspi\' , but no ' + '\'domain\' key found in pillar for this proxy.') + return False + if not 'principal' in proxy_conf: + log.critical('Mechanism is set to \'sspi\' , but no ' + '\'principal\' key found in pillar for this ' + 'proxy.') + return False + for key in ('domain', 'principal'): + DETAILS[key] = proxy_conf[key] + + if mechanism == 'userpass': + # Get the correct login details + log.debug('Retrieving credentials and testing vCenter connection' + ' for mehchanism \'userpass\'') + try: + username, password = find_credentials() + 
DETAILS['password'] = password + except excs.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + + # Save optional + DETAILS['protocol'] = proxy_conf.get('protocol', 'https') + DETAILS['port'] = proxy_conf.get('port', '443') + DETAILS['credstore'] = proxy_conf.get('credstore') def grains(): @@ -358,8 +436,9 @@ def grains_refresh(): def ping(): ''' - Check to see if the host is responding. Returns False if the host didn't - respond, True otherwise. + Returns True if connection is to be done via a vCenter (no connection is attempted). + Check to see if the host is responding when connecting directly via an ESXi + host. CLI Example: @@ -367,15 +446,19 @@ def ping(): salt esxi-host test.ping ''' - # find_credentials(DETAILS['host']) - try: - __salt__['vsphere.system_info'](host=DETAILS['host'], - username=DETAILS['username'], - password=DETAILS['password']) - except SaltSystemExit as err: - log.warning(err) - return False - + if DETAILS.get('esxi_host'): + return True + else: + # TODO Check connection if mechanism is SSPI + if DETAILS['mechanism'] == 'userpass': + find_credentials(DETAILS['host']) + try: + __salt__['vsphere.system_info'](host=DETAILS['host'], + username=DETAILS['username'], + password=DETAILS['password']) + except excs.SaltSystemExit as err: + log.warning(err) + return False return True @@ -461,3 +544,14 @@ def _grains(host, protocol=None, port=None): port=port) GRAINS_CACHE.update(ret) return GRAINS_CACHE + + +def is_connected_via_vcenter(): + return True if 'vcenter' in DETAILS else False + + +def get_details(): + ''' + Return the proxy details + ''' + return DETAILS From 3369a3def79e41f0abcf84ba387ae319fdc279cb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 16:48:15 -0400 Subject: [PATCH 284/348] Added the EsxiProxySchema JSON schema --- salt/config/schemas/esxi.py | 44 +++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 salt/config/schemas/esxi.py diff --git 
a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py new file mode 100644 index 00000000000..affd14be593 --- /dev/null +++ b/salt/config/schemas/esxi.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + + + salt.config.schemas.esxi + ~~~~~~~~~~~~~~~~~~~~~~~~ + + ESXi host configuration schemas +''' + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +from salt.utils.schema import (Schema, + ArrayItem, + IntegerItem, + StringItem) + + +class EsxiProxySchema(Schema): + ''' + Schema of the esxi proxy input + ''' + + title = 'Esxi Proxy Schema' + description = 'Esxi proxy schema' + additional_properties = False + proxytype = StringItem(required=True, + enum=['esxi']) + host = StringItem(pattern=r'[^\s]+') # Used when connecting directly + vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter + esxi_host = StringItem() + username = StringItem() + passwords = ArrayItem(min_items=1, + items=StringItem(), + unique_items=True) + mechanism = StringItem(enum=['userpass', 'sspi']) + # TODO Should be changed when anyOf is supported for schemas + domain = StringItem() + principal = StringItem() + protocol = StringItem() + port = IntegerItem(minimum=1) From 434d88b9a4cf5053b2f8ecdf819eba911d7e025d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 16:52:03 -0400 Subject: [PATCH 285/348] Added salt.modules.esxi.get_details that returns the proxy details --- salt/modules/esxi.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/modules/esxi.py b/salt/modules/esxi.py index a4c1f8ddcc4..ee1f981022e 100644 --- a/salt/modules/esxi.py +++ b/salt/modules/esxi.py @@ -56,3 +56,7 @@ def cmd(command, *args, **kwargs): proxy_cmd = proxy_prefix + '.ch_config' return __proxy__[proxy_cmd](command, *args, **kwargs) + + +def get_details(): + return __proxy__['esxi.get_details']() From 5c795129048a5400cecba82253eddcb05d921863 Mon 
Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 17:06:36 -0400 Subject: [PATCH 286/348] Added salt.modules.vsphere._get_esxi_proxy_details --- salt/modules/vsphere.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 0c923858042..87088cfb3bb 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -6495,3 +6495,19 @@ def _get_esxcluster_proxy_details(): det.get('protocol'), det.get('port'), det.get('mechanism'), \ det.get('principal'), det.get('domain'), det.get('datacenter'), \ det.get('cluster') + + +def _get_esxi_proxy_details(): + ''' + Returns the running esxi's proxy details + ''' + det = __proxy__['esxi.get_details']() + host = det.get('host') + if det.get('vcenter'): + host = det['vcenter'] + esxi_hosts = None + if det.get('esxi_host'): + esxi_hosts = [det['esxi_host']] + return host, det.get('username'), det.get('password'), \ + det.get('protocol'), det.get('port'), det.get('mechanism'), \ + det.get('principal'), det.get('domain'), esxi_hosts From ffbab2ce896d29103804aac624d87df49db325b9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 07:56:27 -0400 Subject: [PATCH 287/348] Added esxi proxy support and retrieval of esxi reference in salt.modules.vsphere._get_proxy_target --- salt/modules/vsphere.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 87088cfb3bb..efde0b6d07f 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -6441,7 +6441,7 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, @depends(HAS_PYVMOMI) -@supports_proxies('esxcluster', 'esxdatacenter', 'vcenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') def _get_proxy_target(service_instance): ''' Returns the target object of a proxy. 
def get_vsan_disk_management_system(service_instance):
    '''
    Returns a vim.VimClusterVsanVcDiskManagementSystem object

    service_instance
        Service instance to the host or vCenter
    '''

    #TODO Replace when better connection mechanism is available

    #For python 2.7.9 and later, the default SSL context has more strict
    #connection handshaking rules. We may need to turn off the hostname
    #checking and client side cert verification.
    #NOTE(review): CERT_NONE disables TLS certificate validation entirely;
    #acceptable only if the connection target is trusted — confirm.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the service instance's existing SOAP stub to reach the vSAN
    # managed objects endpoint
    stub = service_instance._stub
    vc_mos = vsanapiutils.GetVsanVcMos(stub, context=context)
    return vc_mos['vsan-disk-management-system']
+ ''' + if not hostname: + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.vsanSystem', + type=vim.HostSystem, + skip=False) + objs = salt.utils.vmware.get_mors_with_properties( + service_instance, vim.HostVsanSystem, property_list=['config.enabled'], + container_ref=host_ref, traversal_spec=traversal_spec) + if not objs: + raise VMwareObjectRetrievalError('Host\'s \'{0}\' VSAN system was ' + 'not retrieved'.format(hostname)) + log.trace('[{0}] Retrieved VSAN system'.format(hostname)) + return objs[0]['object'] + + def get_cluster_vsan_info(cluster_ref): ''' Returns the extended cluster vsan configuration object From 6a31c437dfaf681c170cffbf8f972dad6881d0a6 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 23 Sep 2017 07:37:45 -0400 Subject: [PATCH 290/348] Added salt.utils.vsan.create_diskgroup --- salt/utils/vsan.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index c79b95155a2..20b6f954d43 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -182,6 +182,63 @@ def get_host_vsan_system(service_instance, host_ref, hostname=None): return objs[0]['object'] +def create_diskgroup(service_instance, vsan_disk_mgmt_system, + host_ref, cache_disk, capacity_disks): + ''' + Creates a disk group + + service_instance + Service instance to the host or vCenter + + vsan_disk_mgmt_system + vim.VimClusterVsanVcDiskManagemenetSystem representing the vSan disk + management system retrieved from the vsan endpoint. + + host_ref + vim.HostSystem object representing the target host the disk group will + be created on + + cache_disk + The vim.HostScsidisk to be used as a cache disk. It must be an ssd disk. + + capacity_disks + List of vim.HostScsiDisk objects representing of disks to be used as + capacity disks. Can be either ssd or non-ssd. 
def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system,
                              host_ref, diskgroup, new_capacity_disks):
    '''
    Adds capacity disk(s) to a disk group.

    service_instance
        Service instance to the host or vCenter

    vsan_disk_mgmt_system
        vim.VimClusterVsanVcDiskManagementSystem representing the vSan disk
        management system retrieved from the vsan endpoint.

    host_ref
        vim.HostSystem object representing the target host the disk group will
        be created on

    diskgroup
        The vsan.HostDiskMapping object representing the host's diskgroup where
        the additional capacity needs to be added

    new_capacity_disks
        List of vim.HostScsiDisk objects representing the disks to be added as
        capacity disks. Can be either ssd or non-ssd. There must be a minimum
        of 1 new capacity disk in the list.
    '''
    hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    cache_disk = diskgroup.ssd
    cache_disk_id = cache_disk.canonicalName
    log.debug('Adding capacity to disk group with cache disk \'{0}\' on host '
              '\'{1}\''.format(cache_disk_id, hostname))
    log.trace('new_capacity_disk_ids = {0}'.format([c.canonicalName for c in
                                                    new_capacity_disks]))
    spec = vim.VimVsanHostDiskMappingCreationSpec()
    spec.cacheDisks = [cache_disk]
    spec.capacityDisks = new_capacity_disks
    # All new capacity disks must be either ssd or non-ssd (mixed disks are not
    # supported); also they need to match the type of the existing capacity
    # disks; we assume disks are already validated
    spec.creationType = 'allFlash' if getattr(new_capacity_disks[0], 'ssd') \
        else 'hybrid'
    spec.host = host_ref
    try:
        task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
    # Fixed: was 'except fault.NoPermission', but 'fault' is not a name in
    # scope (sibling handlers use 'vim.fault'), which would have raised a
    # NameError instead of the intended VMwareApiError
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.fault.MethodNotFound as exc:
        log.exception(exc)
        raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method))
    except vmodl.RuntimeFault as exc:
        # Log before re-raising, consistent with the other handlers (the
        # original omitted log.exception here)
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    _wait_for_tasks([task], service_instance)
    return True
+ + host_vsan_system + ESXi host's VSAN system. Default value is None. + ''' + if not hostname: + hostname = salt.utils.vmware.get_managed_object_name(host_ref) + cache_disk = diskgroup.ssd + cache_disk_id = cache_disk.canonicalName + log.debug('Removing capacity from disk group with cache disk \'{0}\' on ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + log.trace('capacity_disk_ids = {0}'.format([c.canonicalName for c in + capacity_disks])) + if not host_vsan_system: + host_vsan_system = get_host_vsan_system(service_instance, + host_ref, hostname) + # Set to evacuate all data before removing the disks + maint_spec = vim.HostMaintenanceSpec() + maint_spec.vsanMode = vim.VsanHostDecommissionMode() + if data_evacuation: + maint_spec.vsanMode.objectAction = \ + vim.VsanHostDecommissionModeObjectAction.evacuateAllData + else: + maint_spec.vsanMode.objectAction = \ + vim.VsanHostDecommissionModeObjectAction.noAction + try: + task = host_vsan_system.RemoveDisk_Task(disk=capacity_disks, + maintenanceSpec=maint_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
def remove_diskgroup(service_instance, host_ref, diskgroup, hostname=None,
                     host_vsan_system=None, erase_disk_partitions=False,
                     data_accessibility=True):
    '''
    Removes a disk group.

    service_instance
        Service instance to the host or vCenter

    host_ref
        Reference to the ESXi host

    diskgroup
        The vsan.HostDiskMapping object representing the host's diskgroup from
        where the capacity needs to be removed

    hostname
        Name of ESXi host. Default value is None.

    host_vsan_system
        ESXi host's VSAN system. Default value is None.

    erase_disk_partitions
        NOTE(review): accepted but never referenced in this implementation —
        confirm whether the partitions of the removed disks were meant to be
        erased. Default value is False.

    data_accessibility
        Specifies whether to ensure data accessibility. Default value is True.
    '''
    if not hostname:
        hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    cache_disk_id = diskgroup.ssd.canonicalName
    log.debug('Removing disk group with cache disk \'{0}\' on '
              'host \'{1}\''.format(cache_disk_id, hostname))
    if not host_vsan_system:
        host_vsan_system = get_host_vsan_system(
            service_instance, host_ref, hostname)
    # Set to evacuate all data before removing the disks
    maint_spec = vim.HostMaintenanceSpec()
    maint_spec.vsanMode = vim.VsanHostDecommissionMode()
    object_action = vim.VsanHostDecommissionModeObjectAction
    if data_accessibility:
        maint_spec.vsanMode.objectAction = \
            object_action.ensureObjectAccessibility
    else:
        maint_spec.vsanMode.objectAction = object_action.noAction
    try:
        task = host_vsan_system.RemoveDiskMapping_Task(
            mapping=[diskgroup], maintenanceSpec=maint_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    salt.utils.vmware.wait_for_task(task, hostname, 'remove_diskgroup')
    log.debug('Removed disk group with cache disk \'{0}\' '
              'on host \'{1}\''.format(cache_disk_id, hostname))
    return True
VMwareObjectNotFoundError(VMwareSaltError): + ''' + Used when a VMware object was not found + ''' + + class VMwareApiError(VMwareSaltError): ''' Used when representing a generic VMware API error From 4d6eb4197a99625bf3d917d31dcfc0923acba82d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 09:46:58 -0400 Subject: [PATCH 295/348] Added salt.utils.vmware._get_partition_info --- salt/utils/vmware.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index b0552996e32..d3d6b33649b 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2048,6 +2048,30 @@ def get_storage_system(service_instance, host_ref, hostname=None): return objs[0]['object'] +def _get_partition_info(storage_system, device_path): + ''' + Returns partition informations for a device path, of type + vim.HostDiskPartitionInfo + ''' + try: + partition_infos = \ + storage_system.RetrieveDiskPartitionInfo( + devicePath=[device_path]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
def _get_new_computed_partition_spec(hostname, storage_system, device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    #TODO implement support for multiple partitions
    # We only support adding a partition at the end of the disk
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('computed partition info = {0}'
              ''.format(computed_partition_info))
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # Fixed: was salt.exceptions.VMwareNotFoundError, which does not
        # exist; the exception defined in salt.exceptions is
        # VMwareObjectNotFoundError (raising the old name would have been
        # an AttributeError)
        raise salt.exceptions.VMwareObjectNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = {0}'.format(partition_numbers[0]))
    return (partition_numbers[0], computed_partition_info.spec)
storage_system: + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + + target_disk = disk_ref + partition_info = _get_partition_info(storage_system, + target_disk.devicePath) + log.trace('partition_info = {0}'.format(partition_info)) + new_partition_number, partition_spec = _get_new_computed_partition_spec( + hostname, storage_system, target_disk.devicePath, partition_info) + spec = vim.VmfsDatastoreCreateSpec( + vmfs=vim.HostVmfsSpec( + majorVersion=vmfs_major_version, + volumeName=datastore_name, + extent=vim.HostScsiDiskPartition( + diskName=disk_id, + partition=new_partition_number)), + diskUuid=target_disk.uuid, + partition=partition_spec) + try: + ds_ref = \ + host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.debug('Created datastore \'{0}\' on host ' + '\'{1}\''.format(datastore_name, hostname)) + return ds_ref + + def get_hosts(service_instance, datacenter_name=None, host_names=None, cluster_name=None, get_all_hosts=False): ''' From 9831a5df77abfe1ec7f4338a12ff10bd5950a7fa Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 09:49:56 -0400 Subject: [PATCH 298/348] Added salt.utils.vmware.get_host_datastore_system --- salt/utils/vmware.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 23918857b13..70a1062040e 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2196,6 +2196,37 @@ def create_vmfs_datastore(host_ref, datastore_name, disk_ref, return 
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore.

    The datastore is removed via the datastore system of the first host
    attached to it.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    # Fixed docstring (was a copy-paste of create_vmfs_datastore's) and
    # removed the unused 'ds_info' local
    log.debug('Removing datastore \'{0}\''.format(ds_name))
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[{0}] Removed datastore \'{1}\''.format(hostname, ds_name))
Cluster existence only makes - # sense if the cluster has been specified - cluster = get_cluster(start_point, cluster_name) - else: + if get_all_hosts or not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) + else: + if cluster_name: + properties.append('parent') + if datacenter_name: + start_point = get_datacenter(service_instance, datacenter_name) + if cluster_name: + # Retrieval to test if cluster exists. Cluster existence only makes + # sense if the cluster has been specified + cluster = get_cluster(start_point, cluster_name) # Search for the objects hosts = get_mors_with_properties(service_instance, vim.HostSystem, container_ref=start_point, property_list=properties) + log.trace('Retrieved hosts: {0}'.format(h['name'] for h in hosts)) filtered_hosts = [] for h in hosts: # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) - name_condition = get_all_hosts or (h['name'] in host_names) - # the datacenter_name needs to be set in order for the cluster - # condition membership to be checked, otherwise the condition is - # ignored - cluster_condition = \ - (not datacenter_name or not cluster_name or - (isinstance(h['parent'], vim.ClusterComputeResource) and - h['parent'].name == cluster_name)) - if name_condition and cluster_condition: + if get_all_hosts: filtered_hosts.append(h['object']) + continue + if cluster_name: + if not isinstance(h['parent'], vim.ClusterComputeResource): + continue + parent_name = get_managed_object_name(h['parent']) + if parent_name != cluster_name: + continue + if h['name'] in host_names: + filtered_hosts.append(h['object']) return filtered_hosts From 27cd7cf8e72c8cdfdafaaa847192ef8a1b7abf2b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 09:30:09 -0400 Subject: [PATCH 301/348] Added salt.utils.vmware._get_scsi_address_to_lun_key_map --- salt/utils/vmware.py | 64 
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host: map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    # (removed a dead 'map = {}' assignment that shadowed the builtin and
    # was never used)
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'{0}\': '
              '{1}'.format(hostname, lun_key_by_scsi_addr))
    return lun_key_by_scsi_addr
From cc21f382d5cd9247ea630f4d980a796fde8eb3f1 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 09:34:09 -0400 Subject: [PATCH 302/348] Added salt.utils.vmware.get_all_luns --- salt/utils/vmware.py | 50 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 18d18859be9..4a99edb17df 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2395,6 +2395,56 @@ def _get_scsi_address_to_lun_key_map(service_instance, return lun_key_by_scsi_addr +def get_all_luns(host_ref, storage_system=None, hostname=None): + ''' + Returns a list of all vim.HostScsiDisk objects in a disk + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + storage_system + The host's storage system. Default is None. + + hostname + Name of the host. This argument is optional. + ''' + if not hostname: + hostname = get_managed_object_name(host_ref) + if not storage_system: + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + if not storage_system: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage system was not retrieved' + ''.format(hostname)) + try: + device_info = storage_system.storageDeviceInfo + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not device_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage device info was not retrieved' + ''.format(hostname)) + + scsi_luns = device_info.scsiLun + if scsi_luns: + log.trace('Retrieved scsi luns in host \'{0}\': {1}' + ''.format(hostname, [l.canonicalName for l in scsi_luns])) + return scsi_luns + log.trace('Retrieved no scsi_luns in host \'{0}\''.format(hostname)) + return [] + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. From 5a8cc2f19f96508a7b167ea0d6fd03f919f65062 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:27:31 -0400 Subject: [PATCH 303/348] Added salt.utils.vmware.get_scsi_address_to_lun_map --- salt/utils/vmware.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 4a99edb17df..42116941adf 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2445,6 +2445,35 @@ def get_all_luns(host_ref, storage_system=None, hostname=None): return [] +def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): + ''' + Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their + scsi address + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + storage_system + The host's storage system. Default is None. + + hostname + Name of the host. This argument is optional. 
+ ''' + if not hostname: + hostname = get_managed_object_name(host_ref) + si = get_service_instance_from_managed_object(host_ref, name=hostname) + if not storage_system: + storage_system = get_storage_system(si, host_ref, hostname) + lun_ids_to_scsi_addr_map = \ + _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, + hostname) + luns_to_key_map = {d.key: d for d in + get_all_luns(host_ref, storage_system, hostname)} + return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in + lun_ids_to_scsi_addr_map.iteritems()} + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. From 8b7af00e275954cc448eaafc3c0a438e96b487db Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:36:28 -0400 Subject: [PATCH 304/348] Added salt.utils.vmware.get_disks --- salt/utils/vmware.py | 56 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 42116941adf..bb6628973ca 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2474,6 +2474,62 @@ def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): lun_ids_to_scsi_addr_map.iteritems()} +def get_disks(host_ref, disk_ids=None, scsi_addresses=None, + get_all_disks=False): + ''' + Returns a list of vim.HostScsiDisk objects representing disks + in a ESXi host, filtered by their cannonical names and scsi_addresses + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + disk_ids + The list of canonical names of the disks to be retrieved. Default value + is None + + scsi_addresses + The list of scsi addresses of the disks to be retrieved. Default value + is None + + get_all_disks + Specifies whether to retrieve all disks in the host. + Default value is False. 
+ ''' + hostname = get_managed_object_name(host_ref) + if get_all_disks: + log.trace('Retrieving all disks in host \'{0}\''.format(hostname)) + else: + log.trace('Retrieving disks in host \'{0}\': ids = ({1}); scsi ' + 'addresses = ({2})'.format(hostname, disk_ids, + scsi_addresses)) + if not (disk_ids or scsi_addresses): + return [] + si = get_service_instance_from_managed_object(host_ref, name=hostname) + storage_system = get_storage_system(si, host_ref, hostname) + disk_keys = [] + if scsi_addresses: + # convert the scsi addresses to disk keys + lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, + storage_system, + hostname) + disk_keys = [key for scsi_addr, key in lun_key_by_scsi_addr.iteritems() + if scsi_addr in scsi_addresses] + log.trace('disk_keys based on scsi_addresses = {0}'.format(disk_keys)) + + scsi_luns = get_all_luns(host_ref, storage_system) + scsi_disks = [disk for disk in scsi_luns + if isinstance(disk, vim.HostScsiDisk) and ( + get_all_disks or + # Filter by canonical name + (disk_ids and (disk.canonicalName in disk_ids)) or + # Filter by disk keys from scsi addresses + (disk.key in disk_keys))] + log.trace('Retrieved disks in host \'{0}\': {1}' + ''.format(hostname, [d.canonicalName for d in scsi_disks])) + return scsi_disks + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From 200159d76d4dae4e40d784db8e8b5056515386a6 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:38:55 -0400 Subject: [PATCH 305/348] Added salt.utils.vmware.get_disk_partition_info --- salt/utils/vmware.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index bb6628973ca..959b0b8ecf7 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2530,6 +2530,48 @@ def get_disks(host_ref, disk_ids=None, scsi_addresses=None, return scsi_disks +def get_disk_partition_info(host_ref, disk_id, storage_system=None): + ''' + Returns all partitions on a disk + + host_ref + The reference of the ESXi host containing the disk + + disk_id + The canonical name of the disk whose partitions are to be removed + + storage_system + The ESXi host's storage system. Default is None. + ''' + hostname = get_managed_object_name(host_ref) + service_instance = get_service_instance_from_managed_object(host_ref) + if not storage_system: + storage_system = get_storage_system(service_instance, host_ref, + hostname) + + props = get_properties_of_managed_object(storage_system, + ['storageDeviceInfo.scsiLun']) + if not props.get('storageDeviceInfo.scsiLun'): + raise salt.exceptions.VMwareObjectRetrievalError( + 'No devices were retrieved in host \'{0}\''.format(hostname)) + log.trace('[{0}] Retrieved {1} devices: {2}'.format( + hostname, len(props['storageDeviceInfo.scsiLun']), + ', '.join([l.canonicalName + for l in props['storageDeviceInfo.scsiLun']]))) + disks = [l for l in props['storageDeviceInfo.scsiLun'] + if isinstance(l, vim.HostScsiDisk) and + l.canonicalName == disk_id] + if not disks: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Disk \'{0}\' was not found in host \'{1}\'' + ''.format(disk_id, hostname)) + log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath)) + partition_info = _get_partition_info(storage_system, disks[0].devicePath) + 
log.trace('[{0}] Retrieved {1} partition(s) on disk \'{2}\'' + ''.format(hostname, len(partition_info.spec.partition), disk_id)) + return partition_info + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. From c386612c0769dcbdea5c1ef4b20f28a93a799074 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:41:33 -0400 Subject: [PATCH 306/348] Added salt.utils.vmware.erase_disk_partitions --- salt/utils/vmware.py | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 959b0b8ecf7..bc3e87da3ef 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2572,6 +2572,78 @@ def get_disk_partition_info(host_ref, disk_id, storage_system=None): return partition_info +def erase_disk_partitions(service_instance, host_ref, disk_id, + hostname=None, storage_system=None): + ''' + Erases all partitions on a disk + + in a vcenter filtered by their names and/or datacenter, cluster membership + + service_instance + The Service Instance Object from which to obtain all information + + host_ref + The reference of the ESXi host containing the disk + + disk_id + The canonical name of the disk whose partitions are to be removed + + hostname + The ESXi hostname. Default is None. + + storage_system + The ESXi host's storage system. Default is None. 
+ ''' + + if not hostname: + hostname = get_managed_object_name(host_ref) + if not storage_system: + storage_system = get_storage_system(service_instance, host_ref, + hostname) + + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.storageSystem', + type=vim.HostSystem, + skip=False) + results = get_mors_with_properties(service_instance, + vim.HostStorageSystem, + ['storageDeviceInfo.scsiLun'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not results: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' devices were not retrieved'.format(hostname)) + log.trace('[{0}] Retrieved {1} devices: {2}'.format( + hostname, len(results[0].get('storageDeviceInfo.scsiLun', [])), + ', '.join([l.canonicalName for l in + results[0].get('storageDeviceInfo.scsiLun', [])]))) + disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', []) + if isinstance(l, vim.HostScsiDisk) and + l.canonicalName == disk_id] + if not disks: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Disk \'{0}\' was not found in host \'{1}\'' + ''.format(disk_id, hostname)) + log.trace('[{0}] device_path = {1}'.format(hostname, disks[0].devicePath)) + # Erase the partitions by setting an empty partition spec + try: + storage_system.UpdateDiskPartitions(disks[0].devicePath, + vim.HostDiskPartitionSpec()) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + log.trace('[{0}] Erased partitions on disk \'{1}\'' + ''.format(hostname, disk_id)) + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From 3d0383694f38466535ad7df7e067b1e24cf7c9fb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:44:39 -0400 Subject: [PATCH 307/348] Added salt.utils.get_diskgroups --- salt/utils/vmware.py | 60 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index bc3e87da3ef..52e8838fa46 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2644,6 +2644,66 @@ def erase_disk_partitions(service_instance, host_ref, disk_id, ''.format(hostname, disk_id)) +def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): + ''' + Returns a list of vim.VsanHostDiskMapping objects representing disks + in a ESXi host, filtered by their cannonical names. + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + cache_disk_ids + The list of cannonical names of the cache disks to be retrieved. The + canonical name of the cache disk is enough to identify the disk group + because it is guaranteed to have one and only one cache disk. + Default is None. + + get_all_disk_groups + Specifies whether to retrieve all disks groups in the host. + Default value is False. + ''' + hostname = get_managed_object_name(host_ref) + if get_all_disk_groups: + log.trace('Retrieving all disk groups on host \'{0}\'' + ''.format(hostname)) + else: + log.trace('Retrieving disk groups from host \'{0}\', with cache disk ' + 'ids : ({1})'.format(hostname, cache_disk_ids)) + if not cache_disk_ids: + return [] + try: + vsan_host_config = host_ref.config.vsanHostConfig + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not vsan_host_config: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No host config found on host \'{0}\''.format(hostname)) + vsan_storage_info = vsan_host_config.storageInfo + if not vsan_storage_info: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No vsan storage info found on host \'{0}\''.format(hostname)) + vsan_disk_mappings = vsan_storage_info.diskMapping + if not vsan_disk_mappings: + return [] + disk_groups = [dm for dm in vsan_disk_mappings if \ + (get_all_disk_groups or \ + (dm.ssd.canonicalName in cache_disk_ids))] + log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : ' + '{1}'.format(hostname, + [d.ssd.canonicalName for d in disk_groups])) + return disk_groups + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From 13e8bad397cfb3cbb5c9182b819093e3949174b7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 10:46:14 -0400 Subject: [PATCH 308/348] Added salt.utils._check_disks_in_diskgroup --- salt/utils/vmware.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 52e8838fa46..66ca37ed61d 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2704,6 +2704,27 @@ def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): return disk_groups +def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): + ''' + Checks that the disks in a disk group are as expected and raises + CheckError exceptions if the check fails + ''' + if not disk_group.ssd.canonicalName == cache_disk_id: + raise salt.exceptions.ArgumentValueError( + 'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: ' + '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id)) + if sorted([d.canonicalName for d in disk_group.nonSsd]) != \ + sorted(capacity_disk_ids): + + raise salt.exceptions.ArgumentValueError( + 'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\'' + ''.format(sorted([d.canonicalName for d in disk_group.nonSsd]), + sorted(capacity_disk_ids))) + log.trace('Checked disks in diskgroup with cache disk id \'{0}\'' + ''.format(cache_disk_id)) + return True + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From beb2b615889351338a36ba1eb785de119de9e020 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 16:06:20 -0400 Subject: [PATCH 309/348] Added salt.utils.vmware.get_host_cache --- salt/utils/vmware.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 66ca37ed61d..eb6132a1486 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2725,6 +2725,47 @@ def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids): return True +#TODO Support host caches on multiple datastores +def get_host_cache(host_ref, host_cache_manager=None): + ''' + Returns a vim.HostScsiDisk if the host cache is configured on the specified + host, other wise returns None + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + host_cache_manager + The vim.HostCacheConfigurationManager object representing the cache + configuration manager on the specified host. Default is None. 
If None, + it will be retrieved in the method + ''' + hostname = get_managed_object_name(host_ref) + service_instance = get_service_instance_from_managed_object(host_ref) + log.trace('Retrieving the host cache on host \'{0}\''.format(hostname)) + if not host_cache_manager: + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.cacheConfigurationManager', + type=vim.HostSystem, + skip=False) + results = get_mors_with_properties(service_instance, + vim.HostCacheConfigurationManager, + ['cacheConfigurationInfo'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not results or not results[0].get('cacheConfigurationInfo'): + log.trace('Host \'{0}\' has no host cache'.format(hostname)) + return None + return results[0]['cacheConfigurationInfo'][0] + else: + results = get_properties_of_managed_object(host_cache_manager, + ['cacheConfigurationInfo']) + if not results: + log.trace('Host \'{0}\' has no host cache'.format(hostname)) + return None + return results['cacheConfigurationInfo'][0] + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From f84c55bf83ccd0a4f3fe9817622085fd970561d3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 16:11:58 -0400 Subject: [PATCH 310/348] Added salt.utils.vmware.configure_host_cache --- salt/utils/vmware.py | 56 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index eb6132a1486..1c226e9cc05 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2766,6 +2766,62 @@ def get_host_cache(host_ref, host_cache_manager=None): return results['cacheConfigurationInfo'][0] +#TODO Support host caches on multiple datastores +def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, + host_cache_manager=None): + ''' + Configures the host cahe of the specified host + + host_ref + The vim.HostSystem object representing the host that contains the + requested disks. + + datastore_ref + The vim.Datastore opject representing the datastore the host cache will + be configured on. + + swap_size_MiB + The size in Mibibytes of the swap. + + host_cache_manager + The vim.HostCacheConfigurationManager object representing the cache + configuration manager on the specified host. Default is None. 
If None, + it will be retrieved in the method + ''' + hostname = get_managed_object_name(host_ref) + if not host_cache_manager: + props = get_properties_of_managed_object( + host_ref, ['configManager.cacheConfigurationManager']) + if not props.get('configManager.cacheConfigurationManager'): + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host \'{0}\' has no host cache'.format(hostname)) + host_cache_manager = props['configManager.cacheConfigurationManager'] + log.trace('Configuring the host cache on host \'{0}\', datastore \'{1}\', ' + 'swap size={2} MiB'.format(hostname, datastore_ref.name, + swap_size_MiB)) + + spec = vim.HostCacheConfigurationSpec( + datastore=datastore_ref, + swapSize=swap_size_MiB) + log.trace('host_cache_spec={0}'.format(spec)) + try: + task = host_cache_manager.ConfigureHostCache_Task(spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + wait_for_task(task, hostname, 'HostCacheConfigurationTask') + log.trace('Configured host cache on host \'{0}\''.format(hostname)) + return True + + def list_hosts(service_instance): ''' Returns a list of hosts associated with a given service instance. 
From 6ad97b01e4f1393f2a8b1a28838cd2c4f3a5d1bc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 15:43:42 -0400 Subject: [PATCH 311/348] Change debug logs to trace logs in salt.utils.vmware.get_datastores --- salt/utils/vmware.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 1c226e9cc05..3c861c27c06 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1909,7 +1909,7 @@ def get_datastores(service_instance, reference, datastore_names=None, 'is set'.format(reference.__class__.__name__)) if (not get_all_datastores) and backing_disk_ids: # At this point we know the reference is a vim.HostSystem - log.debug('Filtering datastores with backing disk ids: {}' + log.trace('Filtering datastores with backing disk ids: {}' ''.format(backing_disk_ids)) storage_system = get_storage_system(service_instance, reference, obj_name) @@ -1925,11 +1925,11 @@ def get_datastores(service_instance, reference, datastore_names=None, # Skip volume if it doesn't contain an extent with a # canonical name of interest continue - log.debug('Found datastore \'{0}\' for disk id(s) \'{1}\'' + log.trace('Found datastore \'{0}\' for disk id(s) \'{1}\'' ''.format(vol.name, [e.diskName for e in vol.extent])) disk_datastores.append(vol.name) - log.debug('Datastore found for disk filter: {}' + log.trace('Datastore found for disk filter: {}' ''.format(disk_datastores)) if datastore_names: datastore_names.extend(disk_datastores) From 0186045169fd98bfe49cadba8ab846247314f15f Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 15:44:20 -0400 Subject: [PATCH 312/348] Change debug logs to trace logs in salt.utils.vmware.rename_datastore --- salt/utils/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 3c861c27c06..f2fbf43f593 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2006,7 +2006,7 @@ def 
rename_datastore(datastore_ref, new_datastore_name): New datastore name ''' ds_name = get_managed_object_name(datastore_ref) - log.debug('Renaming datastore \'{0}\' to ' + log.trace('Renaming datastore \'{0}\' to ' '\'{1}\''.format(ds_name, new_datastore_name)) try: datastore_ref.RenameDatastore(new_datastore_name) From e9890106160b0571d3d0ab840fa862de3f17b7dd Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 19:51:11 -0400 Subject: [PATCH 313/348] Added salt.modules.list_hosts_via_proxy --- salt/modules/vsphere.py | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index efde0b6d07f..15343fb260a 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -5813,6 +5813,57 @@ def assign_license(license_key, license_name, entity, entity_display_name, entity_name=entity_display_name) +@depends(HAS_PYVMOMI) +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter', 'vcenter') +@gets_service_instance_via_proxy +def list_hosts_via_proxy(hostnames=None, datacenter=None, + cluster=None, service_instance=None): + ''' + Returns a list of hosts for the the specified VMware environment. The list + of hosts can be filtered by datacenter name and/or cluster name + + hostnames + Hostnames to filter on. + + datacenter_name + Name of datacenter. Only hosts in this datacenter will be retrieved. + Default is None. + + cluster_name + Name of cluster. Only hosts in this cluster will be retrieved. If a + datacenter is not specified the first cluster with this name will be + considerred. Default is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + CLI Example: + + .. 
code-block:: bash + + salt '*' vsphere.list_hosts_via_proxy + + salt '*' vsphere.list_hosts_via_proxy hostnames=[esxi1.example.com] + + salt '*' vsphere.list_hosts_via_proxy datacenter=dc1 cluster=cluster1 + ''' + if cluster: + if not datacenter: + raise salt.exceptions.ArgumentValueError( + 'Datacenter is required when cluster is specified') + get_all_hosts = False + if not hostnames and not datacenter and not cluster: + get_all_hosts = True + hosts = salt.utils.vmware.get_hosts(service_instance, + datacenter_name=datacenter, + host_names=hostnames, + cluster_name=cluster, + get_all_hosts=get_all_hosts) + return [salt.utils.vmware.get_managed_object_name(h) for h in hosts] + + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 4230224fe4c04af87abbda1d86b97d94af808917 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 19:54:04 -0400 Subject: [PATCH 314/348] Added salt.modules.vsphere.list_disks --- salt/modules/vsphere.py | 49 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 15343fb260a..07983b74b92 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -5863,6 +5863,55 @@ def list_hosts_via_proxy(hostnames=None, datacenter=None, return [salt.utils.vmware.get_managed_object_name(h) for h in hosts] +@depends(HAS_PYVMOMI) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None): + ''' + Returns a list of dict representations of the disks in an ESXi host. + The list of disks can be filtered by disk canonical names or + scsi addresses. + + disk_ids: + List of disk canonical names to be retrieved. Default is None. + + scsi_addresses + List of scsi addresses of disks to be retrieved. 
Default is None + + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.list_disks + + salt '*' vsphere.list_disks disk_ids='[naa.00, naa.001]' + + salt '*' vsphere.list_disks + scsi_addresses='[vmhba0:C0:T0:L0, vmhba1:C0:T0:L0]' + ''' + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + log.trace('Retrieving disks if host \'{0}\''.format(hostname)) + log.trace('disk ids = {0}'.format(disk_ids)) + log.trace('scsi_addresses = {0}'.format(scsi_addresses)) + # Default to getting all disks if no filtering is done + get_all_disks = True if not (disk_ids or scsi_addresses) else False + ret_list = [] + scsi_address_to_lun = salt.utils.vmware.get_scsi_address_to_lun_map( + host_ref, hostname=hostname) + canonical_name_to_scsi_address = { + lun.canonicalName: scsi_addr + for scsi_addr, lun in scsi_address_to_lun.iteritems()} + for d in salt.utils.vmware.get_disks(host_ref, disk_ids, scsi_addresses, + get_all_disks): + ret_list.append({'id': d.canonicalName, + 'scsi_address': + canonical_name_to_scsi_address[d.canonicalName]}) + return ret_list + def _check_hosts(service_instance, host, host_names): ''' From 5d89f7b7430ae4e26dbaa547a40765e8e9d63a9e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 Sep 2017 19:56:10 -0400 Subject: [PATCH 315/348] Added salt.modules.vsphere.erase_disk_partitions --- salt/modules/vsphere.py | 55 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 07983b74b92..ae331579672 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -5913,6 +5913,61 @@ def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None): return ret_list +@depends(HAS_PYVMOMI) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def erase_disk_partitions(disk_id=None, scsi_address=None, 
+ service_instance=None): + ''' + Erases the partitions on a disk. + The disk can be specified either by the canonical name, or by the + scsi_address. + + disk_id + Canonical name of the disk. + Either ``disk_id`` or ``scsi_address`` needs to be specified + (``disk_id`` supersedes ``scsi_address``. + + scsi_address` + Scsi address of the disk. + ``disk_id`` or ``scsi_address`` needs to be specified + (``disk_id`` supersedes ``scsi_address``. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.erase_disk_partitions scsi_address='vmhaba0:C0:T0:L0' + + salt '*' vsphere.erase_disk_partitions disk_id='naa.000000000000001' + ''' + if not disk_id and not scsi_address: + raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' ' + 'needs to be specified') + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + if not disk_id: + scsi_address_to_lun = \ + salt.utils.vmware.get_scsi_address_to_lun_map(host_ref) + if scsi_address not in scsi_address_to_lun: + raise VMwareObjectRetrievalError( + 'Scsi lun with address \'{0}\' was not found on host \'{1}\'' + ''.format(scsi_address, hostname)) + disk_id = scsi_address_to_lun[scsi_address].canonicalName + log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\'' + ''.format(hostname, disk_id, scsi_address)) + log.trace('Erasing disk partitions on disk \'{0}\' in host \'{1}\'' + ''.format(disk_id, hostname)) + salt.utils.vmware.erase_disk_partitions(service_instance, + host_ref, disk_id, + hostname=hostname) + log.info('Erased disk partitions on disk \'{0}\' on host \'{1}\'' + ''.format(disk_id, esxi_host)) + return True + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From f76115fc67063986158936af173642679c87b644 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 24 
Sep 2017 19:57:43 -0400 Subject: [PATCH 316/348] Added salt.modules.vsphere.list_disk_partitions --- salt/modules/vsphere.py | 69 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index ae331579672..a7b36c4a9c4 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -5968,6 +5968,75 @@ def erase_disk_partitions(disk_id=None, scsi_address=None, return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def list_disk_partitions(disk_id=None, scsi_address=None, + service_instance=None): + ''' + Lists the partitions on a disk. + The disk can be specified either by the canonical name, or by the + scsi_address. + + disk_id + Canonical name of the disk. + Either ``disk_id`` or ``scsi_address`` needs to be specified + (``disk_id`` supersedes ``scsi_address``. + + scsi_address` + Scsi address of the disk. + ``disk_id`` or ``scsi_address`` needs to be specified + (``disk_id`` supersedes ``scsi_address``. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.list_disk_partitions scsi_address='vmhaba0:C0:T0:L0' + + salt '*' vsphere.list_disk_partitions disk_id='naa.000000000000001' + ''' + if not disk_id and not scsi_address: + raise ArgumentValueError('Either \'disk_id\' or \'scsi_address\' ' + 'needs to be specified') + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + if not disk_id: + scsi_address_to_lun = \ + salt.utils.vmware.get_scsi_address_to_lun_map(host_ref) + if scsi_address not in scsi_address_to_lun: + raise VMwareObjectRetrievalError( + 'Scsi lun with address \'{0}\' was not found on host \'{1}\'' + ''.format(scsi_address, hostname)) + disk_id = scsi_address_to_lun[scsi_address].canonicalName + log.trace('[{0}] Got disk id \'{1}\' for scsi address \'{2}\'' + ''.format(hostname, disk_id, scsi_address)) + log.trace('Listing disk partitions on disk \'{0}\' in host \'{1}\'' + ''.format(disk_id, hostname)) + partition_info = \ + salt.utils.vmware.get_disk_partition_info(host_ref, disk_id) + ret_list = [] + # NOTE: 1. The layout view has an extra 'None' partition for free space + # 2. 
@depends(HAS_PYVMOMI)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def list_diskgroups(cache_disk_ids=None, service_instance=None):
    '''
    Returns a list of disk group dict representations on an ESXi host.

    The list of disk groups can be filtered by the cache disks'
    canonical names. If no filtering is applied, all disk groups are
    returned.

    cache_disk_ids:
        List of cache disk canonical names of the disk groups to be retrieved.
        Default is None (no filtering; all disk groups are returned).

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.list_diskgroups

        salt '*' vsphere.list_diskgroups cache_disk_ids='[naa.000000000000001]'
    '''
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    log.trace('Listing diskgroups in \'{0}\''.format(hostname))
    # Retrieve all disk groups when no cache disk filter was provided
    # (idiomatic boolean instead of 'True if not ... else False')
    get_all_diskgroups = not cache_disk_ids
    ret_list = []
    for dg in salt.utils.vmware.get_diskgroups(host_ref, cache_disk_ids,
                                               get_all_diskgroups):
        ret_list.append(
            {'cache_disk': dg.ssd.canonicalName,
             'capacity_disks': [d.canonicalName for d in dg.nonSsd]})
    return ret_list
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True,
                     service_instance=None):
    '''
    Creates disk group on an ESXi host with the specified cache and
    capacity disks.

    cache_disk_id
        The canonical name of the disk to be used as a cache. The disk must be
        ssd.

    capacity_disk_ids
        A list containing canonical names of the capacity disks. Must contain
        at least one id.

    safety_checks
        Specify whether to perform safety checks or to skip the checks and try
        performing the required task. Default value is True.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_diskgroup cache_disk_id='naa.000000000000001'
                capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
    '''
    log.trace('Validating diskgroup input')
    schema = DiskGroupsDiskIdSchema.serialize()
    try:
        jsonschema.validate(
            {'diskgroups': [{'cache_id': cache_disk_id,
                             'capacity_ids': capacity_disk_ids}]},
            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    if safety_checks:
        diskgroups = \
            salt.utils.vmware.get_diskgroups(host_ref, [cache_disk_id])
        if diskgroups:
            # BUG FIX: original message read 'already exists ESXi host'
            raise VMwareObjectExistsError(
                'Diskgroup with cache disk id \'{0}\' already exists on ESXi '
                'host \'{1}\''.format(cache_disk_id, hostname))
    # The cache disk id goes first so all requested disks are resolved in one
    # call
    disk_ids = [cache_disk_id] + capacity_disk_ids
    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=disk_ids)
    # 'disk_id' instead of 'id' to avoid shadowing the builtin
    for disk_id in disk_ids:
        if not [d for d in disks if d.canonicalName == disk_id]:
            # BUG FIX: original used '{0}' twice, so the hostname never
            # appeared in the error message
            raise VMwareObjectRetrievalError(
                'No disk with id \'{0}\' was found in ESXi host \'{1}\''
                ''.format(disk_id, hostname))
    cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0]
    capacity_disks = [d for d in disks
                      if d.canonicalName in capacity_disk_ids]
    vsan_disk_mgmt_system = \
        salt.utils.vsan.get_vsan_disk_management_system(service_instance)
    # Return value of create_diskgroup is not needed; the unused 'dg' local
    # was removed
    salt.utils.vsan.create_diskgroup(service_instance,
                                     vsan_disk_mgmt_system,
                                     host_ref,
                                     cache_disk,
                                     capacity_disks)
    return True
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def add_capacity_to_diskgroup(cache_disk_id, capacity_disk_ids,
                              safety_checks=True, service_instance=None):
    '''
    Adds capacity disks to the disk group with the specified cache disk.

    cache_disk_id
        The canonical name of the cache disk.

    capacity_disk_ids
        A list containing canonical names of the capacity disks to add.

    safety_checks
        Specify whether to perform safety checks or to skip the checks and try
        performing the required task. Default value is True.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.add_capacity_to_diskgroup
                cache_disk_id='naa.000000000000001'
                capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
    '''
    log.trace('Validating diskgroup input')
    schema = DiskGroupsDiskIdSchema.serialize()
    try:
        jsonschema.validate(
            {'diskgroups': [{'cache_id': cache_disk_id,
                             'capacity_ids': capacity_disk_ids}]},
            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
    if safety_checks:
        # 'disk_id' instead of 'id' to avoid shadowing the builtin
        for disk_id in capacity_disk_ids:
            if not [d for d in disks if d.canonicalName == disk_id]:
                raise VMwareObjectRetrievalError(
                    'No disk with id \'{0}\' was found in ESXi host \'{1}\''
                    ''.format(disk_id, hostname))
    diskgroups = \
        salt.utils.vmware.get_diskgroups(
            host_ref, cache_disk_ids=[cache_disk_id])
    if not diskgroups:
        # BUG FIX: original formatted with undefined name 'esxi_host'
        # (NameError on this error path); use 'hostname' as elsewhere
        raise VMwareObjectRetrievalError(
            'No diskgroup with cache disk id \'{0}\' was found in ESXi '
            'host \'{1}\''.format(cache_disk_id, hostname))
    vsan_disk_mgmt_system = \
        salt.utils.vsan.get_vsan_disk_management_system(service_instance)
    # BUG FIX: original passed undefined 'disk_groups[0]' (NameError);
    # the variable is named 'diskgroups'
    salt.utils.vsan.add_capacity_to_diskgroup(service_instance,
                                              vsan_disk_mgmt_system,
                                              host_ref,
                                              diskgroups[0],
                                              disks)
    return True
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def remove_capacity_from_diskgroup(cache_disk_id, capacity_disk_ids,
                                   data_evacuation=True, safety_checks=True,
                                   service_instance=None):
    '''
    Remove capacity disks from the disk group with the specified cache disk.

    cache_disk_id
        The canonical name of the cache disk.

    capacity_disk_ids
        A list containing canonical names of the capacity disks to remove.

    data_evacuation
        Specifies whether to gracefully evacuate the data on the capacity disks
        before removing them from the disk group. Default value is True.

    safety_checks
        Specify whether to perform safety checks or to skip the checks and try
        performing the required task. Default value is True.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.remove_capacity_from_diskgroup
                cache_disk_id='naa.000000000000001'
                capacity_disk_ids='[naa.000000000000002, naa.000000000000003]'
    '''
    log.trace('Validating diskgroup input')
    schema = DiskGroupsDiskIdSchema.serialize()
    try:
        jsonschema.validate(
            {'diskgroups': [{'cache_id': cache_disk_id,
                             'capacity_ids': capacity_disk_ids}]},
            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=capacity_disk_ids)
    if safety_checks:
        # 'disk_id' instead of 'id' to avoid shadowing the builtin
        for disk_id in capacity_disk_ids:
            if not [d for d in disks if d.canonicalName == disk_id]:
                raise VMwareObjectRetrievalError(
                    'No disk with id \'{0}\' was found in ESXi host \'{1}\''
                    ''.format(disk_id, hostname))
    diskgroups = \
        salt.utils.vmware.get_diskgroups(host_ref,
                                         cache_disk_ids=[cache_disk_id])
    if not diskgroups:
        raise VMwareObjectRetrievalError(
            'No diskgroup with cache disk id \'{0}\' was found in ESXi '
            'host \'{1}\''.format(cache_disk_id, hostname))
    log.trace('data_evacuation = {0}'.format(data_evacuation))
    salt.utils.vsan.remove_capacity_from_diskgroup(
        service_instance, host_ref, diskgroups[0],
        capacity_disks=[d for d in disks
                        if d.canonicalName in capacity_disk_ids],
        data_evacuation=data_evacuation)
    return True
a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -6279,6 +6279,47 @@ def remove_capacity_from_diskgroup(cache_disk_id, capacity_disk_ids, return True +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def remove_diskgroup(cache_disk_id, data_accessibility=True, + service_instance=None): + ''' + Remove the diskgroup with the specified cache disk. + + cache_disk_id + The canonical name of the cache disk. + + data_accessibility + Specifies whether to ensure data accessibility. Default value is True. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.remove_diskgroup cache_disk_id='naa.000000000000001' + ''' + log.trace('Validating diskgroup input') + schema = DiskGroupsDiskIdSchema.serialize() + host_ref = _get_proxy_target(service_instance) + hostname = __proxy__['esxi.get_details']()['esxi_host'] + diskgroups = \ + salt.utils.vmware.get_diskgroups(host_ref, + cache_disk_ids=[cache_disk_id]) + if not diskgroups: + raise VMwareObjectRetrievalError( + 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' + 'host \'{1}\''.format(cache_disk_id, hostname)) + log.trace('data accessibility = {0}'.format(data_accessibility)) + salt.utils.vsan.remove_diskgroup( + service_instance, host_ref, diskgroups[0], + data_accessibility=data_accessibility) + return True + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 8bd7993e973c26e1528565784e07dd1b54710350 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 06:12:09 -0400 Subject: [PATCH 323/348] Added SimpleHostCacheSchema JSON schema --- salt/config/schemas/esxi.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py index 2a894188612..f2ad765f0c7 100644 --- 
a/salt/config/schemas/esxi.py +++ b/salt/config/schemas/esxi.py @@ -18,6 +18,7 @@ from salt.utils.schema import (DefinitionsSchema, ComplexSchemaItem, ArrayItem, IntegerItem, + BooleanItem, StringItem) @@ -57,6 +58,22 @@ class DiskGroupsDiskIdSchema(DefinitionsSchema): required=True) +class SimpleHostCacheSchema(Schema): + ''' + Simplified Schema of ESXi host cache + ''' + + title = 'Simple Host Cache Schema' + description = 'Simplified schema of the ESXi host cache' + enabled = BooleanItem( + title='Enabled', + required=True) + datastore_name = StringItem(title='Datastore Name', + required=True) + swap_size_MiB = IntegerItem(title='Host cache swap size in MiB', + minimum=1) + + class EsxiProxySchema(Schema): ''' Schema of the esxi proxy input From 85388847044637de313fb5611486d66278bb528e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 06:14:55 -0400 Subject: [PATCH 324/348] Added salt.modules.vsphere.get_host_cache --- salt/modules/vsphere.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 43c4884eec7..631cf355fd1 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -6320,6 +6320,37 @@ def remove_diskgroup(cache_disk_id, data_accessibility=True, return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxi') +@gets_service_instance_via_proxy +def get_host_cache(service_instance=None): + ''' + Returns the host cache configuration on the proxy host. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def configure_host_cache(enabled, datastore=None, swap_size_MiB=None,
                         service_instance=None):
    '''
    Configures the host cache on the selected host.

    enabled
        Boolean flag specifying whether the host cache is enabled.

    datastore
        Name of the datastore that contains the host cache. Must be set if
        enabled is ``true``.

    swap_size_MiB
        Swap size in mebibytes (MiB). Needs to be set if enabled is ``true``.
        Must be smaller than the datastore size.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.configure_host_cache enabled=False

        salt '*' vsphere.configure_host_cache enabled=True datastore=ds1
                swap_size_MiB=1024
    '''
    log.debug('Validating host cache input')
    schema = SimpleHostCacheSchema.serialize()
    try:
        jsonschema.validate({'enabled': enabled,
                             'datastore_name': datastore,
                             'swap_size_MiB': swap_size_MiB},
                            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    if not enabled:
        raise ArgumentValueError('Disabling the host cache is not supported')
    # NOTE(review): the original initialized an unused 'ret_dict' here; the
    # dead assignment was removed.
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    if datastore:
        ds_refs = salt.utils.vmware.get_datastores(
            service_instance, host_ref, datastore_names=[datastore])
        if not ds_refs:
            # BUG FIX: original formatted with undefined name
            # 'datastore_name' (NameError on this error path); the parameter
            # is called 'datastore'
            raise VMwareObjectRetrievalError(
                'Datastore \'{0}\' was not found on host '
                '\'{1}\''.format(datastore, hostname))
        ds_ref = ds_refs[0]
    salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB)
    return True
insertions(+), 1 deletion(-) diff --git a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py index f2ad765f0c7..8bea76f4064 100644 --- a/salt/config/schemas/esxi.py +++ b/salt/config/schemas/esxi.py @@ -17,9 +17,36 @@ from salt.utils.schema import (DefinitionsSchema, Schema, ComplexSchemaItem, ArrayItem, + DictItem, IntegerItem, BooleanItem, - StringItem) + StringItem, + OneOfItem) + + +class VMwareScsiAddressItem(StringItem): + pattern = r'vmhba\d+:C\d+:T\d+:L\d+' + + +class DiskGroupDiskScsiAddressItem(ComplexSchemaItem): + ''' + Schema item of a ESXi host disk group containing disk SCSI addresses + ''' + + title = 'Diskgroup Disk Scsi Address Item' + description = 'ESXi host diskgroup item containing disk SCSI addresses' + + + cache_scsi_addr = VMwareScsiAddressItem( + title='Cache Disk Scsi Address', + description='Specifies the SCSI address of the cache disk', + required=True) + + capacity_scsi_addrs = ArrayItem( + title='Capacity Scsi Addresses', + description='Array with the SCSI addresses of the capacity disks', + items=VMwareScsiAddressItem(), + min_items=1) class DiskGroupDiskIdItem(ComplexSchemaItem): @@ -43,6 +70,24 @@ class DiskGroupDiskIdItem(ComplexSchemaItem): min_items=1) +class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema): + ''' + Schema of ESXi host diskgroups containing disk SCSI addresses + ''' + + title = 'Diskgroups Disk Scsi Address Schema' + description = 'ESXi host diskgroup schema containing disk SCSI addresses' + disk_groups = ArrayItem( + title='Diskgroups', + description='List of diskgroups in an ESXi host', + min_items = 1, + items=DiskGroupDiskScsiAddressItem(), + required=True) + erase_disks = BooleanItem( + title='Erase Diskgroup Disks', + required=True) + + class DiskGroupsDiskIdSchema(DefinitionsSchema): ''' Schema of ESXi host diskgroups containing disk ids From 23e2fd3aefaa7cbd24026c227964eb2826a826dd Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 06:20:50 -0400 Subject: [PATCH 327/348] 
Added VmfsDatastoreSchema and HostCacheSchema JSON schemas used in host cache state functions --- salt/config/schemas/esxi.py | 80 ++++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py index 8bea76f4064..5c8b7596c3f 100644 --- a/salt/config/schemas/esxi.py +++ b/salt/config/schemas/esxi.py @@ -77,7 +77,7 @@ class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema): title = 'Diskgroups Disk Scsi Address Schema' description = 'ESXi host diskgroup schema containing disk SCSI addresses' - disk_groups = ArrayItem( + diskgroups = ArrayItem( title='Diskgroups', description='List of diskgroups in an ESXi host', min_items = 1, @@ -103,6 +103,84 @@ class DiskGroupsDiskIdSchema(DefinitionsSchema): required=True) +class VmfsDatastoreDiskIdItem(ComplexSchemaItem): + ''' + Schema item of a VMFS datastore referencing a backing disk id + ''' + + title = 'VMFS Datastore Disk Id Item' + description = 'VMFS datastore item referencing a backing disk id' + name = StringItem( + title='Name', + description='Specifies the name of the VMFS datastore', + required=True) + backing_disk_id = StringItem( + title='Backing Disk Id', + description=('Specifies the id of the disk backing the VMFS ' + 'datastore'), + pattern=r'[^\s]+', + required=True) + vmfs_version = IntegerItem( + title='VMFS Version', + description='VMFS version', + enum=[1, 2, 3, 5]) + + +class VmfsDatastoreDiskScsiAddressItem(ComplexSchemaItem): + ''' + Schema item of a VMFS datastore referencing a backing disk SCSI address + ''' + + title = 'VMFS Datastore Disk Scsi Address Item' + description = 'VMFS datastore item referencing a backing disk SCSI address' + name = StringItem( + title='Name', + description='Specifies the name of the VMFS datastore', + required=True) + backing_disk_scsi_addr = VMwareScsiAddressItem( + title='Backing Disk Scsi Address', + description=('Specifies the SCSI address of the disk backing the VMFS ' + 
@depends(HAS_PYVMOMI)
@depends(HAS_JSONSCHEMA)
@supports_proxies('esxi')
@gets_service_instance_via_proxy
def create_vmfs_datastore(datastore_name, disk_id, vmfs_major_version,
                          safety_checks=True, service_instance=None):
    '''
    Creates a VMFS datastore backed by the specified disk on an ESXi host.

    datastore_name
        The name of the datastore to be created.

    disk_id
        The disk id (canonical name) on which the datastore is created.

    vmfs_major_version
        The VMFS major version.

    safety_checks
        Specify whether to perform safety checks or to skip the checks and try
        performing the required task. Default is True.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_vmfs_datastore datastore_name=ds1
                disk_id='naa.000000000000001' vmfs_major_version=5
    '''
    log.debug('Validating vmfs datastore input')
    schema = VmfsDatastoreSchema.serialize()
    try:
        jsonschema.validate(
            {'datastore': {'name': datastore_name,
                           'backing_disk_id': disk_id,
                           'vmfs_version': vmfs_major_version}},
            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    # BUG FIX: the original only retrieved 'disks' inside the safety_checks
    # branch but used disks[0] unconditionally below, which raised NameError
    # when safety_checks=False; the disk lookup is now unconditional.
    disks = salt.utils.vmware.get_disks(host_ref, disk_ids=[disk_id])
    if safety_checks and not disks:
        raise VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''.format(disk_id,
                                                                hostname))
    # Return value of create_vmfs_datastore is not needed; the unused
    # 'ds_ref' local was removed
    salt.utils.vmware.create_vmfs_datastore(
        host_ref, datastore_name, disks[0], vmfs_major_version)
    return True
@depends(HAS_PYVMOMI)
@supports_proxies('esxi', 'esxcluster', 'esxdatacenter')
@gets_service_instance_via_proxy
def remove_datastore(datastore, service_instance=None):
    '''
    Removes a datastore. If multiple datastores with the same name are found,
    an error is raised.

    datastore
        Datastore name

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.remove_datastore ds_name
    '''
    log.trace('Removing datastore \'{0}\''.format(datastore))
    target = _get_proxy_target(service_instance)
    # NOTE(review): removed the original's misspelled and unused local
    # 'taget_name = target.name'
    datastores = salt.utils.vmware.get_datastores(
        service_instance,
        reference=target,
        datastore_names=[datastore])
    if not datastores:
        raise VMwareObjectRetrievalError(
            'Datastore \'{0}\' was not found'.format(datastore))
    if len(datastores) > 1:
        raise VMwareObjectRetrievalError(
            'Multiple datastores \'{0}\' were found'.format(datastore))
    salt.utils.vmware.remove_datastore(service_instance, datastores[0])
    return True
See Issue #29537.') + raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -207,24 +217,11 @@ if esx_cli: else: HAS_ESX_CLI = False -log = logging.getLogger(__name__) - __virtualname__ = 'vsphere' __proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter', 'vcenter'] def __virtual__(): - if not HAS_JSONSCHEMA: - return False, 'Execution module did not load: jsonschema not found' - if not HAS_PYVMOMI: - return False, 'Execution module did not load: pyVmomi not found' - - # We check the supported vim versions to infer the pyVmomi version - if 'vim25/6.0' in VmomiSupport.versionMap and \ - sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): - - return False, ('Execution module did not load: Incompatible versions ' - 'of Python and pyVmomi present. See Issue #29537.') return __virtualname__ From 152ce0b691bf54d2e52a125488379a5cf8f04677 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 18:08:43 -0400 Subject: [PATCH 332/348] Added salt.states.esxi additional imports and pyVmomi/python compatibility check --- salt/states/esxi.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 12240422e4d..fa6ba13df39 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -90,20 +90,46 @@ ESXi Proxy Minion, please refer to the configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
- ''' # Import Python Libs from __future__ import absolute_import import logging +import sys +import re # Import Salt Libs from salt.ext import six import salt.utils.files -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, InvalidConfigError, \ + VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError +from salt.utils.decorators import depends +from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ + HostCacheSchema + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) +try: + from pyVmomi import vim, vmodl, VmomiSupport + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + log.error('pyVmomi not loaded: Incompatible versions ' + 'of Python. See Issue #29537.') + raise ImportError() + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + def __virtual__(): return 'esxi.cmd' in __salt__ From 1423d9dfb999cf480a5976056a790267ce7e4e35 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 18:10:38 -0400 Subject: [PATCH 333/348] Added diskgroups_configured state that configures VSAN diskgroups on ESXi hosts --- salt/states/esxi.py | 276 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index fa6ba13df39..208af27a8fc 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -1024,6 +1024,282 @@ def syslog_configured(name, return ret +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +def diskgroups_configured(name, diskgroups, erase_disks=False): + ''' + Configures the disk groups to use for vsan. 
+ + It will do the following: + (1) checks for if all disks in the diskgroup spec exist and errors if they + don't + (2) creates diskgroups with the correct disk configurations if diskgroup + (identified by the cache disk canonical name) doesn't exist + (3) adds extra capacity disks to the existing diskgroup + + State input example + ------------------- + + .. code:: python + + { + 'cache_scsi_addr': 'vmhba1:C0:T0:L0', + 'capacity_scsi_addrs': [ + 'vmhba2:C0:T0:L0', + 'vmhba3:C0:T0:L0', + 'vmhba4:C0:T0:L0', + ] + } + + name + Mandatory state name. + + diskgroups + Disk group representation containing scsi disk addresses. + Scsi addresses are expected for disks in the diskgroup: + + erase_disks + Specifies whether to erase all partitions on all disks member of the + disk group before the disk group is created. Default vaule is False. + ''' + proxy_details = __salt__['esxi.get_details']() + hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ + else proxy_details['esxi_host'] + log.info('Running state {0} for host \'{1}\''.format(name, hostname)) + # Variable used to return the result of the invocation + ret = {'name': name, 'result': None, 'changes': {}, + 'pchanges': {}, 'comments': None} + # Signals if errors have been encountered + errors = False + # Signals if changes are required + changes = False + comments = [] + diskgroup_changes = {} + si = None + try: + log.trace('Validating diskgroups_configured input') + schema = DiskGroupsDiskScsiAddressSchema.serialize() + try: + jsonschema.validate({'diskgroups': diskgroups, + 'erase_disks': erase_disks}, schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + si = __salt__['vsphere.get_service_instance_via_proxy']() + host_disks = __salt__['vsphere.list_disks'](service_instance=si) + if not host_disks: + raise VMwareObjectRetrievalError( + 'No disks retrieved from host \'{0}\''.format(hostname)) + scsi_addr_to_disk_map = {d['scsi_address']: d for d in 
host_disks} + log.trace('scsi_addr_to_disk_map = {0}'.format(scsi_addr_to_disk_map)) + existing_diskgroups = \ + __salt__['vsphere.list_diskgroups'](service_instance=si) + cache_disk_to_existing_diskgroup_map = \ + {dg['cache_disk']: dg for dg in existing_diskgroups} + except CommandExecutionError as err: + log.error('Error: {0}'.format(err)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False if not __opts__['test'] else None, + 'comment': str(err)}) + return ret + + # Iterate through all of the disk groups + for idx, dg in enumerate(diskgroups): + # Check for cache disk + if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: + comments.append('No cache disk with scsi address \'{0}\' was ' + 'found.'.format(dg['cache_scsi_addr'])) + log.error(comments[-1]) + errors = True + continue + + # Check for capacity disks + cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] + cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'], + cache_disk_id) + bad_scsi_addrs = [] + capacity_disk_ids = [] + capacity_disk_displays = [] + for scsi_addr in dg['capacity_scsi_addrs']: + if not scsi_addr in scsi_addr_to_disk_map: + bad_scsi_addrs.append(scsi_addr) + continue + capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) + capacity_disk_displays.append( + '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) + if bad_scsi_addrs: + comments.append('Error in diskgroup #{0}: capacity disks with ' + 'scsi addresses {1} were not found.' + ''.format(idx, + ', '.join(['\'{0}\''.format(a) + for a in bad_scsi_addrs]))) + log.error(comments[-1]) + errors = True + continue + + if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): + # A new diskgroup needs to be created + log.trace('erase_disks = {0}'.format(erase_disks)) + if erase_disks: + if __opts__['test']: + comments.append('State {0} will ' + 'erase all disks of disk group #{1}; ' + 'cache disk: \'{2}\', ' + 'capacity disk(s): {3}.' 
+ ''.format(name, idx, cache_disk_display, + ', '.join( + ['\'{}\''.format(a) for a in + capacity_disk_displays]))) + else: + # Erase disk group disks + for disk_id in ([cache_disk_id] + capacity_disk_ids): + __salt__['vsphere.erase_disk_partitions']( + disk_id=disk_id, service_instance=si) + comments.append('Erased disks of diskgroup #{0}; ' + 'cache disk: \'{1}\', capacity disk(s): ' + '{2}'.format( + idx, cache_disk_display, + ', '.join(['\'{0}\''.format(a) for a in + capacity_disk_displays]))) + log.info(comments[-1]) + + if __opts__['test']: + comments.append('State {0} will create ' + 'the disk group #{1}; cache disk: \'{2}\', ' + 'capacity disk(s): {3}.' + .format(name, idx, cache_disk_display, + ', '.join(['\'{0}\''.format(a) for a in + capacity_disk_displays]))) + log.info(comments[-1]) + changes = True + continue + try: + __salt__['vsphere.create_diskgroup'](cache_disk_id, + capacity_disk_ids, + safety_checks=False, + service_instance=si) + except VMwareSaltError as err: + comments.append('Error creating disk group #{0}: ' + '{1}.'.format(idx, err)) + log.error(comments[-1]) + errors = True + continue + + comments.append('Created disk group #\'{0}\'.'.format(idx)) + log.info(comments[-1]) + diskgroup_changes[str(idx)] = \ + {'new': {'cache': cache_disk_display, + 'capacity': capacity_disk_displays}} + changes = True + continue + + # The diskgroup exists; checking the capacity disks + log.debug('Disk group #{0} exists. 
Checking capacity disks: ' + '{1}.'.format(idx, capacity_disk_displays)) + existing_diskgroup = \ + cache_disk_to_existing_diskgroup_map.get(cache_disk_id) + existing_capacity_disk_displays = \ + ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0], disk_id) + for disk_id in existing_diskgroup['capacity_disks']] + # Populate added disks and removed disks and their displays + added_capacity_disk_ids = [] + added_capacity_disk_displays = [] + removed_capacity_disk_ids = [] + removed_capacity_disk_displays = [] + for disk_id in capacity_disk_ids: + if disk_id not in existing_diskgroup['capacity_disks']: + disk_scsi_addr = [d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0] + added_capacity_disk_ids.append(disk_id) + added_capacity_disk_displays.append( + '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) + for disk_id in existing_diskgroup['capacity_disks']: + if disk_id not in capacity_disk_ids: + disk_scsi_addr = [d['scsi_address'] for d in host_disks + if d['id'] == disk_id][0] + removed_capacity_disk_ids.append(disk_id) + removed_capacity_disk_displays.append( + '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) + + log.debug('Disk group #{0}: existing capacity disk ids: {1}; added ' + 'capacity disk ids: {2}; removed capacity disk ids: {3}' + ''.format(idx, existing_capacity_disk_displays, + added_capacity_disk_displays, + removed_capacity_disk_displays)) + + #TODO revisit this when removing capacity disks is supported + if removed_capacity_disk_ids: + comments.append( + 'Error removing capacity disk(s) {0} from disk group #{1}; ' + 'operation is not supported.' 
+ ''.format(', '.join(['\'{0}\''.format(id) for id in + removed_capacity_disk_displays]), idx)) + log.error(comments[-1]) + errors = True + continue + + if added_capacity_disk_ids: + # Capacity disks need to be added to disk group + + # Building a string representation of the capacity disks + # that need to be added + s = ', '.join(['\'{0}\''.format(id) for id in + added_capacity_disk_displays]) + if __opts__['test']: + comments.append('State {0} will add ' + 'capacity disk(s) {1} to disk group #{2}.' + ''.format(name, s, idx)) + log.info(comments[-1]) + changes = True + continue + try: + __salt__['vsphere.add_capacity_to_diskgroup']( + cache_disk_id, + added_capacity_disk_ids, + safety_checks=False, + service_instance=si) + except VMwareSaltError as err: + comments.append('Error adding capacity disk(s) {0} to ' + 'disk group #{1}: {2}.'.format(s, idx, err)) + log.error(comments[-1]) + errors = True + continue + + com = ('Added capacity disk(s) {0} to disk group #{1}' + ''.format(s, idx)) + log.info(com) + comments.append(com) + diskgroup_changes[str(idx)] = \ + {'new': {'cache': cache_disk_display, + 'capacity': capacity_disk_displays}, + 'old': {'cache': cache_disk_display, + 'capacity': existing_capacity_disk_displays}} + changes = True + continue + + # No capacity needs to be added + s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
+ ''.format(idx)) + log.info(s) + comments.append(s) + __salt__['vsphere.disconnect'](si) + + #Build the final return message + result = (True if not (changes or errors) else # no changes/errors + None if __opts__['test'] else # running in test mode + False if errors else True) # found errors; defaults to True + ret.update({'result': result, + 'comment': '\n'.join(comments)}) + if changes: + if __opts__['test']: + ret['pchanges'] = diskgroup_changes + elif changes: + ret['changes'] = diskgroup_changes + return ret + + def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from From c1d36f53c2594d88dbb7efcc24c4ffb1d16d5d63 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 25 Sep 2017 18:11:47 -0400 Subject: [PATCH 334/348] Added host_cache_configured state that configures the host cache on ESXi hosts --- salt/states/esxi.py | 298 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 298 insertions(+) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 208af27a8fc..c94daef37b3 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -1300,6 +1300,304 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): return ret +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +def host_cache_configured(name, enabled, datastore, swap_size='100%', + dedicated_backing_disk=False, + erase_backing_disk=False): + ''' + Configures the host cache used for swapping. + + It will do the following: + (1) checks if backing disk exists + (2) creates the VMFS datastore if doesn't exist (datastore partition will + be created and use the entire disk + (3) raises an error if dedicated_backing_disk is True and partitions + already exist on the backing disk + (4) configures host_cache to use a portion of the datastore for caching + (either a specific size or a percentage of the datastore) + + State input examples + -------------------- + + Percentage swap size (can't be 100%) + + .. 
code:: python + + { + 'enabled': true, + 'datastore': { + 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', + 'vmfs_version': 5, + 'name': 'hostcache' + } + 'dedicated_backing_disk': false + 'swap_size': '98%', + } + + + .. code:: python + + Fixed sized swap size + + { + 'enabled': true, + 'datastore': { + 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', + 'vmfs_version': 5, + 'name': 'hostcache' + } + 'dedicated_backing_disk': true + 'swap_size': '10GiB', + } + + name + Mandatory state name. + + enabled + Specifies whether the host cache is enabled. + + datastore + Specifies the host cache datastore. + + swap_size + Specifies the size of the host cache swap. Can be a percentage or a + value in GiB. Default value is ``100%``. + + dedicated_backing_disk + Specifies whether the backing disk is dedicated to the host cache which + means it must have no other partitions. Default is False + + erase_backing_disk + Specifies whether to erase all partitions on the backing disk before + the datastore is created. Default vaule is False. 
+ ''' + log.trace('enabled = {0}'.format(enabled)) + log.trace('datastore = {0}'.format(datastore)) + log.trace('swap_size = {0}'.format(swap_size)) + log.trace('erase_backing_disk = {0}'.format(erase_backing_disk)) + # Variable used to return the result of the invocation + proxy_details = __salt__['esxi.get_details']() + hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ + else proxy_details['esxi_host'] + log.trace('hostname = {0}'.format(hostname)) + log.info('Running host_cache_swap_configured for host ' + '\'{0}\''.format(hostname)) + ret = {'name': hostname, 'comment': 'Default comments', + 'result': None, 'changes': {}, 'pchanges': {}} + result = None if __opts__['test'] else True #We assume success + needs_setting = False + comments = [] + changes = {} + si = None + try: + log.debug('Validating host_cache_configured input') + schema = HostCacheSchema.serialize() + try: + jsonschema.validate({'enabled': enabled, + 'datastore': datastore, + 'swap_size': swap_size, + 'erase_backing_disk': erase_backing_disk}, + schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + m = re.match(r'(\d+)(%|GiB)', swap_size) + swap_size_value = int(m.group(1)) + swap_type = m.group(2) + log.trace('swap_size_value = {0}; swap_type = {1}'.format( + swap_size_value, swap_type)) + si = __salt__['vsphere.get_service_instance_via_proxy']() + host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) + + # Check enabled + if host_cache['enabled'] != enabled: + changes.update({'enabled': {'old': host_cache['enabled'], + 'new': enabled}}) + needs_setting = True + + + # Check datastores + existing_datastores = None + if host_cache.get('datastore'): + existing_datastores = \ + __salt__['vsphere.list_datastores_via_proxy']( + datastore_names=[datastore['name']], + service_instance=si) + # Retrieve backing disks + existing_disks = __salt__['vsphere.list_disks']( + scsi_addresses=[datastore['backing_disk_scsi_addr']], + 
service_instance=si) + if not existing_disks: + raise VMwareObjectRetrievalError( + 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' + ''.format(datastore['backing_disk_scsi_addr'], hostname)) + backing_disk = existing_disks[0] + backing_disk_display = '{0} (id:{1})'.format( + backing_disk['scsi_address'], backing_disk['id']) + log.trace('backing_disk = {0}'.format(backing_disk_display)) + + existing_datastore = None + if not existing_datastores: + # Check if disk needs to be erased + if erase_backing_disk: + if __opts__['test']: + comments.append('State {0} will erase ' + 'the backing disk \'{1}\' on host \'{2}\'.' + ''.format(name, backing_disk_display, + hostname)) + log.info(comments[-1]) + else: + # Erase disk + __salt__['vsphere.erase_disk_partitions']( + disk_id=backing_disk['id'], service_instance=si) + comments.append('Erased backing disk \'{0}\' on host ' + '\'{1}\'.'.format(backing_disk_display, + hostname)) + log.info(comments[-1]) + # Create the datastore + if __opts__['test']: + comments.append('State {0} will create ' + 'the datastore \'{1}\', with backing disk ' + '\'{2}\', on host \'{3}\'.' + ''.format(name, datastore['name'], + backing_disk_display, hostname)) + log.info(comments[-1]) + else: + if dedicated_backing_disk: + # Check backing disk doesn't already have partitions + partitions = __salt__['vsphere.list_disk_partitions']( + disk_id=backing_disk['id'], service_instance=si) + log.trace('partitions = {0}'.format(partitions)) + # We will ignore the mbr partitions + non_mbr_partitions = [p for p in partitions + if p['format'] != 'mbr'] + if len(non_mbr_partitions) > 0: + raise VMwareApiError( + 'Backing disk \'{0}\' has unexpected partitions' + ''.format(backing_disk_display)) + __salt__['vsphere.create_vmfs_datastore']( + datastore['name'], existing_disks[0]['id'], + datastore['vmfs_version'], service_instance=si) + comments.append('Created vmfs datastore \'{0}\', backed by ' + 'disk \'{1}\', on host \'{2}\'.' 
+ ''.format(datastore['name'], + backing_disk_display, hostname)) + log.info(comments[-1]) + changes.update( + {'datastore': + {'new': {'name': datastore['name'], + 'backing_disk': backing_disk_display}}}) + existing_datastore = \ + __salt__['vsphere.list_datastores_via_proxy']( + datastore_names=[datastore['name']], + service_instance=si)[0] + needs_setting = True + else: + # Check datastore is backed by the correct disk + if not existing_datastores[0].get('backing_disk_ids'): + raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' + 'backing disk' + ''.format(datastore['name'])) + if backing_disk['id'] not in \ + existing_datastores[0]['backing_disk_ids']: + + raise VMwareSaltError( + 'Datastore \'{0}\' is not backed by the correct disk: ' + 'expected \'{1}\'; got {2}' + ''.format( + datastore['name'], backing_disk['id'], + ', '.join( + ['\'{0}\''.format(disk) for disk in + existing_datastores[0]['backing_disk_ids']]))) + + comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' + 'and is backed by disk \'{2}\'. 
Nothing to be ' + 'done.'.format(datastore['name'], hostname, + backing_disk_display)) + existing_datastore = existing_datastores[0] + log.trace('existing_datastore = {0}'.format(existing_datastore)) + log.info(comments[-1]) + + + if existing_datastore: + # The following comparisons can be done if the existing_datastore + # is set; it may not be set if running in test mode + # + # We support percent, as well as MiB, we will convert the size + # to MiB, multiples of 1024 (VMware SDK limitation) + if swap_type == '%': + # Percentage swap size + # Convert from bytes to MiB + raw_size_MiB = (swap_size_value/100.0) * \ + (existing_datastore['capacity']/1024/1024) + else: + raw_size_MiB = swap_size_value * 1024 + log.trace('raw_size = {0}MiB'.format(raw_size_MiB)) + swap_size_MiB= int(raw_size_MiB/1024)*1024 + log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB)) + existing_swap_size_MiB = 0 + m = re.match('(\d+)MiB', host_cache.get('swap_size')) if \ + host_cache.get('swap_size') else None + if m: + # if swap_size from the host is set and has an expected value + # we are going to parse it to get the number of MiBs + existing_swap_size_MiB = int(m.group(1)) + if not (existing_swap_size_MiB == swap_size_MiB): + needs_setting = True + changes.update( + {'swap_size': + {'old': '{}GiB'.format(existing_swap_size_MiB/1024), + 'new': '{}GiB'.format(swap_size_MiB/1024)}}) + + + if needs_setting: + if __opts__['test']: + comments.append('State {0} will configure ' + 'the host cache on host \'{1}\' to: {2}.' 
+ ''.format(name, hostname, + {'enabled': enabled, + 'datastore_name': datastore['name'], + 'swap_size': swap_size})) + else: + if (existing_datastore['capacity'] / 1024.0**2) < \ + swap_size_MiB: + + raise ArgumentValueError( + 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' + 'smaller than the required swap size ({2} MiB)' + ''.format(existing_datastore['name'], + existing_datastore['capacity'] / 1024.0**2, + swap_size_MiB)) + __salt__['vsphere.configure_host_cache']( + enabled, + datastore['name'], + swap_size_MiB=swap_size_MiB, + service_instance=si) + comments.append('Host cache configured on host ' + '\'{0}\'.'.format(hostname)) + else: + comments.append('Host cache on host \'{0}\' is already correctly ' + 'configured. Nothing to be done.'.format(hostname)) + result = True + __salt__['vsphere.disconnect'](si) + log.info(comments[-1]) + ret.update({'comment': '\n'.join(comments), + 'result': result}) + if __opts__['test']: + ret['pchanges'] = changes + else: + ret['changes'] = changes + return ret + except CommandExecutionError as err: + log.error('Error: {0}.'.format(err)) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False if not __opts__['test'] else None, + 'comment': '{}.'.format(err)}) + return ret + + def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from From ac3a3bdda50435267bb0e8c84af0e7ec3315145a Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 17:25:29 -0400 Subject: [PATCH 335/348] Added VMwareObjectExistsError exception --- salt/exceptions.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/exceptions.py b/salt/exceptions.py index db93362c0f8..7215112ea33 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -442,6 +442,12 @@ class VMwareObjectRetrievalError(VMwareSaltError): ''' +class VMwareObjectExistsError(VMwareSaltError): + ''' + Used when a VMware object exists + ''' + + class 
VMwareObjectNotFoundError(VMwareSaltError): ''' Used when a VMware object was not found From 951d43e0a9e6d7663563d0de7283305a6e165fef Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 17:28:06 -0400 Subject: [PATCH 336/348] pylint --- salt/config/schemas/esxi.py | 11 ++++------- salt/modules/vsphere.py | 12 ++++++------ salt/proxy/esxi.py | 20 +++++++++++--------- salt/states/esxi.py | 22 ++++++++++------------ salt/utils/vmware.py | 11 ++++++----- salt/utils/vsan.py | 5 +++-- 6 files changed, 40 insertions(+), 41 deletions(-) diff --git a/salt/config/schemas/esxi.py b/salt/config/schemas/esxi.py index 5c8b7596c3f..4520321c36a 100644 --- a/salt/config/schemas/esxi.py +++ b/salt/config/schemas/esxi.py @@ -17,7 +17,6 @@ from salt.utils.schema import (DefinitionsSchema, Schema, ComplexSchemaItem, ArrayItem, - DictItem, IntegerItem, BooleanItem, StringItem, @@ -36,7 +35,6 @@ class DiskGroupDiskScsiAddressItem(ComplexSchemaItem): title = 'Diskgroup Disk Scsi Address Item' description = 'ESXi host diskgroup item containing disk SCSI addresses' - cache_scsi_addr = VMwareScsiAddressItem( title='Cache Disk Scsi Address', description='Specifies the SCSI address of the cache disk', @@ -57,7 +55,6 @@ class DiskGroupDiskIdItem(ComplexSchemaItem): title = 'Diskgroup Disk Id Item' description = 'ESXi host diskgroup item containing disk ids' - cache_id = StringItem( title='Cache Disk Id', description='Specifies the id of the cache disk', @@ -80,7 +77,7 @@ class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema): diskgroups = ArrayItem( title='Diskgroups', description='List of diskgroups in an ESXi host', - min_items = 1, + min_items=1, items=DiskGroupDiskScsiAddressItem(), required=True) erase_disks = BooleanItem( @@ -98,7 +95,7 @@ class DiskGroupsDiskIdSchema(DefinitionsSchema): diskgroups = ArrayItem( title='DiskGroups', description='List of disk groups in an ESXi host', - min_items = 1, + min_items=1, items=DiskGroupDiskIdItem(), required=True) @@ -207,8 
+204,8 @@ class EsxiProxySchema(Schema): additional_properties = False proxytype = StringItem(required=True, enum=['esxi']) - host = StringItem(pattern=r'[^\s]+') # Used when connecting directly - vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter + host = StringItem(pattern=r'[^\s]+') # Used when connecting directly + vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter esxi_host = StringItem() username = StringItem() passwords = ArrayItem(min_items=1, diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 21b5426445d..aad667d124d 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -180,7 +180,7 @@ import salt.utils.vsan import salt.utils.pbm from salt.exceptions import CommandExecutionError, VMwareSaltError, \ ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \ - VMwareApiError, InvalidEntityError + VMwareApiError, InvalidEntityError, VMwareObjectExistsError from salt.utils.decorators import depends, ignores_kwargs from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ ESXClusterEntitySchema @@ -5992,7 +5992,7 @@ def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None): host_ref, hostname=hostname) canonical_name_to_scsi_address = { lun.canonicalName: scsi_addr - for scsi_addr, lun in scsi_address_to_lun.iteritems()} + for scsi_addr, lun in six.iteritems(scsi_address_to_lun)} for d in salt.utils.vmware.get_disks(host_ref, disk_ids, scsi_addresses, get_all_disks): ret_list.append({'id': d.canonicalName, @@ -6052,7 +6052,7 @@ def erase_disk_partitions(disk_id=None, scsi_address=None, host_ref, disk_id, hostname=hostname) log.info('Erased disk partitions on disk \'{0}\' on host \'{1}\'' - ''.format(disk_id, esxi_host)) + ''.format(disk_id, hostname)) return True @@ -6220,7 +6220,7 @@ def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True, for id in disk_ids: if not [d for d in disks if d.canonicalName == id]: raise 
VMwareObjectRetrievalError( - 'No disk with id \'{0}\' was found in ESXi host \'{0}\'' + 'No disk with id \'{0}\' was found in ESXi host \'{1}\'' ''.format(id, hostname)) cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0] capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids] @@ -6287,7 +6287,7 @@ def add_capacity_to_diskgroup(cache_disk_id, capacity_disk_ids, if not diskgroups: raise VMwareObjectRetrievalError( 'No diskgroup with cache disk id \'{0}\' was found in ESXi ' - 'host \'{1}\''.format(cache_disk_id, esxi_host)) + 'host \'{1}\''.format(cache_disk_id, hostname)) vsan_disk_mgmt_system = \ salt.utils.vsan.get_vsan_disk_management_system(service_instance) salt.utils.vsan.add_capacity_to_diskgroup(service_instance, @@ -6490,7 +6490,7 @@ def configure_host_cache(enabled, datastore=None, swap_size_MiB=None, if not ds_refs: raise VMwareObjectRetrievalError( 'Datastore \'{0}\' was not found on host ' - '\'{1}\''.format(datastore_name, hostname)) + '\'{1}\''.format(datastore, hostname)) ds_ref = ds_refs[0] salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB) return True diff --git a/salt/proxy/esxi.py b/salt/proxy/esxi.py index f358a710da0..c1131d4dfd6 100644 --- a/salt/proxy/esxi.py +++ b/salt/proxy/esxi.py @@ -276,7 +276,7 @@ import logging import os # Import Salt Libs -from salt.exceptions import SaltSystemExit +from salt.exceptions import SaltSystemExit, InvalidConfigError from salt.config.schemas.esxi import EsxiProxySchema from salt.utils.dictupdate import merge @@ -300,6 +300,7 @@ log = logging.getLogger(__file__) # Define the module's virtual name __virtualname__ = 'esxi' + def __virtual__(): ''' Only load if the ESXi execution module is available. @@ -309,6 +310,7 @@ def __virtual__(): return False, 'The ESXi Proxy Minion module did not load.' + def init(opts): ''' This function gets called when the proxy starts up. 
For @@ -325,7 +327,7 @@ def init(opts): try: jsonschema.validate(proxy_conf, schema) except jsonschema.exceptions.ValidationError as exc: - raise excs.InvalidProxyInputError(exc) + raise InvalidConfigError(exc) DETAILS['proxytype'] = proxy_conf['proxytype'] if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf): @@ -345,7 +347,7 @@ def init(opts): # Get the correct login details try: username, password = find_credentials(host) - except excs.SaltSystemExit as err: + except SaltSystemExit as err: log.critical('Error: {0}'.format(err)) return False @@ -366,7 +368,7 @@ def init(opts): if 'mechanism' not in proxy_conf: log.critical('No \'mechanism\' key found in pillar for this proxy.') return False - mechanism = proxy_conf['mechanism'] + mechanism = proxy_conf['mechanism'] # Save mandatory fields in cache for key in ('vcenter', 'mechanism'): DETAILS[key] = proxy_conf[key] @@ -376,7 +378,7 @@ def init(opts): log.critical('No \'username\' key found in pillar for this ' 'proxy.') return False - if not 'passwords' in proxy_conf and \ + if 'passwords' not in proxy_conf and \ len(proxy_conf['passwords']) > 0: log.critical('Mechanism is set to \'userpass\' , but no ' @@ -386,11 +388,11 @@ def init(opts): for key in ('username', 'passwords'): DETAILS[key] = proxy_conf[key] elif mechanism == 'sspi': - if not 'domain' in proxy_conf: + if 'domain' not in proxy_conf: log.critical('Mechanism is set to \'sspi\' , but no ' '\'domain\' key found in pillar for this proxy.') return False - if not 'principal' in proxy_conf: + if 'principal' not in proxy_conf: log.critical('Mechanism is set to \'sspi\' , but no ' '\'principal\' key found in pillar for this ' 'proxy.') @@ -405,7 +407,7 @@ def init(opts): try: username, password = find_credentials() DETAILS['password'] = password - except excs.SaltSystemExit as err: + except SaltSystemExit as err: log.critical('Error: {0}'.format(err)) return False @@ -456,7 +458,7 @@ def ping(): 
__salt__['vsphere.system_info'](host=DETAILS['host'], username=DETAILS['username'], password=DETAILS['password']) - except excs.SaltSystemExit as err: + except SaltSystemExit as err: log.warning(err) return False return True diff --git a/salt/states/esxi.py b/salt/states/esxi.py index c94daef37b3..337ef1d7f53 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -117,7 +117,7 @@ except ImportError: log = logging.getLogger(__name__) try: - from pyVmomi import vim, vmodl, VmomiSupport + from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ @@ -1122,7 +1122,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: - if not scsi_addr in scsi_addr_to_disk_map: + if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) @@ -1153,7 +1153,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): capacity_disk_displays]))) else: # Erase disk group disks - for disk_id in ([cache_disk_id] + capacity_disk_ids): + for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' @@ -1287,9 +1287,9 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): __salt__['vsphere.disconnect'](si) #Build the final return message - result = (True if not (changes or errors) else # no changes/errors - None if __opts__['test'] else # running in test mode - False if errors else True) # found errors; defaults to True + result = (True if not (changes or errors) else # no changes/errors + None if __opts__['test'] else # running in test mode + False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments)}) 
if changes: @@ -1385,7 +1385,7 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', '\'{0}\''.format(hostname)) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}, 'pchanges': {}} - result = None if __opts__['test'] else True #We assume success + result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} @@ -1518,7 +1518,6 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', log.trace('existing_datastore = {0}'.format(existing_datastore)) log.info(comments[-1]) - if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode @@ -1533,23 +1532,22 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = {0}MiB'.format(raw_size_MiB)) - swap_size_MiB= int(raw_size_MiB/1024)*1024 + swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB)) existing_swap_size_MiB = 0 - m = re.match('(\d+)MiB', host_cache.get('swap_size')) if \ + m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) - if not (existing_swap_size_MiB == swap_size_MiB): + if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) - if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index f2fbf43f593..45dd46a4ac7 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2471,7 +2471,7 @@ def get_scsi_address_to_lun_map(host_ref, 
storage_system=None, hostname=None): luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in - lun_ids_to_scsi_addr_map.iteritems()} + lun_ids_to_six.iteritems(scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, @@ -2513,8 +2513,9 @@ def get_disks(host_ref, disk_ids=None, scsi_addresses=None, lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref, storage_system, hostname) - disk_keys = [key for scsi_addr, key in lun_key_by_scsi_addr.iteritems() - if scsi_addr in scsi_addresses] + disk_keys = [key for scsi_addr, key + in six.iteritems(lun_key_by_scsi_addr) + if scsi_addr in scsi_addresses] log.trace('disk_keys based on scsi_addresses = {0}'.format(disk_keys)) scsi_luns = get_all_luns(host_ref, storage_system) @@ -2695,8 +2696,8 @@ def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] - disk_groups = [dm for dm in vsan_disk_mappings if \ - (get_all_disk_groups or \ + disk_groups = [dm for dm in vsan_disk_mappings if + (get_all_disk_groups or (dm.ssd.canonicalName in cache_disk_ids))] log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : ' '{1}'.format(hostname, diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index b2ec11f80d9..4e124c9f6f4 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -49,7 +49,8 @@ import logging import ssl # Import Salt Libs -from salt.exceptions import VMwareApiError, VMwareRuntimeError +from salt.exceptions import VMwareApiError, VMwareRuntimeError, \ + VMwareObjectRetrievalError import salt.utils.vmware try: @@ -282,7 +283,7 @@ def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system, spec.host = host_ref try: task = vsan_disk_mgmt_system.InitializeDiskMappings(spec) - except fault.NoPermission as exc: + except vim.fault.NoPermission as exc: 
log.exception(exc) raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) From 2e2b01e57cdfbf0a561b20a7383750d78126cefb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 17:29:01 -0400 Subject: [PATCH 337/348] Fix logic to list hosts --- salt/modules/vsphere.py | 2 +- salt/utils/vmware.py | 24 +++++++++++++----------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index aad667d124d..c88485d5551 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -5941,7 +5941,7 @@ def list_hosts_via_proxy(hostnames=None, datacenter=None, raise salt.exceptions.ArgumentValueError( 'Datacenter is required when cluster is specified') get_all_hosts = False - if not hostnames and not datacenter and not cluster: + if not hostnames: get_all_hosts = True hosts = salt.utils.vmware.get_hosts(service_instance, datacenter_name=datacenter, diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 45dd46a4ac7..532312b59b0 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2290,20 +2290,21 @@ def get_hosts(service_instance, datacenter_name=None, host_names=None, Default value is False. ''' properties = ['name'] + if cluster_name and not datacenter_name: + raise salt.exceptions.ArgumentValueError( + 'Must specify the datacenter when specifying the cluster') if not host_names: host_names = [] - if get_all_hosts or not datacenter_name: + if not datacenter_name: # Assume the root folder is the starting point start_point = get_root_folder(service_instance) else: + start_point = get_datacenter(service_instance, datacenter_name) if cluster_name: + # Retrieval to test if cluster exists. 
Cluster existence only makes + # sense if the datacenter has been specified + cluster = get_cluster(start_point, cluster_name) properties.append('parent') - if datacenter_name: - start_point = get_datacenter(service_instance, datacenter_name) - if cluster_name: - # Retrieval to test if cluster exists. Cluster existence only makes - # sense if the cluster has been specified - cluster = get_cluster(start_point, cluster_name) # Search for the objects hosts = get_mors_with_properties(service_instance, @@ -2316,16 +2317,17 @@ def get_hosts(service_instance, datacenter_name=None, host_names=None, # Complex conditions checking if a host should be added to the # filtered list (either due to its name and/or cluster membership) - if get_all_hosts: - filtered_hosts.append(h['object']) - continue - if cluster_name: if not isinstance(h['parent'], vim.ClusterComputeResource): continue parent_name = get_managed_object_name(h['parent']) if parent_name != cluster_name: continue + + if get_all_hosts: + filtered_hosts.append(h['object']) + continue + if h['name'] in host_names: filtered_hosts.append(h['object']) return filtered_hosts From adfa462c05018648bf4077e98dc07c2cb00297f4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 17:29:36 -0400 Subject: [PATCH 338/348] Fixed tests for salt.utils.vmware.get_hosts --- tests/unit/utils/vmware/test_host.py | 43 ++++++++++++++++------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/tests/unit/utils/vmware/test_host.py b/tests/unit/utils/vmware/test_host.py index bd28c70f61c..5a6319e6c89 100644 --- a/tests/unit/utils/vmware/test_host.py +++ b/tests/unit/utils/vmware/test_host.py @@ -14,6 +14,7 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock # Import Salt libraries +from salt.exceptions import ArgumentValueError import salt.utils.vmware # Import Third Party Libs try: @@ -46,14 +47,23 @@ class GetHostsTestCase(TestCase): 
self.mock_host1, self.mock_host2, self.mock_host3 = MagicMock(), \ MagicMock(), MagicMock() self.mock_prop_host1 = {'name': 'fake_hostname1', - 'object': self.mock_host1} + 'object': self.mock_host1} self.mock_prop_host2 = {'name': 'fake_hostname2', - 'object': self.mock_host2} + 'object': self.mock_host2} self.mock_prop_host3 = {'name': 'fake_hostname3', - 'object': self.mock_host3} + 'object': self.mock_host3} self.mock_prop_hosts = [self.mock_prop_host1, self.mock_prop_host2, self.mock_prop_host3] + def test_cluster_no_datacenter(self): + with self.assertRaises(ArgumentValueError) as excinfo: + salt.utils.vmware.get_hosts(self.mock_si, + cluster_name='fake_cluster') + self.assertEqual(excinfo.exception.strerror, + 'Must specify the datacenter when specifying the ' + 'cluster') + + def test_get_si_no_datacenter_no_cluster(self): mock_get_mors = MagicMock() mock_get_root_folder = MagicMock(return_value=self.mock_root_folder) @@ -124,23 +134,20 @@ class GetHostsTestCase(TestCase): self.assertEqual(res, []) def test_filter_cluster(self): - cluster1 = vim.ClusterComputeResource('fake_good_cluster') - cluster2 = vim.ClusterComputeResource('fake_bad_cluster') - # Mock cluster1.name and cluster2.name - cluster1._stub = MagicMock(InvokeAccessor=MagicMock( - return_value='fake_good_cluster')) - cluster2._stub = MagicMock(InvokeAccessor=MagicMock( - return_value='fake_bad_cluster')) - self.mock_prop_host1['parent'] = cluster2 - self.mock_prop_host2['parent'] = cluster1 - self.mock_prop_host3['parent'] = cluster1 + self.mock_prop_host1['parent'] = vim.ClusterComputeResource('cluster') + self.mock_prop_host2['parent'] = vim.ClusterComputeResource('cluster') + self.mock_prop_host3['parent'] = vim.Datacenter('dc') + mock_get_cl_name = MagicMock( + side_effect=['fake_bad_cluster', 'fake_good_cluster']) with patch('salt.utils.vmware.get_mors_with_properties', MagicMock(return_value=self.mock_prop_hosts)): - res = salt.utils.vmware.get_hosts(self.mock_si, - 
datacenter_name='fake_datacenter', - cluster_name='fake_good_cluster', - get_all_hosts=True) - self.assertEqual(res, [self.mock_host2, self.mock_host3]) + with patch('salt.utils.vmware.get_managed_object_name', + mock_get_cl_name): + res = salt.utils.vmware.get_hosts( + self.mock_si, datacenter_name='fake_datacenter', + cluster_name='fake_good_cluster', get_all_hosts=True) + self.assertEqual(mock_get_cl_name.call_count, 2) + self.assertEqual(res, [self.mock_host2]) def test_no_hosts(self): with patch('salt.utils.vmware.get_mors_with_properties', From 90a174c915fd04ae391c83042b3772059e57e435 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Tue, 26 Sep 2017 20:22:26 -0400 Subject: [PATCH 339/348] more pylint --- salt/proxy/esxi.py | 2 +- salt/states/esxi.py | 4 ++-- salt/utils/vmware.py | 8 ++++---- tests/unit/utils/vmware/test_host.py | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/salt/proxy/esxi.py b/salt/proxy/esxi.py index c1131d4dfd6..1599c381c67 100644 --- a/salt/proxy/esxi.py +++ b/salt/proxy/esxi.py @@ -405,7 +405,7 @@ def init(opts): log.debug('Retrieving credentials and testing vCenter connection' ' for mehchanism \'userpass\'') try: - username, password = find_credentials() + username, password = find_credentials(DETAILS['vcenter']) DETAILS['password'] = password except SaltSystemExit as err: log.critical('Error: {0}'.format(err)) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 337ef1d7f53..3d723abce14 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -101,7 +101,8 @@ import re from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ - VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError + VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ + ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema @@ -1415,7 +1416,6 @@ def 
host_cache_configured(name, enabled, datastore, swap_size='100%', 'new': enabled}}) needs_setting = True - # Check datastores existing_datastores = None if host_cache.get('datastore'): diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 532312b59b0..68ff6ca7227 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -2473,7 +2473,7 @@ def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None): luns_to_key_map = {d.key: d for d in get_all_luns(host_ref, storage_system, hostname)} return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in - lun_ids_to_six.iteritems(scsi_addr_map)} + six.iteritems(lun_ids_to_scsi_addr_map)} def get_disks(host_ref, disk_ids=None, scsi_addresses=None, @@ -2698,9 +2698,9 @@ def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False): vsan_disk_mappings = vsan_storage_info.diskMapping if not vsan_disk_mappings: return [] - disk_groups = [dm for dm in vsan_disk_mappings if - (get_all_disk_groups or - (dm.ssd.canonicalName in cache_disk_ids))] + disk_groups = [dm for dm in vsan_disk_mappings if + (get_all_disk_groups or + (dm.ssd.canonicalName in cache_disk_ids))] log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : ' '{1}'.format(hostname, [d.ssd.canonicalName for d in disk_groups])) diff --git a/tests/unit/utils/vmware/test_host.py b/tests/unit/utils/vmware/test_host.py index 5a6319e6c89..0f6965fb7c2 100644 --- a/tests/unit/utils/vmware/test_host.py +++ b/tests/unit/utils/vmware/test_host.py @@ -63,7 +63,6 @@ class GetHostsTestCase(TestCase): 'Must specify the datacenter when specifying the ' 'cluster') - def test_get_si_no_datacenter_no_cluster(self): mock_get_mors = MagicMock() mock_get_root_folder = MagicMock(return_value=self.mock_root_folder) From 1a12d5cb30efb89bacd455fa052aa41aad1581b6 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 28 Sep 2017 06:29:55 -0400 Subject: [PATCH 340/348] Added salt.pillar.extra_minion_data_in_pillar that 
adds any extra minion data into the pillar --- salt/pillar/extra_minion_data_in_pillar.py | 86 ++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 salt/pillar/extra_minion_data_in_pillar.py diff --git a/salt/pillar/extra_minion_data_in_pillar.py b/salt/pillar/extra_minion_data_in_pillar.py new file mode 100644 index 00000000000..2dad741c66d --- /dev/null +++ b/salt/pillar/extra_minion_data_in_pillar.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +''' +Add all extra minion data to the pillar. + +:codeauthor: Alexandru.Bleotu@morganstanley.ms.com + +One can filter on the keys to include in the pillar by using the ``include`` +parameter. For subkeys the ':' notation is supported (i.e. 'key:subkey') +The keyword ```` includes all keys. + +Complete example in etc/salt/master +===================================== + +.. code-block:: yaml + + ext_pillar: + - extra_minion_data_in_pillar: + include: + + ext_pillar: + - extra_minion_data_in_pillar: + include: + - key1 + - key2:subkey2 +''' + + +from __future__ import absolute_import +import os +import logging + + +# Set up logging +log = logging.getLogger(__name__) + + +__virtualname__ = 'extra_minion_data_in_pillar' + +def __virtual__(): + return __virtualname__ + + +def ext_pillar(minion_id, pillar, include, extra_minion_data=None): + + def get_subtree(key, source_dict): + ''' + Returns a subtree corresponfing to the specified key. + + key + Key. Supports the ':' notation (e.g. 
'key:subkey') + + source_dict + Source dictionary + ''' + ret_dict = aux_dict = {} + subtree = source_dict + subkeys = key.split(':') + # Build an empty intermediate subtree following the subkeys + for subkey in subkeys[:-1]: + # The result will be built in aux_dict + aux_dict[subkey] = {} + aux_dict = aux_dict[subkey] + if not subkey in subtree: + # The subkey is not in + return {} + subtree = subtree[subkey] + if subkeys[-1] not in subtree: + # Final subkey is not in subtree + return {} + # Assign the subtree value to the result + aux_dict[subkeys[-1]] = subtree[subkeys[-1]] + return ret_dict + + log.trace('minion_id = {0}'.format(minion_id)) + log.trace('include = {0}'.format(include)) + log.trace('extra_minion_data = {0}'.format(extra_minion_data)) + data = {} + + if not extra_minion_data: + return {} + if include == '': + return extra_minion_data + data = {} + for key in include: + data.update(get_subtree(key, extra_minion_data)) + return data From 998c4a95fa8e8d82d15757265a1558f22b52b6b4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 28 Sep 2017 06:31:35 -0400 Subject: [PATCH 341/348] Added tests for salt.pillar.extra_minion_data_in_pillar --- .../test_extra_minion_data_in_pillar.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 tests/unit/pillar/test_extra_minion_data_in_pillar.py diff --git a/tests/unit/pillar/test_extra_minion_data_in_pillar.py b/tests/unit/pillar/test_extra_minion_data_in_pillar.py new file mode 100644 index 00000000000..df4484e5e6c --- /dev/null +++ b/tests/unit/pillar/test_extra_minion_data_in_pillar.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import + +# Import Salt Testing libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock + +# Import Salt Libs +from salt.pillar import extra_minion_data_in_pillar + + 
+@skipIf(NO_MOCK, NO_MOCK_REASON) +class ExtraMinionDataInPillarTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.pillar.extra_minion_data_in_pillar + ''' + def setup_loader_modules(self): + return { + extra_minion_data_in_pillar : { + '__virtual__': True, + } + } + + def setUp(self): + self.pillar = MagicMock() + self.extra_minion_data = {'key1': {'subkey1': 'value1'}, + 'key2': {'subkey2': {'subsubkey2': 'value2'}}, + 'key3': 'value3', + 'key4': {'subkey4': 'value4'}} + + def test_extra_values_none_or_empty(self): + ret = extra_minion_data_in_pillar.ext_pillar('fake_id', self.pillar, + 'fake_include', None) + self.assertEqual(ret, {}) + ret = extra_minion_data_in_pillar.ext_pillar('fake_id', self.pillar, + 'fake_include', {}) + self.assertEqual(ret, {}) + + def test_include_all(self): + ret = extra_minion_data_in_pillar.ext_pillar( + 'fake_id', self.pillar, '', self.extra_minion_data) + self.assertEqual(ret, self.extra_minion_data) + + def test_include_specific_keys(self): + # Tests partially existing key, key with and without subkey, + ret = extra_minion_data_in_pillar.ext_pillar( + 'fake_id', self.pillar, + include=['key1:subkey1', 'key2:subkey3', 'key3', 'key4'], + extra_minion_data=self.extra_minion_data) + self.assertEqual(ret, {'key1': {'subkey1': 'value1'}, + 'key3': 'value3', + 'key4': {'subkey4': 'value4'}}) From d862a6f3f2e52c542d1f900408801cc299636344 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 28 Sep 2017 09:52:04 -0400 Subject: [PATCH 342/348] Fix some formatting issues on the oxygen release notes page --- doc/topics/releases/oxygen.rst | 481 ++++++++++++++++++--------------- 1 file changed, 260 insertions(+), 221 deletions(-) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 4c651bfce95..bce4c56dad9 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -8,17 +8,17 @@ Comparison Operators in Package Installation -------------------------------------------- 
Salt now supports using comparison operators (e.g. ``>=1.2.3``) when installing -packages on minions which use :mod:`yum/dnf ` or :mod:`apt -`. This is supported both in the :py:func:`pkg.installed -` state and in the ``pkg.install`` remote execution -function. +packages on minions which use :mod:`yum/dnf ` or +:mod:`apt `. This is supported both in the +:py:func:`pkg.installed ` state and in the ``pkg.install`` +remote execution function. :ref:`Master Tops ` Changes ----------------------------------------------- -When both :ref:`Master Tops ` and a :ref:`Top File -` produce SLS matches for a given minion, the matches were being -merged in an unpredictable manner which did not preserve ordering. This has +When both :ref:`Master Tops ` and a +:ref:`Top File ` produce SLS matches for a given minion, the matches +were being merged in an unpredictable manner which did not preserve ordering. This has been changed. The top file matches now execute in the expected order, followed by any master tops matches that are not matched via a top file. @@ -55,14 +55,14 @@ New support for Cisco UCS Chassis --------------------------------- The salt proxy minion now allows for control of Cisco USC chassis. See -the `cimc` modules for details. +the ``cimc`` modules for details. New salt-ssh roster ------------------- A new roster has been added that allows users to pull in a list of hosts -for salt-ssh targeting from a ~/.ssh configuration. For full details, -please see the `sshconfig` roster. +for salt-ssh targeting from a ``~/.ssh`` configuration. For full details, +please see the ``sshconfig`` roster. 
New GitFS Features ------------------ @@ -149,185 +149,200 @@ check the configuration for the correct format and only load if the validation p - ``avahi_announce`` beacon Old behavior: - ``` - beacons: - avahi_announce: - run_once: True - servicetype: _demo._tcp - port: 1234 - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' - ``` + + .. code-block:: yaml + + beacons: + avahi_announce: + run_once: True + servicetype: _demo._tcp + port: 1234 + txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' New behavior: - ``` - beacons: - avahi_announce: - - run_once: True - - servicetype: _demo._tcp - - port: 1234 - - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' - ``` + + .. code-block:: yaml + + beacons: + avahi_announce: + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' - ``bonjour_announce`` beacon Old behavior: - ``` - beacons: - bonjour_announce: - run_once: True - servicetype: _demo._tcp - port: 1234 - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' - ``` + + .. code-block:: yaml + + beacons: + bonjour_announce: + run_once: True + servicetype: _demo._tcp + port: 1234 + txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' New behavior: - ``` - beacons: - bonjour_announce: - - run_once: True - - servicetype: _demo._tcp - - port: 1234 - - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' - ``` + + .. code-block:: yaml + + beacons: + bonjour_announce: + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' - ``btmp`` beacon Old behavior: - ``` - beacons: - btmp: {} - ``` + + .. 
code-block:: yaml + + beacons: + btmp: {} New behavior: - ``` - beacons: - btmp: [] - ``` + .. code-block:: yaml + + beacons: + btmp: [] - ``glxinfo`` beacon Old behavior: - ``` - beacons: - glxinfo: - user: frank - screen_event: True - ``` + + .. code-block:: yaml + + beacons: + glxinfo: + user: frank + screen_event: True New behavior: - ``` - beacons: - glxinfo: - - user: frank - - screen_event: True - ``` + + .. code-block:: yaml + + beacons: + glxinfo: + - user: frank + - screen_event: True - ``haproxy`` beacon Old behavior: - ``` - beacons: - haproxy: - - www-backend: - threshold: 45 - servers: + + .. code-block:: yaml + + beacons: + haproxy: + - www-backend: + threshold: 45 + servers: + - web1 + - web2 + - interval: 120 + + New behavior: + + .. code-block:: yaml + + beacons: + haproxy: + - backends: + www-backend: + threshold: 45 + servers: - web1 - web2 - interval: 120 - ``` - - New behavior: - ``` - beacons: - haproxy: - - backends: - www-backend: - threshold: 45 - servers: - - web1 - - web2 - - interval: 120 - ``` - ``inotify`` beacon Old behavior: - ``` - beacons: - inotify: - /path/to/file/or/dir: - mask: - - open - - create - - close_write - recurse: True - auto_add: True - exclude: - - /path/to/file/or/dir/exclude1 - - /path/to/file/or/dir/exclude2 - - /path/to/file/or/dir/regex[a-m]*$: - regex: True - coalesce: True - ``` + + .. code-block:: yaml + + beacons: + inotify: + /path/to/file/or/dir: + mask: + - open + - create + - close_write + recurse: True + auto_add: True + exclude: + - /path/to/file/or/dir/exclude1 + - /path/to/file/or/dir/exclude2 + - /path/to/file/or/dir/regex[a-m]*$: + regex: True + coalesce: True New behavior: - ``` - beacons: - inotify: - - files: - /path/to/file/or/dir: - mask: - - open - - create - - close_write - recurse: True - auto_add: True - exclude: - - /path/to/file/or/dir/exclude1 - - /path/to/file/or/dir/exclude2 - - /path/to/file/or/dir/regex[a-m]*$: - regex: True - - coalesce: True -``` + + .. 
code-block:: yaml + + beacons: + inotify: + - files: + /path/to/file/or/dir: + mask: + - open + - create + - close_write + recurse: True + auto_add: True + exclude: + - /path/to/file/or/dir/exclude1 + - /path/to/file/or/dir/exclude2 + - /path/to/file/or/dir/regex[a-m]*$: + regex: True + - coalesce: True - ``journald`` beacon Old behavior: - ``` - beacons: - journald: - sshd: - SYSLOG_IDENTIFIER: sshd - PRIORITY: 6 - ``` - New behavior: - ``` - beacons: - journald: - - services: + .. code-block:: yaml + + beacons: + journald: sshd: SYSLOG_IDENTIFIER: sshd PRIORITY: 6 - ``` + + New behavior: + + .. code-block:: yaml + + beacons: + journald: + - services: + sshd: + SYSLOG_IDENTIFIER: sshd + PRIORITY: 6 - ``load`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: load: 1m: @@ -341,51 +356,55 @@ check the configuration for the correct format and only load if the validation p - 1.0 emitatstartup: True onchangeonly: False - ``` New behavior: - ``` - beacons: - load: - - averages: - 1m: - - 0.0 - - 2.0 - 5m: - - 0.0 - - 1.5 - 15m: - - 0.1 - - 1.0 - - emitatstartup: True - - onchangeonly: False - ``` + + .. code-block:: yaml + + beacons: + load: + - averages: + 1m: + - 0.0 + - 2.0 + 5m: + - 0.0 + - 1.5 + 15m: + - 0.1 + - 1.0 + - emitatstartup: True + - onchangeonly: False - ``log`` beacon Old behavior: - ``` - beacons: - log: - file: - : - regex: - ``` - New behavior: - ``` - beacons: - log: - - file: - - tags: + .. code-block:: yaml + + beacons: + log: + file: : regex: - ``` + + New behavior: + + .. code-block:: yaml + + beacons: + log: + - file: + - tags: + : + regex: - ``network_info`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: network_info: - eth0: @@ -398,10 +417,11 @@ check the configuration for the correct format and only load if the validation p errout: 100 dropin: 100 dropout: 100 - ``` New behavior: - ``` + + .. 
code-block:: yaml + beacons: network_info: - interfaces: @@ -415,12 +435,13 @@ check the configuration for the correct format and only load if the validation p errout: 100 dropin: 100 dropout: 100 - ``` - ``network_settings`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: network_settings: eth0: @@ -429,10 +450,11 @@ check the configuration for the correct format and only load if the validation p onvalue: 1 eth1: linkmode: - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: network_settings: - interfaces: @@ -442,12 +464,13 @@ check the configuration for the correct format and only load if the validation p onvalue: 1 - eth1: linkmode: - ``` - ``proxy_example`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: proxy_example: endpoint: beacon @@ -458,60 +481,66 @@ check the configuration for the correct format and only load if the validation p beacons: proxy_example: - endpoint: beacon - ``` - ``ps`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: ps: - salt-master: running - mysql: stopped - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: ps: - processes: salt-master: running mysql: stopped - ``` - ``salt_proxy`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: salt_proxy: - p8000: {} - p8001: {} - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: salt_proxy: - proxies: p8000: {} p8001: {} - ``` - ``sensehat`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: sensehat: humidity: 70% temperature: [20, 40] temperature_from_pressure: 40 pressure: 1500 - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: sensehat: - sensors: @@ -519,21 +548,22 @@ check the configuration for the correct format and only load if the validation p temperature: [20, 40] temperature_from_pressure: 40 pressure: 1500 - ``` - ``service`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: service: salt-master: mysql: - ``` - New behavior: - ``` + + .. 
code-block:: yaml + beacons: service: - services: @@ -541,93 +571,102 @@ check the configuration for the correct format and only load if the validation p onchangeonly: True delay: 30 uncleanshutdown: /run/nginx.pid - ``` - ``sh`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: sh: {} - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: sh: [] - ``` - ``status`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: status: {} - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: status: [] - ``` - ``telegram_bot_msg`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: telegram_bot_msg: token: "" accept_from: - "" interval: 10 - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: telegram_bot_msg: - token: "" - accept_from: - "" - interval: 10 - ``` - ``twilio_txt_msg`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: twilio_txt_msg: account_sid: "" auth_token: "" twilio_number: "+15555555555" interval: 10 - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: twilio_txt_msg: - account_sid: "" - auth_token: "" - twilio_number: "+15555555555" - interval: 10 - ``` - ``wtmp`` beacon Old behavior: - ``` + + .. code-block:: yaml + beacons: wtmp: {} - ``` New behavior: - ``` + + .. code-block:: yaml + beacons: wtmp: [] - ``` Deprecations ------------ From 25a440a2ea9d2aa03d83e3e873dba23db965c01e Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 28 Sep 2017 10:12:06 -0400 Subject: [PATCH 343/348] Fixup a couple of issues with the panos execution module - Spelling errors - Version added tag for Oxygen - Use correct import for salt.exceptions --- salt/modules/panos.py | 56 +++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/salt/modules/panos.py b/salt/modules/panos.py index aecf93fffed..006cef2f4bb 100644 --- a/salt/modules/panos.py +++ b/salt/modules/panos.py @@ -7,9 +7,11 @@ Module to provide Palo Alto compatibility to Salt. 
:depends: none :platform: unix +.. versionadded:: Oxygen Configuration ============= + This module accepts connection configuration details either as parameters, or as configuration settings in pillar as a Salt proxy. Options passed into opts will be ignored if options are passed into pillar. @@ -19,6 +21,7 @@ Options passed into opts will be ignored if options are passed into pillar. About ===== + This execution module was designed to handle connections to a Palo Alto based firewall. This module adds support to send connections directly to the device through the XML API or through a brokered connection to Panorama. @@ -31,8 +34,9 @@ import logging import time # Import Salt Libs -import salt.utils.platform +from salt.exceptions import CommandExecutionError import salt.proxy.panos +import salt.utils.platform log = logging.getLogger(__name__) @@ -55,11 +59,11 @@ def __virtual__(): def _get_job_results(query=None): ''' - Executes a query that requires a job for completion. This funciton will wait for the job to complete + Executes a query that requires a job for completion. This function will wait for the job to complete and return the results. 
''' if not query: - raise salt.exception.CommandExecutionError("Query parameters cannot be empty.") + raise CommandExecutionError("Query parameters cannot be empty.") response = __proxy__['panos.call'](query) @@ -241,10 +245,10 @@ def download_software_file(filename=None, synch=False): ''' if not filename: - raise salt.exception.CommandExecutionError("Filename option must not be none.") + raise CommandExecutionError("Filename option must not be none.") if not isinstance(synch, bool): - raise salt.exception.CommandExecutionError("Synch option must be boolean..") + raise CommandExecutionError("Synch option must be boolean..") if synch is True: query = {'type': 'op', @@ -276,10 +280,10 @@ def download_software_version(version=None, synch=False): ''' if not version: - raise salt.exception.CommandExecutionError("Version option must not be none.") + raise CommandExecutionError("Version option must not be none.") if not isinstance(synch, bool): - raise salt.exception.CommandExecutionError("Synch option must be boolean..") + raise CommandExecutionError("Synch option must be boolean..") if synch is True: query = {'type': 'op', @@ -644,7 +648,7 @@ def get_job(jid=None): ''' if not jid: - raise salt.exception.CommandExecutionError("ID option must not be none.") + raise CommandExecutionError("ID option must not be none.") query = {'type': 'op', 'cmd': '{0}'.format(jid)} @@ -675,7 +679,7 @@ def get_jobs(state='all'): elif state.lower() == 'processed': query = {'type': 'op', 'cmd': ''} else: - raise salt.exception.CommandExecutionError("The state parameter must be all, pending, or processed.") + raise CommandExecutionError("The state parameter must be all, pending, or processed.") return __proxy__['panos.call'](query) @@ -1163,7 +1167,7 @@ def install_antivirus(version=None, latest=False, synch=False, skip_commit=False ''' if not version and latest is False: - raise salt.exception.CommandExecutionError("Version option must not be none.") + raise CommandExecutionError("Version 
option must not be none.") if synch is True: s = "yes" @@ -1220,7 +1224,7 @@ def install_software(version=None): ''' if not version: - raise salt.exception.CommandExecutionError("Version option must not be none.") + raise CommandExecutionError("Version option must not be none.") query = {'type': 'op', 'cmd': '' @@ -1261,7 +1265,7 @@ def refresh_fqdn_cache(force=False): ''' if not isinstance(force, bool): - raise salt.exception.CommandExecutionError("Force option must be boolean.") + raise CommandExecutionError("Force option must be boolean.") if force: query = {'type': 'op', @@ -1312,7 +1316,7 @@ def resolve_address(address=None, vsys=None): return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version) if not address: - raise salt.exception.CommandExecutionError("FQDN to resolve must be provided as address.") + raise CommandExecutionError("FQDN to resolve must be provided as address.") if not vsys: query = {'type': 'op', @@ -1340,7 +1344,7 @@ def save_device_config(filename=None): ''' if not filename: - raise salt.exception.CommandExecutionError("Filename must not be empty.") + raise CommandExecutionError("Filename must not be empty.") query = {'type': 'op', 'cmd': '{0}'.format(filename)} @@ -1382,7 +1386,7 @@ def set_authentication_profile(profile=None, deploy=False): ''' if not profile: - salt.exception.CommandExecutionError("Profile name option must not be none.") + CommandExecutionError("Profile name option must not be none.") ret = {} @@ -1419,7 +1423,7 @@ def set_hostname(hostname=None, deploy=False): ''' if not hostname: - salt.exception.CommandExecutionError("Hostname option must not be none.") + CommandExecutionError("Hostname option must not be none.") ret = {} @@ -1459,7 +1463,7 @@ def set_management_icmp(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option 
provided for service enabled option.") ret = {} @@ -1499,7 +1503,7 @@ def set_management_http(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1539,7 +1543,7 @@ def set_management_https(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1579,7 +1583,7 @@ def set_management_ocsp(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1619,7 +1623,7 @@ def set_management_snmp(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1659,7 +1663,7 @@ def set_management_ssh(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1699,7 +1703,7 @@ def set_management_telnet(enabled=True, deploy=False): elif enabled is False: value = "yes" else: - salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + CommandExecutionError("Invalid option provided for service enabled option.") ret = {} @@ -1892,7 +1896,7 @@ def set_permitted_ip(address=None, deploy=False): ''' if not address: - salt.exception.CommandExecutionError("Address option 
must not be empty.") + CommandExecutionError("Address option must not be empty.") ret = {} @@ -1928,7 +1932,7 @@ def set_timezone(tz=None, deploy=False): ''' if not tz: - salt.exception.CommandExecutionError("Timezone name option must not be none.") + CommandExecutionError("Timezone name option must not be none.") ret = {} @@ -1976,7 +1980,7 @@ def unlock_admin(username=None): ''' if not username: - raise salt.exception.CommandExecutionError("Username option must not be none.") + raise CommandExecutionError("Username option must not be none.") query = {'type': 'op', 'cmd': '{0}' From cdafbe2068c3bdcfa0853badeabd7311c3526d01 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Thu, 28 Sep 2017 07:26:30 -0700 Subject: [PATCH 344/348] Update Oxygen Release notes with new grains and module Also do a final cosmetic fix to the purefa module documentation --- doc/topics/releases/oxygen.rst | 15 +++++++++++++++ salt/modules/purefa.py | 1 + 2 files changed, 16 insertions(+) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 2469018a71f..d08c6c8f3d1 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -46,6 +46,21 @@ noon PST so the Stormpath external authentication module has been removed. https://stormpath.com/oktaplusstormpath +New Grains +---------- + +New core grains have been added to expose any storage initiator setting. + +The new grains added are: + +* ``fc_wwn``: Show all fibre channel world wide port names for a host +* ``iscsi_iqn``: Show the iSCSI IQN name for a host + +New Modules +----------- + +- :mod:`salt.modules.purefa ` + New NaCl Renderer ----------------- diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index 8bcf06fbe81..ce42818fb8b 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -31,6 +31,7 @@ Installation Prerequisites three methods. 1) From the minion config + ..
code-block:: yaml pure_tags: From 8c5b021519e6ce194ae508d7a13ab8e7940a6ae3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 28 Sep 2017 11:16:14 -0400 Subject: [PATCH 345/348] Added * as an include all wildcard in extra_minion_data_in_pillar external pillar (+ test) --- salt/pillar/extra_minion_data_in_pillar.py | 9 +++++++-- tests/unit/pillar/test_extra_minion_data_in_pillar.py | 7 ++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/salt/pillar/extra_minion_data_in_pillar.py b/salt/pillar/extra_minion_data_in_pillar.py index 2dad741c66d..ee8961a8876 100644 --- a/salt/pillar/extra_minion_data_in_pillar.py +++ b/salt/pillar/extra_minion_data_in_pillar.py @@ -15,13 +15,18 @@ Complete example in etc/salt/master ext_pillar: - extra_minion_data_in_pillar: - include: + include: * ext_pillar: - extra_minion_data_in_pillar: include: - key1 - key2:subkey2 + + ext_pillar: + - extra_minion_data_in_pillar: + include: + ''' @@ -78,7 +83,7 @@ def ext_pillar(minion_id, pillar, include, extra_minion_data=None): if not extra_minion_data: return {} - if include == '': + if include in ['*', '']: return extra_minion_data data = {} for key in include: diff --git a/tests/unit/pillar/test_extra_minion_data_in_pillar.py b/tests/unit/pillar/test_extra_minion_data_in_pillar.py index df4484e5e6c..36e0f9e286f 100644 --- a/tests/unit/pillar/test_extra_minion_data_in_pillar.py +++ b/tests/unit/pillar/test_extra_minion_data_in_pillar.py @@ -40,9 +40,10 @@ class ExtraMinionDataInPillarTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, {}) def test_include_all(self): - ret = extra_minion_data_in_pillar.ext_pillar( - 'fake_id', self.pillar, '', self.extra_minion_data) - self.assertEqual(ret, self.extra_minion_data) + for include_all in ['*', '']: + ret = extra_minion_data_in_pillar.ext_pillar( + 'fake_id', self.pillar, include_all, self.extra_minion_data) + self.assertEqual(ret, self.extra_minion_data) def test_include_specific_keys(self): # Tests 
partially existing key, key with and without subkey, From 503cb9c93afb31be60a35c519773eb2cfcf073f0 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 28 Sep 2017 11:31:11 -0400 Subject: [PATCH 346/348] pylint --- salt/pillar/extra_minion_data_in_pillar.py | 5 ++--- tests/unit/pillar/test_extra_minion_data_in_pillar.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/salt/pillar/extra_minion_data_in_pillar.py b/salt/pillar/extra_minion_data_in_pillar.py index ee8961a8876..13c7e812a34 100644 --- a/salt/pillar/extra_minion_data_in_pillar.py +++ b/salt/pillar/extra_minion_data_in_pillar.py @@ -31,16 +31,15 @@ Complete example in etc/salt/master from __future__ import absolute_import -import os import logging # Set up logging log = logging.getLogger(__name__) - __virtualname__ = 'extra_minion_data_in_pillar' + def __virtual__(): return __virtualname__ @@ -65,7 +64,7 @@ def ext_pillar(minion_id, pillar, include, extra_minion_data=None): # The result will be built in aux_dict aux_dict[subkey] = {} aux_dict = aux_dict[subkey] - if not subkey in subtree: + if subkey not in subtree: # The subkey is not in return {} subtree = subtree[subkey] diff --git a/tests/unit/pillar/test_extra_minion_data_in_pillar.py b/tests/unit/pillar/test_extra_minion_data_in_pillar.py index 36e0f9e286f..ed6eeaf65ca 100644 --- a/tests/unit/pillar/test_extra_minion_data_in_pillar.py +++ b/tests/unit/pillar/test_extra_minion_data_in_pillar.py @@ -19,7 +19,7 @@ class ExtraMinionDataInPillarTestCase(TestCase, LoaderModuleMockMixin): ''' def setup_loader_modules(self): return { - extra_minion_data_in_pillar : { + extra_minion_data_in_pillar: { '__virtual__': True, } } From 676c18481fe30a20104814271d0c7568ec68a316 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Tue, 26 Sep 2017 13:05:02 -0700 Subject: [PATCH 347/348] Fix trailing newline on grains The grains iscsi_iqn and fc_wwn have training newlines which cause show when calling the grain from a state file and trying to 
pass this to an execution module. Remove the trailing newline with rstrip() --- salt/grains/core.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index c613c27d64e..9adf2fd776a 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2517,7 +2517,8 @@ def _linux_iqn(): for line in _iscsi: if line.find('InitiatorName') != -1: iqn = line.split('=') - ret.extend([iqn[1]]) + final_iqn = iqn[1].rstrip() + ret.extend([final_iqn]) return ret @@ -2532,7 +2533,8 @@ def _aix_iqn(): aixret = __salt__['cmd.run'](aixcmd) if aixret[0].isalpha(): iqn = aixret.split() - ret.extend([iqn[1]]) + final_iqn = iqn[1].rstrip() + ret.extend([final_iqn]) return ret @@ -2545,6 +2547,7 @@ def _linux_wwns(): for fcfile in glob.glob('/sys/class/fc_host/*/port_name'): with salt.utils.files.fopen(fcfile, 'r') as _wwn: for line in _wwn: + line = line.rstrip() ret.extend([line[2:]]) return ret @@ -2571,6 +2574,7 @@ def _windows_iqn(): for line in cmdret['stdout'].splitlines(): if line[0].isalpha(): continue + line = line.rstrip() ret.extend([line]) return ret @@ -2587,6 +2591,7 @@ def _windows_wwns(): cmdret = __salt__['cmd.run_ps'](ps_cmd) for line in cmdret: + line = line.rstrip() ret.append(line) return ret From 8f58f7b8ab6b1a38c29b12a65ed2096c718d506a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B3=E8=89=B3=E8=8A=AC?= Date: Sun, 24 Sep 2017 11:49:39 +0800 Subject: [PATCH 348/348] bugfix: Catch OSError/IOError when checking minion cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 申艳芬 --- salt/key.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/key.py b/salt/key.py index 52b2d268e1b..40881e479a2 100644 --- a/salt/key.py +++ b/salt/key.py @@ -501,7 +501,13 @@ class Key(object): if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: -
shutil.rmtree(os.path.join(m_cache, minion)) + try: + shutil.rmtree(os.path.join(m_cache, minion)) + except (OSError, IOError) as ex: + log.warning('Key: Delete cache for %s got OSError/IOError: %s \n', + minion, + ex) + continue cache = salt.cache.factory(self.opts) clist = cache.list(self.ACC) if clist: @@ -979,7 +985,13 @@ class RaetKey(Key): if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: - shutil.rmtree(os.path.join(m_cache, minion)) + try: + shutil.rmtree(os.path.join(m_cache, minion)) + except (OSError, IOError) as ex: + log.warning('RaetKey: Delete cache for %s got OSError/IOError: %s \n', + minion, + ex) + continue cache = salt.cache.factory(self.opts) clist = cache.list(self.ACC) if clist: