Mirror of https://github.com/saltstack/salt.git

Commit 0581e684a3: Merge branch 'develop' into infra5292
41 changed files with 5415 additions and 224 deletions

@@ -689,6 +689,12 @@
# for a full explanation.
#multiprocessing: True

# Limit the maximum number of processes or threads created by salt-minion.
# This is useful to avoid resource exhaustion in case the minion receives more
# publications than it is able to handle, as it limits the number of spawned
# processes or threads. -1 is the default and disables the limit.
#process_count_max: -1


#####   Logging settings   #####
##########################################
@@ -2419,6 +2419,23 @@ executed in a thread.

    multiprocessing: True

.. conf_minion:: process_count_max

``process_count_max``
---------------------

.. versionadded:: Oxygen

Default: ``-1``

Limit the maximum number of processes or threads created by ``salt-minion``.
This is useful to avoid resource exhaustion in case the minion receives more
publications than it is able to handle, as it limits the number of spawned
processes or threads. ``-1`` is the default and disables the limit.

.. code-block:: yaml

    process_count_max: -1

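A concrete cap, purely for illustration (the right value depends on the host's
resources and expected publication volume):

.. code-block:: yaml

    process_count_max: 20
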
.. _minion-logging-settings:

@@ -25,6 +25,9 @@ configuration:

      - web*:
        - test.*
        - pkg.*
    # Allow managers to use saltutil module functions
    manager_.*:
      - saltutil.*

Permission Issues
-----------------
@@ -1,5 +1,5 @@
salt.runners.auth module
========================
salt.runners.auth
=================

.. automodule:: salt.runners.auth
    :members:
@@ -1,5 +1,5 @@
salt.runners.event module
=========================
salt.runners.event
==================

.. automodule:: salt.runners.event
    :members:
@@ -1,5 +1,5 @@
salt.runners.smartos_vmadm module
=================================
salt.runners.smartos_vmadm
==========================

.. automodule:: salt.runners.smartos_vmadm
    :members:
@@ -1,5 +1,5 @@
salt.runners.vistara module
===========================
salt.runners.vistara
====================

.. automodule:: salt.runners.vistara
    :members:
doc/topics/releases/2016.11.8.rst: 1719 lines, new file (diff suppressed because it is too large)

@@ -369,46 +369,13 @@ class LoadAuth(object):
        eauth_config = self.opts['external_auth'][eauth]
        if not groups:
            groups = []
        group_perm_keys = [item for item in eauth_config if item.endswith('%')]  # The configured auth groups

        # First we need to know if the user is allowed to proceed via any of their group memberships.
        group_auth_match = False
        for group_config in group_perm_keys:
            if group_config.rstrip('%') in groups:
                group_auth_match = True
                break
        # If a group_auth_match is set it means only that we have a
        # user which matches at least one or more of the groups defined
        # in the configuration file.

        external_auth_in_db = False
        for entry in eauth_config:
            if entry.startswith('^'):
                external_auth_in_db = True
                break

        # If neither a catchall, a named membership or a group
        # membership is found, there is no need to continue. Simply
        # deny the user access.
        if not ((name in eauth_config) |
                ('*' in eauth_config) |
                group_auth_match | external_auth_in_db):
            # Auth successful, but no matching user found in config
            log.warning('Authorization failure occurred.')
            return None

        # We now have an authenticated session and it is time to determine
        # what the user has access to.
        auth_list = []
        if name in eauth_config:
            auth_list = eauth_config[name]
        elif '*' in eauth_config:
            auth_list = eauth_config['*']
        if group_auth_match:
            auth_list = self.ckminions.fill_auth_list_from_groups(
                eauth_config,
                groups,
                auth_list)
        auth_list = self.ckminions.fill_auth_list(
            eauth_config,
            name,
            groups)

        auth_list = self.__process_acl(load, auth_list)
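
For orientation, the eauth layout this refactor leans on: plain keys are user
names, keys ending in '%' are group entries, and '*' is the catch-all. A minimal
sketch of the merge that fill_auth_list performs, conceptually (illustrative
only, not Salt's implementation):

    def merged_auth_list(eauth_config, name, groups):
        # Exact user entry first; '*' is only a fallback.
        auth_list = list(eauth_config.get(name, eauth_config.get('*', [])))
        # Fold in every group entry (e.g. 'admins%') the user belongs to.
        for key, perms in eauth_config.items():
            if key.endswith('%') and key.rstrip('%') in groups:
                auth_list.extend(perms)
        return auth_list
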
salt/cache/redis_cache.py (vendored): 21 lines changed

@@ -481,18 +481,17 @@ def list_(bank):
    Lists entries stored in the specified bank.
    '''
    redis_server = _get_redis_server()
    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
    bank_keys = None
    bank_redis_key = _get_bank_redis_key(bank)
    try:
        bank_keys = redis_server.smembers(bank_keys_redis_key)
        banks = redis_server.smembers(bank_redis_key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
        mesg = 'Cannot list the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                       rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    if not bank_keys:
    if not banks:
        return []
    return list(bank_keys)
    return list(banks)


def contains(bank, key):

@@ -500,15 +499,11 @@ def contains(bank, key):
    Checks if the specified bank contains the specified key.
    '''
    redis_server = _get_redis_server()
    bank_keys_redis_key = _get_bank_keys_redis_key(bank)
    bank_keys = None
    bank_redis_key = _get_bank_redis_key(bank)
    try:
        bank_keys = redis_server.smembers(bank_keys_redis_key)
        return redis_server.sismember(bank_redis_key, key)
    except (RedisConnectionError, RedisResponseError) as rerr:
        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_keys_redis_key,
        mesg = 'Cannot retrieve the Redis cache key {rkey}: {rerr}'.format(rkey=bank_redis_key,
                                                                           rerr=rerr)
        log.error(mesg)
        raise SaltCacheError(mesg)
    if not bank_keys:
        return False
    return key in bank_keys
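
The fixed semantics, reduced to bare redis-py calls (illustrative; the key name
is a hypothetical stand-in for whatever _get_bank_redis_key builds):

    import redis

    r = redis.StrictRedis()
    banks = r.smembers('bank_key')             # what list_() now returns
    found = r.sismember('bank_key', 'web01')   # what contains() now asks Redis
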

@@ -3543,16 +3543,15 @@ def list_nodes_min(location=None, call=None):

    for instance in instances:
        if isinstance(instance['instancesSet']['item'], list):
            for item in instance['instancesSet']['item']:
                state = item['instanceState']['name']
                name = _extract_name_tag(item)
                id = item['instanceId']
            items = instance['instancesSet']['item']
        else:
            item = instance['instancesSet']['item']
            items = [instance['instancesSet']['item']]

        for item in items:
            state = item['instanceState']['name']
            name = _extract_name_tag(item)
            id = item['instanceId']
            ret[name] = {'state': state, 'id': id}
            ret[name] = {'state': state, 'id': id}
    return ret
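
The fix is the usual scalar-or-list normalization, shown in isolation
(illustrative):

    def as_list(value):
        # EC2 describe calls return a dict for one instance and a list for
        # several; wrap the scalar so callers always iterate the same way.
        return value if isinstance(value, list) else [value]

    assert as_list({'id': 1}) == [{'id': 1}]
    assert as_list([{'id': 1}]) == [{'id': 1}]
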

@@ -101,7 +101,7 @@ __virtualname__ = 'libvirt'
log = logging.getLogger(__name__)


def libvirt_error_handler(ctx, error):
def libvirt_error_handler(ctx, error):  # pylint: disable=unused-argument
    '''
    Redirect stderr prints from libvirt to salt logging.
    '''
@@ -337,6 +337,9 @@ VALID_OPTS = {
    # Whether or not processes should be forked when needed. The alternative is to use threading.
    'multiprocessing': bool,

    # Maximum number of concurrently active processes at any given point in time
    'process_count_max': int,

    # Whether or not the salt minion should run scheduled mine updates
    'mine_enabled': bool,

@@ -746,6 +749,10 @@ VALID_OPTS = {
    'fileserver_limit_traversal': bool,
    'fileserver_verify_config': bool,

    # Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
    # applied only if the user isn't matched by any other matcher.
    'permissive_acl': bool,

    # Optionally enables keeping the calculated user's auth list in the token file.
    'keep_acl_in_token': bool,

@@ -1258,6 +1265,7 @@ DEFAULT_MINION_OPTS = {
    'auto_accept': True,
    'autosign_timeout': 120,
    'multiprocessing': True,
    'process_count_max': -1,
    'mine_enabled': True,
    'mine_return_job': False,
    'mine_interval': 60,

@@ -1526,6 +1534,7 @@ DEFAULT_MASTER_OPTS = {
    'external_auth': {},
    'token_expire': 43200,
    'token_expire_user_override': False,
    'permissive_acl': False,
    'keep_acl_in_token': False,
    'eauth_acl_module': '',
    'eauth_tokens': 'localfs',
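
Both new master options default to off; switching them on in the master config
would look like this (illustrative):

    permissive_acl: True
    keep_acl_in_token: True
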

@@ -170,6 +170,14 @@ def clean_old_jobs(opts):


def mk_key(opts, user):
    if HAS_PWD:
        uid = None
        try:
            uid = pwd.getpwnam(user).pw_uid
        except KeyError:
            # User doesn't exist in the system
            if opts['client_acl_verify']:
                return None
    if salt.utils.platform.is_windows():
        # The username may contain '\' if it is in Windows
        # 'DOMAIN\username' format. Fix this for the keyfile path.

@@ -197,9 +205,9 @@ def mk_key(opts, user):
    # Write access is necessary since on subsequent runs, if the file
    # exists, it needs to be written to again. Windows enforces this.
    os.chmod(keyfile, 0o600)
    if HAS_PWD:
    if HAS_PWD and uid is not None:
        try:
            os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
            os.chown(keyfile, uid, -1)
        except OSError:
            # The master is not being run as root and can therefore not
            # chown the key file

@@ -214,27 +222,26 @@ def access_keys(opts):
    '''
    # TODO: Need a way to get all available users for systems not supported by pwd module.
    # For now, user pattern matching will not work for publisher_acl.
    users = []
    keys = {}
    publisher_acl = opts['publisher_acl']
    acl_users = set(publisher_acl.keys())
    if opts.get('user'):
        acl_users.add(opts['user'])
    acl_users.add(salt.utils.get_user())
    for user in acl_users:
        log.info('Preparing the %s key for local communication', user)
        key = mk_key(opts, user)
        if key is not None:
            keys[user] = key

    # Check other users matching ACL patterns
    if opts['client_acl_verify'] and HAS_PWD:
        log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
        for user in pwd.getpwall():
            users.append(user.pw_name)
        log.profile('End pwd.getpwall() call in masterapi access_keys function')
    for user in acl_users:
        log.info('Preparing the %s key for local communication', user)
        keys[user] = mk_key(opts, user)

    # Check other users matching ACL patterns
    if HAS_PWD:
        for user in users:
            user = user.pw_name
            if user not in keys and salt.utils.check_whitelist_blacklist(user, whitelist=acl_users):
                keys[user] = mk_key(opts, user)
    log.profile('End pwd.getpwall() call in masterapi access_keys function')

    return keys

@@ -16,6 +16,7 @@ import os
import json
import socket
import sys
import glob
import re
import platform
import logging

@@ -65,6 +66,7 @@ __salt__ = {
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
    'smbios.records': salt.modules.smbios.records,
    'smbios.get': salt.modules.smbios.get,
    'cmd.run_ps': salt.modules.cmdmod.powershell,
}
log = logging.getLogger(__name__)

@@ -2472,3 +2474,119 @@ def default_gateway():
    except Exception as exc:
        pass
    return grains


def fc_wwn():
    '''
    Return list of Fibre Channel HBA WWNs
    '''
    grains = {}
    grains['fc_wwn'] = False
    if salt.utils.platform.is_linux():
        grains['fc_wwn'] = _linux_wwns()
    elif salt.utils.platform.is_windows():
        grains['fc_wwn'] = _windows_wwns()
    return grains


def iscsi_iqn():
    '''
    Return iSCSI IQN
    '''
    grains = {}
    grains['iscsi_iqn'] = False
    if salt.utils.platform.is_linux():
        grains['iscsi_iqn'] = _linux_iqn()
    elif salt.utils.platform.is_windows():
        grains['iscsi_iqn'] = _windows_iqn()
    elif salt.utils.platform.is_aix():
        grains['iscsi_iqn'] = _aix_iqn()
    return grains


def _linux_iqn():
    '''
    Return iSCSI IQN from a Linux host.
    '''
    ret = []

    initiator = '/etc/iscsi/initiatorname.iscsi'

    if os.path.isfile(initiator):
        with salt.utils.files.fopen(initiator, 'r') as _iscsi:
            for line in _iscsi:
                if line.find('InitiatorName') != -1:
                    iqn = line.split('=')
                    ret.extend([iqn[1]])
    return ret


def _aix_iqn():
    '''
    Return iSCSI IQN from an AIX host.
    '''
    ret = []

    aixcmd = 'lsattr -E -l iscsi0 | grep initiator_name'

    aixret = __salt__['cmd.run'](aixcmd)
    if aixret[0].isalpha():
        iqn = aixret.split()
        ret.extend([iqn[1]])
    return ret


def _linux_wwns():
    '''
    Return Fibre Channel port WWNs from a Linux host.
    '''
    ret = []

    for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
        with salt.utils.files.fopen(fcfile, 'r') as _wwn:
            for line in _wwn:
                ret.extend([line[2:]])
    return ret


def _windows_iqn():
    '''
    Return iSCSI IQN from a Windows host.
    '''
    ret = []

    wmic = salt.utils.path.which('wmic')

    if not wmic:
        return ret

    namespace = r'\\root\WMI'
    mspath = 'MSiSCSIInitiator_MethodClass'
    get = 'iSCSINodeName'

    cmdret = __salt__['cmd.run_all'](
        '{0} /namespace:{1} path {2} get {3} /format:table'.format(
            wmic, namespace, mspath, get))

    for line in cmdret['stdout'].splitlines():
        if line[0].isalpha():
            continue
        ret.extend([line])

    return ret


def _windows_wwns():
    '''
    Return Fibre Channel port WWNs from a Windows host.
    '''
    ps_cmd = r'Get-WmiObject -class MSFC_FibrePortHBAAttributes -namespace "root\WMI" | Select -Expandproperty Attributes | %{($_.PortWWN | % {"{0:x2}" -f $_}) -join ""}'

    ret = []

    cmdret = __salt__['cmd.run_ps'](ps_cmd)

    for line in cmdret:
        ret.append(line)

    return ret
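
Once a minion runs this code, the new grains are queried like any others
(illustrative target):

    salt '*' grains.get iscsi_iqn
    salt '*' grains.get fc_wwn
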
@@ -1333,6 +1333,7 @@ class Minion(MinionBase):
        self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data

@@ -1365,6 +1366,15 @@ class Minion(MinionBase):
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
                yield tornado.gen.sleep(10)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance reference to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
@@ -599,9 +599,14 @@ def exists(vpc_id=None, name=None, cidr=None, tags=None, region=None, key=None,
    try:
        vpc_ids = _find_vpcs(vpc_id=vpc_id, vpc_name=name, cidr=cidr, tags=tags,
                             region=region, key=key, keyid=keyid, profile=profile)
        return {'exists': bool(vpc_ids)}
    except BotoServerError as e:
        return {'error': salt.utils.boto.get_error(e)}
    except BotoServerError as err:
        boto_err = salt.utils.boto.get_error(err)
        if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
            # VPC was not found: handle the error and return False.
            return {'exists': False}
        return {'error': boto_err}

    return {'exists': bool(vpc_ids)}


def create(cidr_block, instance_tenancy=None, vpc_name=None,
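
For reference, the changed function is reachable from the CLI with the
parameters shown in the signature above (minion and VPC names are
illustrative):

    salt myminion boto_vpc.exists name=myvpc region=us-east-1
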
@@ -723,27 +728,34 @@ def describe(vpc_id=None, vpc_name=None, region=None, key=None,
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)
        if not vpc_id:
    except BotoServerError as err:
        boto_err = salt.utils.boto.get_error(err)
        if boto_err.get('aws', {}).get('code') == 'InvalidVpcID.NotFound':
            # VPC was not found: handle the error and return None.
            return {'vpc': None}
        return {'error': boto_err}

            filter_parameters = {'vpc_ids': vpc_id}
    if not vpc_id:
        return {'vpc': None}

    filter_parameters = {'vpc_ids': vpc_id}

    try:
        vpcs = conn.get_all_vpcs(**filter_parameters)
    except BotoServerError as err:
        return {'error': salt.utils.boto.get_error(err)}

        if vpcs:
            vpc = vpcs[0]  # Found!
            log.debug('Found VPC: {0}'.format(vpc.id))
    if vpcs:
        vpc = vpcs[0]  # Found!
        log.debug('Found VPC: {0}'.format(vpc.id))

            keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
                    'dhcp_options_id', 'instance_tenancy')
            _r = dict([(k, getattr(vpc, k)) for k in keys])
            _r.update({'region': getattr(vpc, 'region').name})
            return {'vpc': _r}
        else:
            return {'vpc': None}

    except BotoServerError as e:
        return {'error': salt.utils.boto.get_error(e)}
        keys = ('id', 'cidr_block', 'is_default', 'state', 'tags',
                'dhcp_options_id', 'instance_tenancy')
        _r = dict([(k, getattr(vpc, k)) for k in keys])
        _r.update({'region': getattr(vpc, 'region').name})
        return {'vpc': _r}
    else:
        return {'vpc': None}


def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,

@@ -809,7 +821,7 @@ def _find_subnets(subnet_name=None, vpc_id=None, cidr=None, tags=None, conn=None
    Given subnet properties, find and return matching subnet ids
    '''

    if not any(subnet_name, tags, cidr):
    if not any([subnet_name, tags, cidr]):
        raise SaltInvocationError('At least one of the following must be '
                                  'specified: subnet_name, cidr or tags.')

@@ -927,34 +939,38 @@ def subnet_exists(subnet_id=None, name=None, subnet_name=None, cidr=None,

    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        filter_parameters = {'filters': {}}
    except BotoServerError as err:
        return {'error': salt.utils.boto.get_error(err)}

        if subnet_id:
            filter_parameters['subnet_ids'] = [subnet_id]

        if subnet_name:
            filter_parameters['filters']['tag:Name'] = subnet_name

        if cidr:
            filter_parameters['filters']['cidr'] = cidr

        if tags:
            for tag_name, tag_value in six.iteritems(tags):
                filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value

        if zones:
            filter_parameters['filters']['availability_zone'] = zones
    filter_parameters = {'filters': {}}
    if subnet_id:
        filter_parameters['subnet_ids'] = [subnet_id]
    if subnet_name:
        filter_parameters['filters']['tag:Name'] = subnet_name
    if cidr:
        filter_parameters['filters']['cidr'] = cidr
    if tags:
        for tag_name, tag_value in six.iteritems(tags):
            filter_parameters['filters']['tag:{0}'.format(tag_name)] = tag_value
    if zones:
        filter_parameters['filters']['availability_zone'] = zones

    try:
        subnets = conn.get_all_subnets(**filter_parameters)
        log.debug('The filters criteria {0} matched the following subnets: {1}'.format(filter_parameters, subnets))
        if subnets:
            log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
            return {'exists': True}
        else:
            log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
    except BotoServerError as err:
        boto_err = salt.utils.boto.get_error(err)
        if boto_err.get('aws', {}).get('code') == 'InvalidSubnetID.NotFound':
            # Subnet was not found: handle the error and return False.
            return {'exists': False}
    except BotoServerError as e:
        return {'error': salt.utils.boto.get_error(e)}
        return {'error': boto_err}

    log.debug('The filters criteria {0} matched the following subnets: {1}'.format(filter_parameters, subnets))
    if subnets:
        log.info('Subnet {0} exists.'.format(subnet_name or subnet_id))
        return {'exists': True}
    else:
        log.info('Subnet {0} does not exist.'.format(subnet_name or subnet_id))
        return {'exists': False}


def get_subnet_association(subnets, region=None, key=None, keyid=None,
@@ -26,7 +26,7 @@ _XCCDF_MAP = {
        'cmd_pattern': (
            "oscap xccdf eval "
            "--oval-results --results results.xml --report report.html "
            "--profile {0} {1} {2}"
            "--profile {0} {1}"
        )
    }
}

@@ -73,7 +73,6 @@ def xccdf(params):
    '''
    params = shlex.split(params)
    policy = params[-1]
    del params[-1]

    success = True
    error = None

@@ -90,7 +89,7 @@ def xccdf(params):
        error = str(err)

    if success:
        cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, " ".join(argv), policy)
        cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy)
        tempdir = tempfile.mkdtemp()
        proc = Popen(
            shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir)
@@ -27,6 +27,20 @@ Installation Prerequisites

        pip install purestorage

- Configure Pure Storage FlashArray authentication. Use one of the following
  three methods.

  1) From the minion config

  .. code-block:: yaml

      pure_tags:
        fa:
          san_ip: management vip or hostname for the FlashArray
          api_token: A valid api token for the FlashArray being managed

  2) From environment (PUREFA_IP and PUREFA_API)
  3) From the pillar (PUREFA_IP and PUREFA_API)

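A sketch of method 2, assuming a POSIX shell and placeholder values; the minion
then picks the credentials up from its environment:

.. code-block:: bash

    export PUREFA_IP=10.0.0.2      # FlashArray management VIP or hostname
    export PUREFA_API=<api token>  # valid API token for the array
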
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purestorage

@@ -195,7 +209,7 @@ def snap_create(name, suffix=None):

    Will return False if the volume selected to snap does not exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume to snapshot

@@ -231,7 +245,7 @@ def snap_delete(name, suffix=None, eradicate=False):

    Will return False if selected snapshot does not exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -273,7 +287,7 @@ def snap_eradicate(name, suffix=None):

    Will return False if snapshot is not in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -306,7 +320,7 @@ def volume_create(name, size=None):

    Will return False if volume already exists.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume (truncated to 63 characters)

@@ -344,7 +358,7 @@ def volume_delete(name, eradicate=False):

    Will return False if volume doesn't exist or is already in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -383,7 +397,7 @@ def volume_eradicate(name):

    Will return False if volume is not in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -413,7 +427,7 @@ def volume_extend(name, size):

    Will return False if new size is less than or equal to existing size.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -451,7 +465,7 @@ def snap_volume_create(name, target, overwrite=False):

    Will return False if target volume already exists and
    overwrite is not specified, or selected snapshot doesn't exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume snapshot

@@ -497,7 +511,7 @@ def volume_clone(name, target, overwrite=False):

    Will return False if source volume doesn't exist, or
    target volume already exists and overwrite not specified.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -541,7 +555,7 @@ def volume_attach(name, host):

    Host and volume must exist or else will return False.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -574,7 +588,7 @@ def volume_detach(name, host):

    Will return False if either host or volume do not exist, or
    if selected volume isn't already connected to the host.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of volume

@@ -608,7 +622,7 @@ def host_create(name, iqn=None, wwn=None):

    Fibre Channel parameters are not in a valid format.
    See Pure Storage FlashArray documentation.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of host (truncated to 63 characters)

@@ -659,7 +673,7 @@ def host_update(name, iqn=None, wwn=None):

    by another host, or are not in a valid format.
    See Pure Storage FlashArray documentation.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of host

@@ -699,7 +713,7 @@ def host_delete(name):

    Will return False if the host doesn't exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of host

@@ -735,7 +749,7 @@ def hg_create(name, host=None, volume=None):

    Will return False if hostgroup already exists, or if
    named host or volume do not exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of hostgroup (truncated to 63 characters)

@@ -791,7 +805,7 @@ def hg_update(name, host=None, volume=None):

    Will return False if hostgroup doesn't exist, or host
    or volume do not exist.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of hostgroup

@@ -837,7 +851,7 @@ def hg_delete(name):

    Will return False if hostgroup is already in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of hostgroup

@@ -875,7 +889,7 @@ def hg_remove(name, volume=None, host=None):

    Will return False if hostgroup does not exist, or named host or volume are
    not in the hostgroup.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of hostgroup

@@ -936,7 +950,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True):

    hostgroups, hosts or volumes
    * Named type for protection group does not exist

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of protection group

@@ -1029,7 +1043,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None):

    * Incorrect type selected for current protection group type
    * Specified type does not exist

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of protection group

@@ -1119,7 +1133,7 @@ def pg_delete(name, eradicate=False):

    Will return False if protection group is already in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of protection group

@@ -1156,7 +1170,7 @@ def pg_eradicate(name):

    Will return False if protection group is not in a deleted state.

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of protection group

@@ -1188,7 +1202,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None):

    * Protection group does not exist
    * Specified type is not currently associated with the protection group

    .. versionadded:: 2017.7.3
    .. versionadded:: Oxygen

    name : string
        name of hostgroup
@@ -132,7 +132,7 @@ def procs():
    uind = 0
    pind = 0
    cind = 0
    plines = __salt__['cmd.run'](__grains__['ps']).splitlines()
    plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
    guide = plines.pop(0).split()
    if 'USER' in guide:
        uind = guide.index('USER')

@@ -1417,7 +1417,7 @@ def pid(sig):
    '''

    cmd = __grains__['ps']
    output = __salt__['cmd.run_stdout'](cmd)
    output = __salt__['cmd.run_stdout'](cmd, python_shell=True)

    pids = ''
    for line in output.splitlines():
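
Both functions are driven by the ps grain and surface through the usual CLI
calls (illustrative):

    salt '*' status.procs
    salt '*' status.pid httpd
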
@@ -3622,6 +3622,992 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N
    return ret


def _get_dvs_config_dict(dvs_name, dvs_config):
    '''
    Returns the dict representation of the DVS config

    dvs_name
        The name of the DVS

    dvs_config
        The DVS config
    '''
    log.trace('Building the dict of the DVS \'{0}\' config'.format(dvs_name))
    conf_dict = {'name': dvs_name,
                 'contact_email': dvs_config.contact.contact,
                 'contact_name': dvs_config.contact.name,
                 'description': dvs_config.description,
                 'lacp_api_version': dvs_config.lacpApiVersion,
                 'network_resource_control_version':
                 dvs_config.networkResourceControlVersion,
                 'network_resource_management_enabled':
                 dvs_config.networkResourceManagementEnabled,
                 'max_mtu': dvs_config.maxMtu}
    if isinstance(dvs_config.uplinkPortPolicy,
                  vim.DVSNameArrayUplinkPortPolicy):
        conf_dict.update(
            {'uplink_names': dvs_config.uplinkPortPolicy.uplinkPortName})
    return conf_dict


def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
    '''
    Returns the dict representation of the DVS link discovery protocol

    dvs_name
        The name of the DVS

    dvs_link_disc_protocol
        The DVS link discovery protocol
    '''
    log.trace('Building the dict of the DVS \'{0}\' link discovery '
              'protocol'.format(dvs_name))
    return {'operation': dvs_link_disc_protocol.operation,
            'protocol': dvs_link_disc_protocol.protocol}


def _get_dvs_product_info(dvs_name, dvs_product_info):
    '''
    Returns the dict representation of the DVS product info

    dvs_name
        The name of the DVS

    dvs_product_info
        The DVS product info
    '''
    log.trace('Building the dict of the DVS \'{0}\' product '
              'info'.format(dvs_name))
    return {'name': dvs_product_info.name,
            'vendor': dvs_product_info.vendor,
            'version': dvs_product_info.version}


def _get_dvs_capability(dvs_name, dvs_capability):
    '''
    Returns the dict representation of the DVS capability

    dvs_name
        The name of the DVS

    dvs_capability
        The DVS capability
    '''
    log.trace('Building the dict of the DVS \'{0}\' capability'
              ''.format(dvs_name))
    return {'operation_supported': dvs_capability.dvsOperationSupported,
            'portgroup_operation_supported':
            dvs_capability.dvPortGroupOperationSupported,
            'port_operation_supported': dvs_capability.dvPortOperationSupported}


def _get_dvs_infrastructure_traffic_resources(dvs_name,
                                              dvs_infra_traffic_ress):
    '''
    Returns a list of dict representations of the DVS infrastructure traffic
    resources

    dvs_name
        The name of the DVS

    dvs_infra_traffic_ress
        The DVS infrastructure traffic resources
    '''
    log.trace('Building the dicts of the DVS \'{0}\' infrastructure traffic '
              'resources'.format(dvs_name))
    res_dicts = []
    for res in dvs_infra_traffic_ress:
        res_dict = {'key': res.key,
                    'limit': res.allocationInfo.limit,
                    'reservation': res.allocationInfo.reservation}
        if res.allocationInfo.shares:
            res_dict.update({'num_shares': res.allocationInfo.shares.shares,
                             'share_level': res.allocationInfo.shares.level})
        res_dicts.append(res_dict)
    return res_dicts


@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def list_dvss(datacenter=None, dvs_names=None, service_instance=None):
    '''
    Returns a list of distributed virtual switches (DVSs).
    The list can be filtered by the datacenter or DVS names.

    datacenter
        The datacenter to look for DVSs in.
        Default value is None.

    dvs_names
        List of DVS names to look for. If None, all DVSs are returned.
        Default value is None.

    .. code-block:: bash

        salt '*' vsphere.list_dvss

        salt '*' vsphere.list_dvss dvs_names=[dvs1,dvs2]
    '''
    ret_list = []
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)

    for dvs in salt.utils.vmware.get_dvss(dc_ref, dvs_names, (not dvs_names)):
        dvs_dict = {}
        # XXX: Because of how VMware did DVS object inheritance we can't
        # be more restrictive when retrieving the dvs config, we have to
        # retrieve the entire object
        props = salt.utils.vmware.get_properties_of_managed_object(
            dvs, ['name', 'config', 'capability', 'networkResourcePool'])
        dvs_dict = _get_dvs_config_dict(props['name'], props['config'])
        # Product info
        dvs_dict.update(
            {'product_info':
             _get_dvs_product_info(props['name'],
                                   props['config'].productInfo)})
        # Link Discovery Protocol
        if props['config'].linkDiscoveryProtocolConfig:
            dvs_dict.update(
                {'link_discovery_protocol':
                 _get_dvs_link_discovery_protocol(
                     props['name'],
                     props['config'].linkDiscoveryProtocolConfig)})
        # Capability
        dvs_dict.update({'capability':
                         _get_dvs_capability(props['name'],
                                             props['capability'])})
        # InfrastructureTrafficResourceConfig - available with vSphere 6.0
        if hasattr(props['config'], 'infrastructureTrafficResourceConfig'):
            dvs_dict.update({
                'infrastructure_traffic_resource_pools':
                _get_dvs_infrastructure_traffic_resources(
                    props['name'],
                    props['config'].infrastructureTrafficResourceConfig)})
        ret_list.append(dvs_dict)
    return ret_list


def _apply_dvs_config(config_spec, config_dict):
    '''
    Applies the values of the config_dict dictionary to a config spec
    (vim.VMwareDVSConfigSpec)
    '''
    if config_dict.get('name'):
        config_spec.name = config_dict['name']
    if config_dict.get('contact_email') or config_dict.get('contact_name'):
        if not config_spec.contact:
            config_spec.contact = vim.DVSContactInfo()
        config_spec.contact.contact = config_dict.get('contact_email')
        config_spec.contact.name = config_dict.get('contact_name')
    if config_dict.get('description'):
        config_spec.description = config_dict.get('description')
    if config_dict.get('max_mtu'):
        config_spec.maxMtu = config_dict.get('max_mtu')
    if config_dict.get('lacp_api_version'):
        config_spec.lacpApiVersion = config_dict.get('lacp_api_version')
    if config_dict.get('network_resource_control_version'):
        config_spec.networkResourceControlVersion = \
            config_dict.get('network_resource_control_version')
    if config_dict.get('uplink_names'):
        if not config_spec.uplinkPortPolicy or \
           not isinstance(config_spec.uplinkPortPolicy,
                          vim.DVSNameArrayUplinkPortPolicy):

            config_spec.uplinkPortPolicy = \
                vim.DVSNameArrayUplinkPortPolicy()
        config_spec.uplinkPortPolicy.uplinkPortName = \
            config_dict['uplink_names']


def _apply_dvs_link_discovery_protocol(disc_prot_config, disc_prot_dict):
    '''
    Applies the values of the disc_prot_dict dictionary to a link discovery
    protocol config object (vim.LinkDiscoveryProtocolConfig)
    '''
    disc_prot_config.operation = disc_prot_dict['operation']
    disc_prot_config.protocol = disc_prot_dict['protocol']


def _apply_dvs_product_info(product_info_spec, product_info_dict):
    '''
    Applies the values of the product_info_dict dictionary to a product info
    spec (vim.DistributedVirtualSwitchProductSpec)
    '''
    if product_info_dict.get('name'):
        product_info_spec.name = product_info_dict['name']
    if product_info_dict.get('vendor'):
        product_info_spec.vendor = product_info_dict['vendor']
    if product_info_dict.get('version'):
        product_info_spec.version = product_info_dict['version']


def _apply_dvs_capability(capability_spec, capability_dict):
    '''
    Applies the values of the capability_dict dictionary to a DVS capability
    object (vim.DVSCapability)
    '''
    if 'operation_supported' in capability_dict:
        capability_spec.dvsOperationSupported = \
            capability_dict['operation_supported']
    if 'port_operation_supported' in capability_dict:
        capability_spec.dvPortOperationSupported = \
            capability_dict['port_operation_supported']
    if 'portgroup_operation_supported' in capability_dict:
        capability_spec.dvPortGroupOperationSupported = \
            capability_dict['portgroup_operation_supported']


def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
                                                resource_dicts):
    '''
    Applies the values of the resource dictionaries to infra traffic resources,
    creating the infra traffic resource if required
    (vim.DvsHostInfrastructureTrafficResource)
    '''
    for res_dict in resource_dicts:
        filtered_traffic_resources = \
            [r for r in infra_traffic_resources if r.key == res_dict['key']]
        if filtered_traffic_resources:
            traffic_res = filtered_traffic_resources[0]
        else:
            traffic_res = vim.DvsHostInfrastructureTrafficResource()
            traffic_res.key = res_dict['key']
            traffic_res.allocationInfo = \
                vim.DvsHostInfrastructureTrafficResourceAllocation()
            infra_traffic_resources.append(traffic_res)
        if res_dict.get('limit'):
            traffic_res.allocationInfo.limit = res_dict['limit']
        if res_dict.get('reservation'):
            traffic_res.allocationInfo.reservation = res_dict['reservation']
        if res_dict.get('num_shares') or res_dict.get('share_level'):
            if not traffic_res.allocationInfo.shares:
                traffic_res.allocationInfo.shares = vim.SharesInfo()
            if res_dict.get('share_level'):
                traffic_res.allocationInfo.shares.level = \
                    vim.SharesLevel(res_dict['share_level'])
            if res_dict.get('num_shares'):
                # XXX Even though we always set the number of shares if provided,
                # the vCenter will ignore it unless the share level is 'custom'.
                traffic_res.allocationInfo.shares.shares = res_dict['num_shares']


def _apply_dvs_network_resource_pools(network_resource_pools, resource_dicts):
    '''
    Applies the values of the resource dictionaries to network resource pools,
    creating the resource pools if required
    (vim.DVSNetworkResourcePoolConfigSpec)
    '''
    for res_dict in resource_dicts:
        ress = [r for r in network_resource_pools if r.key == res_dict['key']]
        if ress:
            res = ress[0]
        else:
            res = vim.DVSNetworkResourcePoolConfigSpec()
            res.key = res_dict['key']
            res.allocationInfo = \
                vim.DVSNetworkResourcePoolAllocationInfo()
            network_resource_pools.append(res)
        if res_dict.get('limit'):
            res.allocationInfo.limit = res_dict['limit']
        if res_dict.get('num_shares') and res_dict.get('share_level'):
            if not res.allocationInfo.shares:
                res.allocationInfo.shares = vim.SharesInfo()
            res.allocationInfo.shares.shares = res_dict['num_shares']
            res.allocationInfo.shares.level = \
                vim.SharesLevel(res_dict['share_level'])


@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def create_dvs(dvs_dict, dvs_name, service_instance=None):
    '''
    Creates a distributed virtual switch (DVS).

    Note: The ``dvs_name`` param will override any name set in ``dvs_dict``.

    dvs_dict
        Dict representation of the new DVS (example in salt.states.dvs)

    dvs_name
        Name of the DVS to be created.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_dvs dvs_dict=$dvs_dict dvs_name=dvs_name
    '''
    log.trace('Creating dvs \'{0}\' with dict = {1}'.format(dvs_name,
                                                            dvs_dict))
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    # Make the name of the DVS consistent with the call
    dvs_dict['name'] = dvs_name
    # Build the config spec from the input
    dvs_create_spec = vim.DVSCreateSpec()
    dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    _apply_dvs_config(dvs_create_spec.configSpec, dvs_dict)
    if dvs_dict.get('product_info'):
        dvs_create_spec.productInfo = vim.DistributedVirtualSwitchProductSpec()
        _apply_dvs_product_info(dvs_create_spec.productInfo,
                                dvs_dict['product_info'])
    if dvs_dict.get('capability'):
        dvs_create_spec.capability = vim.DVSCapability()
        _apply_dvs_capability(dvs_create_spec.capability,
                              dvs_dict['capability'])
    if dvs_dict.get('link_discovery_protocol'):
        dvs_create_spec.configSpec.linkDiscoveryProtocolConfig = \
            vim.LinkDiscoveryProtocolConfig()
        _apply_dvs_link_discovery_protocol(
            dvs_create_spec.configSpec.linkDiscoveryProtocolConfig,
            dvs_dict['link_discovery_protocol'])
    if dvs_dict.get('infrastructure_traffic_resource_pools'):
        dvs_create_spec.configSpec.infrastructureTrafficResourceConfig = []
        _apply_dvs_infrastructure_traffic_resources(
            dvs_create_spec.configSpec.infrastructureTrafficResourceConfig,
            dvs_dict['infrastructure_traffic_resource_pools'])
    log.trace('dvs_create_spec = {}'.format(dvs_create_spec))
    salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec)
    if 'network_resource_management_enabled' in dvs_dict:
        dvs_refs = salt.utils.vmware.get_dvss(dc_ref,
                                              dvs_names=[dvs_name])
        if not dvs_refs:
            raise VMwareObjectRetrievalError(
                'DVS \'{0}\' wasn\'t found in datacenter \'{1}\''
                ''.format(dvs_name, datacenter))
        dvs_ref = dvs_refs[0]
        salt.utils.vmware.set_dvs_network_resource_management_enabled(
            dvs_ref, dvs_dict['network_resource_management_enabled'])
    return True

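For orientation, a minimal dvs_dict assembled from the keys _apply_dvs_config
and _apply_dvs_link_discovery_protocol consume above (placeholder values):

    {'name': 'dvs1',
     'description': 'example switch',
     'max_mtu': 9000,
     'uplink_names': ['uplink1', 'uplink2'],
     'link_discovery_protocol': {'operation': 'listen',
                                 'protocol': 'cdp'}}
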
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxdatacenter', 'esxcluster')
|
||||
@gets_service_instance_via_proxy
|
||||
def update_dvs(dvs_dict, dvs, service_instance=None):
|
||||
'''
|
||||
Updates a distributed virtual switch (DVS).
|
||||
|
||||
Note: Updating the product info, capability, uplinks of a DVS is not
|
||||
supported so the corresponding entries in ``dvs_dict`` will be
|
||||
ignored.
|
||||
|
||||
dvs_dict
|
||||
Dictionary with the values the DVS should be update with
|
||||
(exmaple in salt.states.dvs)
|
||||
|
||||
dvs
|
||||
Name of the DVS to be updated.
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' vsphere.update_dvs dvs_dict=$dvs_dict dvs=dvs1
|
||||
'''
|
||||
# Remove ignored properties
|
||||
log.trace('Updating dvs \'{0}\' with dict = {1}'.format(dvs, dvs_dict))
|
||||
for prop in ['product_info', 'capability', 'uplink_names', 'name']:
|
||||
if prop in dvs_dict:
|
||||
del dvs_dict[prop]
|
||||
proxy_type = get_proxy_type()
|
||||
if proxy_type == 'esxdatacenter':
|
||||
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
|
||||
dc_ref = _get_proxy_target(service_instance)
|
||||
elif proxy_type == 'esxcluster':
|
||||
datacenter = __salt__['esxcluster.get_details']()['datacenter']
|
||||
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
|
||||
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
|
||||
if not dvs_refs:
|
||||
raise VMwareObjectRetrievalError('DVS \'{0}\' wasn\'t found in '
|
||||
'datacenter \'{1}\''
|
||||
''.format(dvs, datacenter))
|
||||
dvs_ref = dvs_refs[0]
|
||||
# Build the config spec from the input
|
||||
dvs_props = salt.utils.vmware.get_properties_of_managed_object(
|
||||
dvs_ref, ['config', 'capability'])
|
||||
dvs_config = vim.VMwareDVSConfigSpec()
|
||||
# Copy all of the properties in the config of the of the DVS to a
|
||||
# DvsConfigSpec
|
||||
skipped_properties = ['host']
|
||||
for prop in dvs_config.__dict__.keys():
|
||||
if prop in skipped_properties:
|
||||
continue
|
||||
if hasattr(dvs_props['config'], prop):
|
||||
setattr(dvs_config, prop, getattr(dvs_props['config'], prop))
|
||||
_apply_dvs_config(dvs_config, dvs_dict)
|
||||
if dvs_dict.get('link_discovery_protocol'):
|
||||
if not dvs_config.linkDiscoveryProtocolConfig:
|
||||
dvs_config.linkDiscoveryProtocolConfig = \
|
||||
vim.LinkDiscoveryProtocolConfig()
|
||||
_apply_dvs_link_discovery_protocol(
|
||||
dvs_config.linkDiscoveryProtocolConfig,
|
||||
dvs_dict['link_discovery_protocol'])
|
||||
if dvs_dict.get('infrastructure_traffic_resource_pools'):
|
||||
if not dvs_config.infrastructureTrafficResourceConfig:
|
||||
dvs_config.infrastructureTrafficResourceConfig = []
|
||||
_apply_dvs_infrastructure_traffic_resources(
|
||||
dvs_config.infrastructureTrafficResourceConfig,
|
||||
dvs_dict['infrastructure_traffic_resource_pools'])
|
||||
log.trace('dvs_config= {}'.format(dvs_config))
|
||||
salt.utils.vmware.update_dvs(dvs_ref, dvs_config_spec=dvs_config)
|
||||
if 'network_resource_management_enabled' in dvs_dict:
|
||||
salt.utils.vmware.set_dvs_network_resource_management_enabled(
|
||||
dvs_ref, dvs_dict['network_resource_management_enabled'])
|
||||
return True
|
||||
|
||||
|
||||
def _get_dvportgroup_out_shaping(pg_name, pg_default_port_config):
|
||||
'''
|
||||
Returns the out shaping policy of a distributed virtual portgroup
|
||||
|
||||
pg_name
|
||||
The name of the portgroup
|
||||
|
||||
pg_default_port_config
|
||||
The dafault port config of the portgroup
|
||||
'''
|
||||
log.trace('Retrieving portgroup\'s \'{0}\' out shaping '
|
||||
'config'.format(pg_name))
|
||||
out_shaping_policy = pg_default_port_config.outShapingPolicy
|
||||
if not out_shaping_policy:
|
||||
return {}
|
||||
return {'average_bandwidth': out_shaping_policy.averageBandwidth.value,
|
||||
'burst_size': out_shaping_policy.burstSize.value,
|
||||
'enabled': out_shaping_policy.enabled.value,
|
||||
'peak_bandwidth': out_shaping_policy.peakBandwidth.value}
|
||||
|
||||
|
||||
def _get_dvportgroup_security_policy(pg_name, pg_default_port_config):
|
||||
'''
|
||||
Returns the security policy of a distributed virtual portgroup
|
||||
|
||||
pg_name
|
||||
The name of the portgroup
|
||||
|
||||
pg_default_port_config
|
||||
The dafault port config of the portgroup
|
||||
'''
|
||||
log.trace('Retrieving portgroup\'s \'{0}\' security policy '
|
||||
'config'.format(pg_name))
|
||||
sec_policy = pg_default_port_config.securityPolicy
|
||||
if not sec_policy:
|
||||
return {}
|
||||
return {'allow_promiscuous': sec_policy.allowPromiscuous.value,
|
||||
'forged_transmits': sec_policy.forgedTransmits.value,
|
||||
'mac_changes': sec_policy.macChanges.value}
|
||||
|
||||
|
||||
def _get_dvportgroup_teaming(pg_name, pg_default_port_config):
|
||||
'''
|
||||
Returns the teaming of a distributed virtual portgroup
|
||||
|
||||
pg_name
|
||||
The name of the portgroup
|
||||
|
||||
pg_default_port_config
|
||||
The dafault port config of the portgroup
|
||||
'''
|
||||
log.trace('Retrieving portgroup\'s \'{0}\' teaming'
|
||||
'config'.format(pg_name))
|
||||
teaming_policy = pg_default_port_config.uplinkTeamingPolicy
|
||||
if not teaming_policy:
|
||||
return {}
|
||||
ret_dict = {'notify_switches': teaming_policy.notifySwitches.value,
|
||||
'policy': teaming_policy.policy.value,
|
||||
'reverse_policy': teaming_policy.reversePolicy.value,
|
||||
'rolling_order': teaming_policy.rollingOrder.value}
|
||||
if teaming_policy.failureCriteria:
|
||||
failure_criteria = teaming_policy.failureCriteria
|
||||
ret_dict.update({'failure_criteria': {
|
||||
'check_beacon': failure_criteria.checkBeacon.value,
|
||||
'check_duplex': failure_criteria.checkDuplex.value,
|
||||
'check_error_percent': failure_criteria.checkErrorPercent.value,
|
||||
'check_speed': failure_criteria.checkSpeed.value,
|
||||
'full_duplex': failure_criteria.fullDuplex.value,
|
||||
'percentage': failure_criteria.percentage.value,
|
||||
'speed': failure_criteria.speed.value}})
|
||||
if teaming_policy.uplinkPortOrder:
|
||||
uplink_order = teaming_policy.uplinkPortOrder
|
||||
ret_dict.update({'port_order': {
|
||||
'active': uplink_order.activeUplinkPort,
|
||||
'standby': uplink_order.standbyUplinkPort}})
|
||||
return ret_dict
|
||||
|
||||
|
||||
def _get_dvportgroup_dict(pg_ref):
|
||||
'''
|
||||
Returns a dictionary with a distributed virutal portgroup data
|
||||
|
||||
|
||||
pg_ref
|
||||
Portgroup reference
|
||||
'''
|
||||
props = salt.utils.vmware.get_properties_of_managed_object(
|
||||
pg_ref, ['name', 'config.description', 'config.numPorts',
|
||||
'config.type', 'config.defaultPortConfig'])
|
||||
pg_dict = {'name': props['name'],
|
||||
'description': props.get('config.description'),
|
||||
'num_ports': props['config.numPorts'],
|
||||
'type': props['config.type']}
|
||||
if props['config.defaultPortConfig']:
|
||||
dpg = props['config.defaultPortConfig']
|
||||
if dpg.vlan and \
|
||||
isinstance(dpg.vlan,
|
||||
vim.VmwareDistributedVirtualSwitchVlanIdSpec):
|
||||
|
||||
pg_dict.update({'vlan_id': dpg.vlan.vlanId})
|
||||
pg_dict.update({'out_shaping':
|
||||
_get_dvportgroup_out_shaping(
|
||||
props['name'],
|
||||
props['config.defaultPortConfig'])})
|
||||
pg_dict.update({'security_policy':
|
||||
_get_dvportgroup_security_policy(
|
||||
props['name'],
|
||||
props['config.defaultPortConfig'])})
|
||||
pg_dict.update({'teaming':
|
||||
_get_dvportgroup_teaming(
|
||||
props['name'],
|
||||
props['config.defaultPortConfig'])})
|
||||
return pg_dict
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
|
||||
@supports_proxies('esxdatacenter', 'esxcluster')
|
||||
@gets_service_instance_via_proxy
|
||||
def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None):
|
||||
'''
|
||||
Returns a list of distributed virtual switch portgroups.
|
||||
The list can be filtered by the portgroup names or by the DVS.
|
||||
|
||||
dvs
|
||||
Name of the DVS containing the portgroups.
|
||||
Default value is None.
|
||||
|
||||
portgroup_names
|
||||
List of portgroup names to look for. If None, all portgroups are
|
||||
returned.
|
||||
Default value is None
|
||||
|
||||
service_instance
|
||||
Service instance (vim.ServiceInstance) of the vCenter.
|
||||
Default is None.
|
||||
|
||||
.. code-block:: bash
|
||||
salt '*' vsphere.list_dvporgroups
|
||||
|
||||
salt '*' vsphere.list_dvportgroups dvs=dvs1
|
||||
|
||||
salt '*' vsphere.list_dvportgroups portgroup_names=[pg1]
|
||||
|
||||
salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1]
|
||||
'''
|
||||
ret_dict = []
|
||||
proxy_type = get_proxy_type()
|
||||
if proxy_type == 'esxdatacenter':
|
||||
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
|
||||
dc_ref = _get_proxy_target(service_instance)
|
||||
elif proxy_type == 'esxcluster':
|
||||
datacenter = __salt__['esxcluster.get_details']()['datacenter']
|
||||
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
|
||||
if dvs:
|
||||
dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
|
||||
if not dvs_refs:
|
||||
raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
|
||||
'retrieved'.format(dvs))
|
||||
dvs_ref = dvs_refs[0]
|
||||
get_all_portgroups = True if not portgroup_names else False
|
||||
for pg_ref in salt.utils.vmware.get_dvportgroups(
|
||||
parent_ref=dvs_ref if dvs else dc_ref,
|
||||
portgroup_names=portgroup_names,
|
||||
get_all_portgroups=get_all_portgroups):
|
||||
|
||||
ret_dict.append(_get_dvportgroup_dict(pg_ref))
|
||||
return ret_dict
|
||||
|
||||
|
||||
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def list_uplink_dvportgroup(dvs, service_instance=None):
    '''
    Returns the uplink portgroup of a distributed virtual switch.

    dvs
        Name of the DVS containing the portgroup.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.list_uplink_dvportgroup dvs=dvs_name
    '''
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0])
    return _get_dvportgroup_dict(uplink_pg_ref)


def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf):
    '''
    Applies the values in out_shaping_conf to an out_shaping object

    pg_name
        The name of the portgroup

    out_shaping
        The vim.DVSTrafficShapingPolicy to apply the config to

    out_shaping_conf
        The out shaping config
    '''
    log.trace('Building portgroup\'s \'{0}\' out shaping '
              'policy'.format(pg_name))
    if out_shaping_conf.get('average_bandwidth'):
        out_shaping.averageBandwidth = vim.LongPolicy()
        out_shaping.averageBandwidth.value = \
            out_shaping_conf['average_bandwidth']
    if out_shaping_conf.get('burst_size'):
        out_shaping.burstSize = vim.LongPolicy()
        out_shaping.burstSize.value = out_shaping_conf['burst_size']
    if 'enabled' in out_shaping_conf:
        out_shaping.enabled = vim.BoolPolicy()
        out_shaping.enabled.value = out_shaping_conf['enabled']
    if out_shaping_conf.get('peak_bandwidth'):
        out_shaping.peakBandwidth = vim.LongPolicy()
        out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth']


def _apply_dvportgroup_security_policy(pg_name, sec_policy, sec_policy_conf):
    '''
    Applies the values in sec_policy_conf to a security policy object

    pg_name
        The name of the portgroup

    sec_policy
        The vim.DVSSecurityPolicy to apply the config to

    sec_policy_conf
        The security policy config
    '''
    log.trace('Building portgroup\'s \'{0}\' security policy'.format(pg_name))
    if 'allow_promiscuous' in sec_policy_conf:
        sec_policy.allowPromiscuous = vim.BoolPolicy()
        sec_policy.allowPromiscuous.value = \
            sec_policy_conf['allow_promiscuous']
    if 'forged_transmits' in sec_policy_conf:
        sec_policy.forgedTransmits = vim.BoolPolicy()
        sec_policy.forgedTransmits.value = sec_policy_conf['forged_transmits']
    if 'mac_changes' in sec_policy_conf:
        sec_policy.macChanges = vim.BoolPolicy()
        sec_policy.macChanges.value = sec_policy_conf['mac_changes']


def _apply_dvportgroup_teaming(pg_name, teaming, teaming_conf):
    '''
    Applies the values in teaming_conf to a teaming policy object

    pg_name
        The name of the portgroup

    teaming
        The vim.VmwareUplinkPortTeamingPolicy to apply the config to

    teaming_conf
        The teaming config
    '''
    log.trace('Building portgroup\'s \'{0}\' teaming'.format(pg_name))
    if 'notify_switches' in teaming_conf:
        teaming.notifySwitches = vim.BoolPolicy()
        teaming.notifySwitches.value = teaming_conf['notify_switches']
    if 'policy' in teaming_conf:
        teaming.policy = vim.StringPolicy()
        teaming.policy.value = teaming_conf['policy']
    if 'reverse_policy' in teaming_conf:
        teaming.reversePolicy = vim.BoolPolicy()
        teaming.reversePolicy.value = teaming_conf['reverse_policy']
    if 'rolling_order' in teaming_conf:
        teaming.rollingOrder = vim.BoolPolicy()
        teaming.rollingOrder.value = teaming_conf['rolling_order']
    if 'failure_criteria' in teaming_conf:
        if not teaming.failureCriteria:
            teaming.failureCriteria = vim.DVSFailureCriteria()
        failure_criteria_conf = teaming_conf['failure_criteria']
        if 'check_beacon' in failure_criteria_conf:
            teaming.failureCriteria.checkBeacon = vim.BoolPolicy()
            teaming.failureCriteria.checkBeacon.value = \
                failure_criteria_conf['check_beacon']
        if 'check_duplex' in failure_criteria_conf:
            teaming.failureCriteria.checkDuplex = vim.BoolPolicy()
            teaming.failureCriteria.checkDuplex.value = \
                failure_criteria_conf['check_duplex']
        if 'check_error_percent' in failure_criteria_conf:
            teaming.failureCriteria.checkErrorPercent = vim.BoolPolicy()
            teaming.failureCriteria.checkErrorPercent.value = \
                failure_criteria_conf['check_error_percent']
        if 'check_speed' in failure_criteria_conf:
            teaming.failureCriteria.checkSpeed = vim.StringPolicy()
            teaming.failureCriteria.checkSpeed.value = \
                failure_criteria_conf['check_speed']
        if 'full_duplex' in failure_criteria_conf:
            teaming.failureCriteria.fullDuplex = vim.BoolPolicy()
            teaming.failureCriteria.fullDuplex.value = \
                failure_criteria_conf['full_duplex']
        if 'percentage' in failure_criteria_conf:
            teaming.failureCriteria.percentage = vim.IntPolicy()
            teaming.failureCriteria.percentage.value = \
                failure_criteria_conf['percentage']
        if 'speed' in failure_criteria_conf:
            teaming.failureCriteria.speed = vim.IntPolicy()
            teaming.failureCriteria.speed.value = \
                failure_criteria_conf['speed']
    if 'port_order' in teaming_conf:
        if not teaming.uplinkPortOrder:
            teaming.uplinkPortOrder = vim.VMwareUplinkPortOrderPolicy()
        if 'active' in teaming_conf['port_order']:
            teaming.uplinkPortOrder.activeUplinkPort = \
                teaming_conf['port_order']['active']
        if 'standby' in teaming_conf['port_order']:
            teaming.uplinkPortOrder.standbyUplinkPort = \
                teaming_conf['port_order']['standby']


def _apply_dvportgroup_config(pg_name, pg_spec, pg_conf):
    '''
    Applies the values in conf to a distributed portgroup spec

    pg_name
        The name of the portgroup

    pg_spec
        The vim.DVPortgroupConfigSpec to apply the config to

    pg_conf
        The portgroup config
    '''
    log.trace('Building portgroup\'s \'{0}\' spec'.format(pg_name))
    if 'name' in pg_conf:
        pg_spec.name = pg_conf['name']
    if 'description' in pg_conf:
        pg_spec.description = pg_conf['description']
    if 'num_ports' in pg_conf:
        pg_spec.numPorts = pg_conf['num_ports']
    if 'type' in pg_conf:
        pg_spec.type = pg_conf['type']

    if not pg_spec.defaultPortConfig:
        for prop in ['vlan_id', 'out_shaping', 'security_policy', 'teaming']:
            if prop in pg_conf:
                pg_spec.defaultPortConfig = vim.VMwareDVSPortSetting()
    if 'vlan_id' in pg_conf:
        pg_spec.defaultPortConfig.vlan = \
            vim.VmwareDistributedVirtualSwitchVlanIdSpec()
        pg_spec.defaultPortConfig.vlan.vlanId = pg_conf['vlan_id']
    if 'out_shaping' in pg_conf:
        if not pg_spec.defaultPortConfig.outShapingPolicy:
            pg_spec.defaultPortConfig.outShapingPolicy = \
                vim.DVSTrafficShapingPolicy()
        _apply_dvportgroup_out_shaping(
            pg_name, pg_spec.defaultPortConfig.outShapingPolicy,
            pg_conf['out_shaping'])
    if 'security_policy' in pg_conf:
        if not pg_spec.defaultPortConfig.securityPolicy:
            pg_spec.defaultPortConfig.securityPolicy = \
                vim.DVSSecurityPolicy()
        _apply_dvportgroup_security_policy(
            pg_name, pg_spec.defaultPortConfig.securityPolicy,
            pg_conf['security_policy'])
    if 'teaming' in pg_conf:
        if not pg_spec.defaultPortConfig.uplinkTeamingPolicy:
            pg_spec.defaultPortConfig.uplinkTeamingPolicy = \
                vim.VmwareUplinkPortTeamingPolicy()
        _apply_dvportgroup_teaming(
            pg_name, pg_spec.defaultPortConfig.uplinkTeamingPolicy,
            pg_conf['teaming'])

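Taken together, the helpers above translate a plain configuration dict into a
pyVmomi ``vim.DVPortgroupConfigSpec``. A minimal sketch of the flow, assuming
the module's imports are in scope (the conf values below are illustrative, not
taken from the source):

.. code-block:: python

    # Illustrative conf; key names follow the portgroup example in
    # salt.states.dvs. Values are made up for this sketch.
    conf = {'name': 'pg1',
            'type': 'earlyBinding',
            'vlan_id': 100,
            'out_shaping': {'enabled': True,
                            'average_bandwidth': 1500,
                            'peak_bandwidth': 1500,
                            'burst_size': 4096}}
    spec = vim.DVPortgroupConfigSpec()
    _apply_dvportgroup_config('pg1', spec, conf)
    # spec.defaultPortConfig now carries the vlan id and outShapingPolicy
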
@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def create_dvportgroup(portgroup_dict, portgroup_name, dvs,
                       service_instance=None):
    '''
    Creates a distributed virtual portgroup.

    Note: The ``portgroup_name`` param will override any name already set
    in ``portgroup_dict``.

    portgroup_dict
        Dictionary with the config values the portgroup should be created with
        (example in salt.states.dvs).

    portgroup_name
        Name of the portgroup to be created.

    dvs
        Name of the DVS that will contain the portgroup.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>
            portgroup_name=pg1 dvs=dvs1
    '''
    log.trace('Creating portgroup \'{0}\' in dvs \'{1}\' '
              'with dict = {2}'.format(portgroup_name, dvs, portgroup_dict))
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    # Make the name of the dvportgroup consistent with the parameter
    portgroup_dict['name'] = portgroup_name
    spec = vim.DVPortgroupConfigSpec()
    _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)
    salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)
    return True


@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def update_dvportgroup(portgroup_dict, portgroup, dvs, service_instance=None):
    '''
    Updates a distributed virtual portgroup.

    portgroup_dict
        Dictionary with the values the portgroup should be updated with
        (example in salt.states.dvs).

    portgroup
        Name of the portgroup to be updated.

    dvs
        Name of the DVS containing the portgroups.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.update_dvportgroup portgroup_dict=<dict>
            portgroup=pg1

        salt '*' vsphere.update_dvportgroup portgroup_dict=<dict>
            portgroup=pg1 dvs=dvs1
    '''
    log.trace('Updating portgroup \'{0}\' in dvs \'{1}\' '
              'with dict = {2}'.format(portgroup, dvs, portgroup_dict))
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0],
                                                 portgroup_names=[portgroup])
    if not pg_refs:
        raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not '
                                         'retrieved'.format(portgroup))
    pg_props = salt.utils.vmware.get_properties_of_managed_object(pg_refs[0],
                                                                  ['config'])
    spec = vim.DVPortgroupConfigSpec()
    # Copy existing properties in spec
    for prop in ['autoExpand', 'configVersion', 'defaultPortConfig',
                 'description', 'name', 'numPorts', 'policy', 'portNameFormat',
                 'scope', 'type', 'vendorSpecificConfig']:
        setattr(spec, prop, getattr(pg_props['config'], prop))
    _apply_dvportgroup_config(portgroup, spec, portgroup_dict)
    salt.utils.vmware.update_dvportgroup(pg_refs[0], spec)
    return True


@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy
def remove_dvportgroup(portgroup, dvs, service_instance=None):
    '''
    Removes a distributed virtual portgroup.

    portgroup
        Name of the portgroup to be removed.

    dvs
        Name of the DVS containing the portgroups.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.remove_dvportgroup portgroup=pg1 dvs=dvs1
    '''
    log.trace('Removing portgroup \'{0}\' in dvs \'{1}\''
              ''.format(portgroup, dvs))
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    pg_refs = salt.utils.vmware.get_dvportgroups(dvs_refs[0],
                                                 portgroup_names=[portgroup])
    if not pg_refs:
        raise VMwareObjectRetrievalError('Portgroup \'{0}\' was not '
                                         'retrieved'.format(portgroup))
    salt.utils.vmware.remove_dvportgroup(pg_refs[0])
    return True


@depends(HAS_PYVMOMI)
@supports_proxies('esxdatacenter', 'esxcluster')
@gets_service_instance_via_proxy

@ -110,7 +110,7 @@ def available(software=True,
            Include software updates in the results (default is True)

        drivers (bool):
            Include driver updates in the results (default is False)
            Include driver updates in the results (default is True)

        summary (bool):
            - True: Return a summary of updates available for each category.

@ -1347,6 +1347,7 @@ def install(name=None,
    to_install = []
    to_downgrade = []
    to_reinstall = []
    _available = {}
    # The above three lists will be populated with tuples containing the
    # package name and the string being used for this particular package
    # modification. The reason for this method is that the string we use for

@ -77,6 +77,9 @@ def __virtual__():
            ) == 0:
        return 'zfs'

    if __grains__['kernel'] == 'OpenBSD':
        return False

    _zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse')
    if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')):
        return 'zfs'

163
salt/pillar/rethinkdb_pillar.py
Normal file

@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
'''
Provide external pillar data from RethinkDB

.. versionadded:: Oxygen

:depends: rethinkdb (on the salt-master)


salt master rethinkdb configuration
===================================
These variables must be configured in your master configuration file.

* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
  Defaults to ``'28015'``
* ``rethinkdb.database`` - The database to connect to.
  Defaults to ``'salt'``
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
  Defaults to ``''``
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
  Defaults to ``''``

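For instance, a master configuration overriding these defaults might look like
the following (the host name and credentials are illustrative):

.. code-block:: yaml

    rethinkdb.host: rethink.example.com
    rethinkdb.port: '28015'
    rethinkdb.database: salt
    rethinkdb.username: pillar_reader
    rethinkdb.password: s3cret
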
salt-master ext_pillar configuration
====================================

The ext_pillar function arguments are given in single line dictionary notation.

.. code-block:: yaml

    ext_pillar:
      - rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}

In the example above the following happens:

* The salt-master will look for external pillars in the 'ext_pillar' table
  on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'


Module Documentation
====================
'''
from __future__ import absolute_import

# Import python libraries
import logging

# Import 3rd party libraries
try:
    import rethinkdb
    HAS_RETHINKDB = True
except ImportError:
    HAS_RETHINKDB = False

__virtualname__ = 'rethinkdb'

__opts__ = {
    'rethinkdb.host': 'salt',
    'rethinkdb.port': '28015',
    'rethinkdb.database': 'salt',
    'rethinkdb.username': None,
    'rethinkdb.password': None
}


def __virtual__():
    if not HAS_RETHINKDB:
        return False
    return True


# Configure logging
log = logging.getLogger(__name__)


def ext_pillar(minion_id,
               pillar,
               table='pillar',
               id_field=None,
               field=None,
               pillar_key=None):
    '''
    Collect minion external pillars from a RethinkDB database

    Arguments:

    * `table`: The RethinkDB table containing external pillar information.
      Defaults to ``'pillar'``
    * `id_field`: Field in document containing the minion id.
      If blank then we assume the table index matches minion ids
    * `field`: Specific field in the document used for pillar data, if blank
      then the entire document will be used
    * `pillar_key`: The salt-master will nest found external pillars under
      this key before merging into the minion pillars. If blank, external
      pillars will be merged at top level
    '''
    host = __opts__['rethinkdb.host']
    port = __opts__['rethinkdb.port']
    database = __opts__['rethinkdb.database']
    username = __opts__['rethinkdb.username']
    password = __opts__['rethinkdb.password']

    log.debug('Connecting to {0}:{1} as user \'{2}\' for RethinkDB ext_pillar'
              .format(host, port, username))

    # Connect to the database
    conn = rethinkdb.connect(host=host,
                             port=port,
                             db=database,
                             user=username,
                             password=password)

    data = None

    try:
        if id_field:
            log.debug('ext_pillar.rethinkdb: looking up pillar. '
                      'table: {0}, field: {1}, minion: {2}'.format(
                          table, id_field, minion_id))

            if field:
                data = rethinkdb.table(table).filter(
                    {id_field: minion_id}).pluck(field).run(conn)
            else:
                data = rethinkdb.table(table).filter(
                    {id_field: minion_id}).run(conn)
        else:
            log.debug('ext_pillar.rethinkdb: looking up pillar. '
                      'table: {0}, field: id, minion: {1}'.format(
                          table, minion_id))

            if field:
                data = rethinkdb.table(table).get(minion_id).pluck(field).run(
                    conn)
            else:
                data = rethinkdb.table(table).get(minion_id).run(conn)
    finally:
        if conn.is_open():
            conn.close()

    if data.items:
        # Return nothing if multiple documents are found for a minion
        if len(data.items) > 1:
            log.error('ext_pillar.rethinkdb: ambiguous documents found for '
                      'minion {0}'.format(minion_id))
            return {}
        else:
            result = data.items.pop()

        if pillar_key:
            return {pillar_key: result}
        return result
    else:
        # No document found in the database
        log.debug('ext_pillar.rethinkdb: no document found')
        return {}

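Given the ext_pillar configuration shown in the module documentation above, a
matching document in the ``ext_pillar`` table might look like this (a
hypothetical record, shown only to illustrate the ``id_field``/``field``
lookup):

.. code-block:: python

    # 'minion_id' is matched against the minion's id; 'pillar_root' is the
    # nested field the pillar data is plucked from.
    {
        'minion_id': 'web01',
        'pillar_root': {'roles': ['web'], 'region': 'us-east-1'}
    }
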
@ -414,7 +414,7 @@ def extracted(name,
        .. versionadded:: 2017.7.3

    keep : True
        Same as ``keep_source``.
        Same as ``keep_source``, kept for backward-compatibility.

        .. note::
            If both ``keep_source`` and ``keep`` are used, ``keep`` will be

@ -648,6 +648,21 @@ def extracted(name,
    # Remove pub kwargs as they're irrelevant here.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    if 'keep_source' in kwargs and 'keep' in kwargs:
        ret.setdefault('warnings', []).append(
            'Both \'keep_source\' and \'keep\' were used. Since these both '
            'do the same thing, \'keep\' was ignored.'
        )
        keep_source = bool(kwargs.pop('keep_source'))
        kwargs.pop('keep')
    elif 'keep_source' in kwargs:
        keep_source = bool(kwargs.pop('keep_source'))
    elif 'keep' in kwargs:
        keep_source = bool(kwargs.pop('keep'))
    else:
        # Neither was passed, default is True
        keep_source = True

    if 'keep_source' in kwargs and 'keep' in kwargs:
        ret.setdefault('warnings', []).append(
            'Both \'keep_source\' and \'keep\' were used. Since these both '

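Condensed, the precedence implemented by the added block is: ``keep_source``
beats ``keep``, either alone is honored, and omitting both defaults to
``True``. A minimal restatement (the ``_resolve_keep`` helper is hypothetical,
written only for illustration):

.. code-block:: python

    def _resolve_keep(kwargs):
        # keep_source wins when both are passed; a warning is issued above
        if 'keep_source' in kwargs and 'keep' in kwargs:
            kwargs.pop('keep')
            return bool(kwargs.pop('keep_source'))
        if 'keep_source' in kwargs:
            return bool(kwargs.pop('keep_source'))
        if 'keep' in kwargs:
            return bool(kwargs.pop('keep'))
        return True  # neither was passed

    assert _resolve_keep({'keep_source': False, 'keep': True}) is False
    assert _resolve_keep({}) is True
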
717
salt/states/dvs.py
Normal file

@ -0,0 +1,717 @@
# -*- coding: utf-8 -*-
'''
Manage VMware distributed virtual switches (DVSs) and their distributed virtual
portgroups (DVportgroups).

:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstaley.com>`

Examples
========

Several settings can be changed for DVSs and DVportgroups. Here are two
examples covering all of the settings. Fewer settings can be used.

DVS
---

.. code-block:: python

    'name': 'dvs1',
    'max_mtu': 1000,
    'uplink_names': [
        'dvUplink1',
        'dvUplink2',
        'dvUplink3'
    ],
    'capability': {
        'portgroup_operation_supported': false,
        'operation_supported': true,
        'port_operation_supported': false
    },
    'lacp_api_version': 'multipleLag',
    'contact_email': 'foo@email.com',
    'product_info': {
        'version': '6.0.0',
        'vendor': 'VMware, Inc.',
        'name': 'DVS'
    },
    'network_resource_management_enabled': true,
    'contact_name': 'me@email.com',
    'infrastructure_traffic_resource_pools': [
        {
            'reservation': 0,
            'limit': 1000,
            'share_level': 'high',
            'key': 'management',
            'num_shares': 100
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'faultTolerance',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': 32000,
            'share_level': 'normal',
            'key': 'vmotion',
            'num_shares': 50
        },
        {
            'reservation': 10000,
            'limit': -1,
            'share_level': 'normal',
            'key': 'virtualMachine',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'custom',
            'key': 'iSCSI',
            'num_shares': 75
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'nfs',
            'num_shares': 50
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'hbr',
            'num_shares': 50
        },
        {
            'reservation': 8750,
            'limit': 15000,
            'share_level': 'high',
            'key': 'vsan',
            'num_shares': 100
        },
        {
            'reservation': 0,
            'limit': -1,
            'share_level': 'normal',
            'key': 'vdp',
            'num_shares': 50
        }
    ],
    'link_discovery_protocol': {
        'operation': 'listen',
        'protocol': 'cdp'
    },
    'network_resource_control_version': 'version3',
    'description': 'Managed by Salt. Random settings.'

Note: The mandatory attribute is: ``name``.

Portgroup
---------

.. code-block:: python

    'security_policy': {
        'allow_promiscuous': true,
        'mac_changes': false,
        'forged_transmits': true
    },
    'name': 'vmotion-v702',
    'out_shaping': {
        'enabled': true,
        'average_bandwidth': 1500,
        'burst_size': 4096,
        'peak_bandwidth': 1500
    },
    'num_ports': 128,
    'teaming': {
        'port_order': {
            'active': [
                'dvUplink2'
            ],
            'standby': [
                'dvUplink1'
            ]
        },
        'notify_switches': false,
        'reverse_policy': true,
        'rolling_order': false,
        'policy': 'failover_explicit',
        'failure_criteria': {
            'check_error_percent': true,
            'full_duplex': false,
            'check_duplex': false,
            'percentage': 50,
            'check_speed': 'minimum',
            'speed': 20,
            'check_beacon': true
        }
    },
    'type': 'earlyBinding',
    'vlan_id': 100,
    'description': 'Managed by Salt. Random settings.'

Note: The mandatory attributes are: ``name``, ``type``.

Dependencies
============

- pyVmomi Python Module


pyVmomi
-------

PyVmomi can be installed via pip:

.. code-block:: bash

    pip install pyVmomi

.. note::

    Version 6.0 of pyVmomi has some problems with SSL error handling on certain
    versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9,
    or newer must be present. This is due to an upstream dependency
    in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
    version of Python is not in the supported range, you will need to install an
    earlier version of pyVmomi. See `Issue #29537`_ for more information.

.. _Issue #29537: https://github.com/saltstack/salt/issues/29537

Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:

.. code-block:: bash

    pip install pyVmomi==5.5.0.2014.1.1

The 5.5.0.2014.1.1 is a known stable version that this original ESXi State
Module was developed against.
'''

# Import Python Libs
from __future__ import absolute_import
import logging
import traceback
import sys

# Import Salt Libs
import salt.exceptions
from salt.ext.six.moves import range

# Import Third Party Libs
try:
    from pyVmomi import VmomiSupport
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False

# Get Logging Started
log = logging.getLogger(__name__)


def __virtual__():
    if not HAS_PYVMOMI:
        return False, 'State module did not load: pyVmomi not found'

    # We check the supported vim versions to infer the pyVmomi version
    if 'vim25/6.0' in VmomiSupport.versionMap and \
       sys.version_info > (2, 7) and sys.version_info < (2, 7, 9):

        return False, ('State module did not load: Incompatible versions '
                       'of Python and pyVmomi present. See Issue #29537.')
    return 'dvs'


def mod_init(low):
    '''
    Init function
    '''
    return True


def _get_datacenter_name():
    '''
    Returns the datacenter name configured on the proxy

    Supported proxies: esxcluster, esxdatacenter
    '''
    proxy_type = __salt__['vsphere.get_proxy_type']()
    details = None
    if proxy_type == 'esxcluster':
        details = __salt__['esxcluster.get_details']()
    elif proxy_type == 'esxdatacenter':
        details = __salt__['esxdatacenter.get_details']()
    if not details:
        raise salt.exceptions.CommandExecutionError(
            'details for proxy type \'{0}\' not loaded'.format(proxy_type))
    return details['datacenter']


def dvs_configured(name, dvs):
    '''
    Configures a DVS.

    Creates a new DVS if it doesn't exist in the provided datacenter, or
    reconfigures it if configured differently.

    dvs
        DVS dict representation (see module sysdocs)
    '''
    datacenter_name = _get_datacenter_name()
    dvs_name = dvs['name'] if dvs.get('name') else name
    log.info('Running state {0} for DVS \'{1}\' in datacenter '
             '\'{2}\''.format(name, dvs_name, datacenter_name))
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None}
    comments = []
    changes = {}
    changes_required = False

    try:
        #TODO dvs validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        dvss = __salt__['vsphere.list_dvss'](dvs_names=[dvs_name],
                                             service_instance=si)
        if not dvss:
            changes_required = True
            if __opts__['test']:
                comments.append('State {0} will create a new DVS '
                                '\'{1}\' in datacenter \'{2}\''
                                ''.format(name, dvs_name, datacenter_name))
                log.info(comments[-1])
            else:
                dvs['name'] = dvs_name
                __salt__['vsphere.create_dvs'](dvs_dict=dvs,
                                               dvs_name=dvs_name,
                                               service_instance=si)
                comments.append('Created a new DVS \'{0}\' in datacenter '
                                '\'{1}\''.format(dvs_name, datacenter_name))
                log.info(comments[-1])
                changes.update({'dvs': {'new': dvs}})
        else:
            # DVS already exists. Checking various aspects of the config
            props = ['description', 'contact_email', 'contact_name',
                     'lacp_api_version', 'link_discovery_protocol',
                     'max_mtu', 'network_resource_control_version',
                     'network_resource_management_enabled']
            log.trace('DVS \'{0}\' found in datacenter \'{1}\'. Checking '
                      'for any updates in '
                      '{2}'.format(dvs_name, datacenter_name, props))
            props_to_original_values = {}
            props_to_updated_values = {}
            current_dvs = dvss[0]
            for prop in props:
                if prop in dvs and dvs[prop] != current_dvs.get(prop):
                    props_to_original_values[prop] = current_dvs.get(prop)
                    props_to_updated_values[prop] = dvs[prop]

            # Simple infrastructure traffic resource control compare doesn't
            # work because num_shares is optional if share_level is not custom
            # We need to do a dedicated compare for this property
            infra_prop = 'infrastructure_traffic_resource_pools'
            original_infra_res_pools = []
            updated_infra_res_pools = []
            if infra_prop in dvs:
                if not current_dvs.get(infra_prop):
                    updated_infra_res_pools = dvs[infra_prop]
                else:
                    for idx in range(len(dvs[infra_prop])):
                        if 'num_shares' not in dvs[infra_prop][idx] and \
                           current_dvs[infra_prop][idx]['share_level'] != \
                           'custom' and \
                           'num_shares' in current_dvs[infra_prop][idx]:

                            del current_dvs[infra_prop][idx]['num_shares']
                        if dvs[infra_prop][idx] != \
                           current_dvs[infra_prop][idx]:

                            original_infra_res_pools.append(
                                current_dvs[infra_prop][idx])
                            updated_infra_res_pools.append(
                                dict(dvs[infra_prop][idx]))
            if updated_infra_res_pools:
                props_to_original_values[
                    'infrastructure_traffic_resource_pools'] = \
                    original_infra_res_pools
                props_to_updated_values[
                    'infrastructure_traffic_resource_pools'] = \
                    updated_infra_res_pools
            if props_to_updated_values:
                if __opts__['test']:
                    changes_string = ''
                    for p in props_to_updated_values:
                        if p == 'infrastructure_traffic_resource_pools':
                            changes_string += \
                                '\tinfrastructure_traffic_resource_pools:\n'
                            for idx in range(len(props_to_updated_values[p])):
                                d = props_to_updated_values[p][idx]
                                s = props_to_original_values[p][idx]
                                changes_string += \
                                    ('\t\t{0} from \'{1}\' to \'{2}\'\n'
                                     ''.format(d['key'], s, d))
                        else:
                            changes_string += \
                                ('\t{0} from \'{1}\' to \'{2}\'\n'
                                 ''.format(p, props_to_original_values[p],
                                           props_to_updated_values[p]))
                    comments.append(
                        'State dvs_configured will update DVS \'{0}\' '
                        'in datacenter \'{1}\':\n{2}'
                        ''.format(dvs_name, datacenter_name, changes_string))
                    log.info(comments[-1])
                else:
                    __salt__['vsphere.update_dvs'](
                        dvs_dict=props_to_updated_values,
                        dvs=dvs_name,
                        service_instance=si)
                    comments.append('Updated DVS \'{0}\' in datacenter \'{1}\''
                                    ''.format(dvs_name, datacenter_name))
                    log.info(comments[-1])
                    changes.update({'dvs': {'new': props_to_updated_values,
                                            'old': props_to_original_values}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        if not __opts__['test']:
            ret['result'] = False
        ret.update({'comment': str(exc),
                    'result': False if not __opts__['test'] else None})
        return ret
    if not comments:
        # We have no changes
        ret.update({'comment': ('DVS \'{0}\' in datacenter \'{1}\' is '
                                'correctly configured. Nothing to be done.'
                                ''.format(dvs_name, datacenter_name)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret


def _get_diff_dict(dict1, dict2):
    '''
    Returns a dictionary with the diffs between two dictionaries

    It will ignore any key that doesn't exist in dict2
    '''
    ret_dict = {}
    for p in dict2.keys():
        if p not in dict1:
            ret_dict.update({p: {'val1': None, 'val2': dict2[p]}})
        elif dict1[p] != dict2[p]:
            if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
                sub_diff_dict = _get_diff_dict(dict1[p], dict2[p])
                if sub_diff_dict:
                    ret_dict.update({p: sub_diff_dict})
            else:
                ret_dict.update({p: {'val1': dict1[p], 'val2': dict2[p]}})
    return ret_dict

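To illustrate the shape of the returned diff, a small example (values
invented):

.. code-block:: python

    old = {'vlan_id': 100, 'teaming': {'policy': 'loadbalance_ip'}}
    new = {'vlan_id': 200, 'teaming': {'policy': 'failover_explicit'}}
    _get_diff_dict(old, new)
    # {'vlan_id': {'val1': 100, 'val2': 200},
    #  'teaming': {'policy': {'val1': 'loadbalance_ip',
    #                         'val2': 'failover_explicit'}}}
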
def _get_val2_dict_from_diff_dict(diff_dict):
    '''
    Returns a dictionary with the values stored in val2 of a diff dict.
    '''
    ret_dict = {}
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if 'val2' in diff_dict[p].keys():
            ret_dict.update({p: diff_dict[p]['val2']})
        else:
            ret_dict.update(
                {p: _get_val2_dict_from_diff_dict(diff_dict[p])})
    return ret_dict


def _get_val1_dict_from_diff_dict(diff_dict):
    '''
    Returns a dictionary with the values stored in val1 of a diff dict.
    '''
    ret_dict = {}
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if 'val1' in diff_dict[p].keys():
            ret_dict.update({p: diff_dict[p]['val1']})
        else:
            ret_dict.update(
                {p: _get_val1_dict_from_diff_dict(diff_dict[p])})
    return ret_dict


def _get_changes_from_diff_dict(diff_dict):
    '''
    Returns a list of string messages of the differences in a diff dict.

    Each inner message is tabulated one tab deeper
    '''
    changes_strings = []
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if sorted(diff_dict[p].keys()) == ['val1', 'val2']:
            # Some string formatting
            from_str = diff_dict[p]['val1']
            if isinstance(diff_dict[p]['val1'], str):
                from_str = '\'{0}\''.format(diff_dict[p]['val1'])
            elif isinstance(diff_dict[p]['val1'], list):
                from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1']))
            to_str = diff_dict[p]['val2']
            if isinstance(diff_dict[p]['val2'], str):
                to_str = '\'{0}\''.format(diff_dict[p]['val2'])
            elif isinstance(diff_dict[p]['val2'], list):
                to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2']))
            changes_strings.append('{0} from {1} to {2}'.format(
                p, from_str, to_str))
        else:
            sub_changes = _get_changes_from_diff_dict(diff_dict[p])
            if sub_changes:
                changes_strings.append('{0}:'.format(p))
                changes_strings.extend(['\t{0}'.format(c)
                                        for c in sub_changes])
    return changes_strings

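Continuing the example above, ``_get_changes_from_diff_dict`` renders such a
structure as human-readable change lines, with nested entries indented one tab
deeper:

.. code-block:: python

    _get_changes_from_diff_dict(
        {'vlan_id': {'val1': 100, 'val2': 200},
         'teaming': {'policy': {'val1': 'loadbalance_ip',
                                'val2': 'failover_explicit'}}})
    # Ordering of top-level entries may vary with dict iteration order:
    # ['vlan_id from 100 to 200',
    #  'teaming:',
    #  "\tpolicy from 'loadbalance_ip' to 'failover_explicit'"]
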
def portgroups_configured(name, dvs, portgroups):
    '''
    Configures portgroups on a DVS.

    Creates/updates/removes portgroups in a provided DVS

    dvs
        Name of the DVS

    portgroups
        Portgroup dict representations (see module sysdocs)
    '''
    datacenter = _get_datacenter_name()
    log.info('Running state {0} on DVS \'{1}\', datacenter '
             '\'{2}\''.format(name, dvs, datacenter))
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    comments = []
    changes = {}
    changes_required = False

    try:
        #TODO portgroups validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        current_pgs = __salt__['vsphere.list_dvportgroups'](
            dvs=dvs, service_instance=si)
        expected_pg_names = []
        for pg in portgroups:
            pg_name = pg['name']
            expected_pg_names.append(pg_name)
            del pg['name']
            log.info('Checking pg \'{0}\''.format(pg_name))
            filtered_current_pgs = \
                [p for p in current_pgs if p.get('name') == pg_name]
            if not filtered_current_pgs:
                changes_required = True
                if __opts__['test']:
                    comments.append('State {0} will create a new portgroup '
                                    '\'{1}\' in DVS \'{2}\', datacenter '
                                    '\'{3}\''.format(name, pg_name, dvs,
                                                     datacenter))
                else:
                    __salt__['vsphere.create_dvportgroup'](
                        portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
                        service_instance=si)
                    comments.append('Created a new portgroup \'{0}\' in DVS '
                                    '\'{1}\', datacenter \'{2}\''
                                    ''.format(pg_name, dvs, datacenter))
                log.info(comments[-1])
                changes.update({pg_name: {'new': pg}})
            else:
                # Portgroup already exists. Checking the config
                log.trace('Portgroup \'{0}\' found in DVS \'{1}\', datacenter '
                          '\'{2}\'. Checking for any updates.'
                          ''.format(pg_name, dvs, datacenter))
                current_pg = filtered_current_pgs[0]
                diff_dict = _get_diff_dict(current_pg, pg)

                if diff_dict:
                    changes_required = True
                    if __opts__['test']:
                        changes_strings = \
                            _get_changes_from_diff_dict(diff_dict)
                        log.trace('changes_strings = '
                                  '{0}'.format(changes_strings))
                        comments.append(
                            'State {0} will update portgroup \'{1}\' in '
                            'DVS \'{2}\', datacenter \'{3}\':\n{4}'
                            ''.format(name, pg_name, dvs, datacenter,
                                      '\n'.join(['\t{0}'.format(c) for c in
                                                 changes_strings])))
                    else:
                        __salt__['vsphere.update_dvportgroup'](
                            portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
                            service_instance=si)
                        comments.append('Updated portgroup \'{0}\' in DVS '
                                        '\'{1}\', datacenter \'{2}\''
                                        ''.format(pg_name, dvs, datacenter))
                        log.info(comments[-1])
                        changes.update(
                            {pg_name:
                             {'new': _get_val2_dict_from_diff_dict(diff_dict),
                              'old': _get_val1_dict_from_diff_dict(diff_dict)}})
        # Add the uplink portgroup to the expected pg names
        uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
            dvs=dvs, service_instance=si)
        expected_pg_names.append(uplink_pg['name'])
        # Remove any extra portgroups
        for current_pg in current_pgs:
            if current_pg['name'] not in expected_pg_names:
                changes_required = True
                if __opts__['test']:
                    comments.append('State {0} will remove '
                                    'the portgroup \'{1}\' from DVS \'{2}\', '
                                    'datacenter \'{3}\''
                                    ''.format(name, current_pg['name'], dvs,
                                              datacenter))
                else:
                    __salt__['vsphere.remove_dvportgroup'](
                        portgroup=current_pg['name'], dvs=dvs,
                        service_instance=si)
                    comments.append('Removed the portgroup \'{0}\' from DVS '
                                    '\'{1}\', datacenter \'{2}\''
                                    ''.format(current_pg['name'], dvs,
                                              datacenter))
                    log.info(comments[-1])
                    changes.update({current_pg['name']:
                                    {'old': current_pg}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        if not __opts__['test']:
            ret['result'] = False
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret
    if not changes_required:
        # We have no changes
        ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
                                '\'{1}\' exist and are correctly configured. '
                                'Nothing to be done.'.format(dvs, datacenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret


def uplink_portgroup_configured(name, dvs, uplink_portgroup):
    '''
    Configures the uplink portgroup on a DVS. The state assumes there is only
    one uplink portgroup.

    dvs
        Name of the DVS

    uplink_portgroup
        Uplink portgroup dict representation (see module sysdocs)

    '''
    datacenter = _get_datacenter_name()
    log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\''
             ''.format(name, dvs, datacenter))
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    comments = []
    changes = {}
    changes_required = False

    try:
        #TODO portgroups validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        current_uplink_portgroup = __salt__['vsphere.list_uplink_dvportgroup'](
            dvs=dvs, service_instance=si)
        log.trace('current_uplink_portgroup = '
                  '{0}'.format(current_uplink_portgroup))
        diff_dict = _get_diff_dict(current_uplink_portgroup, uplink_portgroup)
        if diff_dict:
            changes_required = True
            if __opts__['test']:
                changes_strings = \
                    _get_changes_from_diff_dict(diff_dict)
                log.trace('changes_strings = '
                          '{0}'.format(changes_strings))
                comments.append(
                    'State {0} will update the '
                    'uplink portgroup in DVS \'{1}\', datacenter '
                    '\'{2}\':\n{3}'
                    ''.format(name, dvs, datacenter,
                              '\n'.join(['\t{0}'.format(c) for c in
                                         changes_strings])))
            else:
                __salt__['vsphere.update_dvportgroup'](
                    portgroup_dict=uplink_portgroup,
                    portgroup=current_uplink_portgroup['name'],
                    dvs=dvs,
                    service_instance=si)
                comments.append('Updated the uplink portgroup in DVS '
                                '\'{0}\', datacenter \'{1}\''
                                ''.format(dvs, datacenter))
                log.info(comments[-1])
                changes.update(
                    {'uplink_portgroup':
                     {'new': _get_val2_dict_from_diff_dict(diff_dict),
                      'old': _get_val1_dict_from_diff_dict(diff_dict)}})
        __salt__['vsphere.disconnect'](si)
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc()))
        if si:
            __salt__['vsphere.disconnect'](si)
        if not __opts__['test']:
            ret['result'] = False
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret
    if not changes_required:
        # We have no changes
        ret.update({'comment': ('Uplink portgroup in DVS \'{0}\', datacenter '
                                '\'{1}\' is correctly configured. '
                                'Nothing to be done.'.format(dvs, datacenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
    return ret

@ -6637,6 +6637,28 @@ def cached(name,
    else:
        pre_hash = None

    def _try_cache(path, checksum):
        '''
        This helper is not needed anymore in develop as the fileclient in the
        develop branch now has means of skipping a download if the existing
        hash matches one passed to cp.cache_file. Remove this helper and the
        code that invokes it, once we have merged forward into develop.
        '''
        if not path or not checksum:
            return True
        form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
        if form is None:
            # Shouldn't happen, an invalid checksum length should be caught
            # before we get here. But in the event this gets through, don't let
            # it cause any trouble, and just return True.
            return True
        try:
            return salt.utils.get_hash(path, form=form) != checksum
        except (IOError, OSError, ValueError):
            # Again, shouldn't happen, but don't let invalid input/permissions
            # in the call to get_hash blow this up.
            return True

    # Cache the file. Note that this will not actually download the file if
    # either of the following is true:
    # 1. source is a salt:// URL and the fileserver determines that the hash

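``HASHES_REVMAP`` is what lets ``_try_cache`` infer the hash algorithm from the
checksum's length alone. Roughly (the mapping shown is an assumption about its
contents, based on common hex digest lengths):

.. code-block:: python

    # Hex digest length identifies the algorithm
    len_to_form = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}
    checksum = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
    form = len_to_form.get(len(checksum))  # 64 chars -> 'sha256'
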
@ -6645,14 +6667,18 @@ def cached(name,
    # matches the cached copy.
    # Remote, non salt:// sources _will_ download if a copy of the file was
    # not already present in the minion cache.
    try:
        local_copy = __salt__['cp.cache_file'](
            name,
            saltenv=saltenv,
            source_hash=source_sum.get('hsum'))
    except Exception as exc:
        ret['comment'] = exc.__str__()
        return ret
    if _try_cache(local_copy, source_sum.get('hsum')):
        # The _try_cache helper is obsolete in the develop branch. Once merged
        # forward, remove the helper as well as this if statement, and dedent
        # the below block.
        try:
            local_copy = __salt__['cp.cache_file'](
                name,
                saltenv=saltenv,
                source_hash=source_sum.get('hsum'))
        except Exception as exc:
            ret['comment'] = exc.__str__()
            return ret

    if not local_copy:
        ret['comment'] = (

@ -84,10 +84,12 @@ def installed(name, updates=None):

    Args:

        name (str): The identifier of a single update to install.
        name (str):
            The identifier of a single update to install.

        updates (list): A list of identifiers for updates to be installed.
            Overrides ``name``. Default is None.
        updates (list):
            A list of identifiers for updates to be installed. Overrides
            ``name``. Default is None.

    .. note:: Identifiers can be the GUID, the KB number, or any part of the
        Title of the Microsoft update. GUIDs and KBs are the preferred method

@ -121,7 +123,7 @@ def installed(name, updates=None):
        # Install multiple updates
        install_updates:
          wua.installed:
            - name:
            - updates:
              - KB3194343
              - 28cf1b09-2b1a-458c-9bd1-971d1b26b211
    '''

@ -215,10 +217,12 @@ def removed(name, updates=None):

    Args:

        name (str): The identifier of a single update to uninstall.
        name (str):
            The identifier of a single update to uninstall.

        updates (list): A list of identifiers for updates to be removed.
            Overrides ``name``. Default is None.
        updates (list):
            A list of identifiers for updates to be removed. Overrides ``name``.
            Default is None.

    .. note:: Identifiers can be the GUID, the KB number, or any part of the
        Title of the Microsoft update. GUIDs and KBs are the preferred method

@ -329,3 +333,172 @@ def removed(name, updates=None):
    ret['comment'] = 'Updates removed successfully'

    return ret


def uptodate(name,
             software=True,
             drivers=False,
             skip_hidden=False,
             skip_mandatory=False,
             skip_reboot=True,
             categories=None,
             severities=None,):
    '''
    Ensure Microsoft Updates that match the passed criteria are installed.
    Updates will be downloaded if needed.

    This state allows you to update a system without specifying a specific
    update to apply. All matching updates will be installed.

    Args:

        name (str):
            The name has no functional value and is only used as a tracking
            reference

        software (bool):
            Include software updates in the results (default is True)

        drivers (bool):
            Include driver updates in the results (default is False)

        skip_hidden (bool):
            Skip updates that have been hidden. Default is False.

        skip_mandatory (bool):
            Skip mandatory updates. Default is False.

        skip_reboot (bool):
            Skip updates that require a reboot. Default is True.

        categories (list):
            Specify the categories to list. Must be passed as a list. All
            categories returned by default.

            Categories include the following:

            * Critical Updates
            * Definition Updates
            * Drivers (make sure you set drivers=True)
            * Feature Packs
            * Security Updates
            * Update Rollups
            * Updates
            * Windows 7
            * Windows 8.1
            * Windows 8.1 drivers
            * Windows 8.1 and later drivers
            * Windows Defender

        severities (list):
            Specify the severities to include. Must be passed as a list. All
            severities returned by default.

            Severities include the following:

            * Critical
            * Important

    Returns:
        dict: A dictionary containing the results of the update

    CLI Example:

    .. code-block:: yaml

        # Update the system using the state defaults
        update_system:
          wua.uptodate

        # Update the drivers
        update_drivers:
          wua.uptodate:
            - software: False
            - drivers: True
            - skip_reboot: False

        # Apply all critical updates
        update_critical:
          wua.uptodate:
            - severities:
              - Critical
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    wua = salt.utils.win_update.WindowsUpdateAgent()

    available_updates = wua.available(
        skip_hidden=skip_hidden, skip_installed=True,
        skip_mandatory=skip_mandatory, skip_reboot=skip_reboot,
        software=software, drivers=drivers, categories=categories,
        severities=severities)

    # No updates found
    if available_updates.count() == 0:
        ret['comment'] = 'No updates found'
        return ret

    updates = list(available_updates.list().keys())

    # Search for updates
    install_list = wua.search(updates)

    # List of updates to download
    download = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.is_true(item.IsDownloaded):
            download.updates.Add(item)

    # List of updates to install
    install = salt.utils.win_update.Updates()
    for item in install_list.updates:
        if not salt.utils.is_true(item.IsInstalled):
            install.updates.Add(item)

    # Return comment of changes if test.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Updates will be installed:'
        for update in install.updates:
            ret['comment'] += '\n'
            ret['comment'] += ': '.join(
                [update.Identity.UpdateID, update.Title])
        return ret

    # Download updates
    wua.download(download)

    # Install updates
    wua.install(install)

    # Refresh windows update info
    wua.refresh()

    post_info = wua.updates().list()

    # Verify the installation
    for item in install.list():
        if not salt.utils.is_true(post_info[item]['Installed']):
            ret['changes']['failed'] = {
                item: {'Title': post_info[item]['Title'][:40] + '...',
                       'KBs': post_info[item]['KBs']}
            }
            ret['result'] = False
        else:
            ret['changes']['installed'] = {
                item: {'Title': post_info[item]['Title'][:40] + '...',
                       'NeedsReboot': post_info[item]['NeedsReboot'],
                       'KBs': post_info[item]['KBs']}
            }

    if ret['changes'].get('failed', False):
        ret['comment'] = 'Updates failed'
    else:
        ret['comment'] = 'Updates installed successfully'

    return ret

@ -217,7 +217,7 @@ class RecursiveDictDiffer(DictDiffer):
        Each inner difference is tabulated two space deeper
        '''
        changes_strings = []
        for p in diff_dict.keys():
        for p in sorted(diff_dict.keys()):
            if sorted(diff_dict[p].keys()) == ['new', 'old']:
                # Some string formatting
                old_value = diff_dict[p]['old']

@ -267,7 +267,7 @@ class RecursiveDictDiffer(DictDiffer):
                    keys.append('{0}{1}'.format(prefix, key))
            return keys

        return _added(self._diffs, prefix='')
        return sorted(_added(self._diffs, prefix=''))

    def removed(self):
        '''

@ -290,7 +290,7 @@ class RecursiveDictDiffer(DictDiffer):
                        prefix='{0}{1}.'.format(prefix, key)))
            return keys

        return _removed(self._diffs, prefix='')
        return sorted(_removed(self._diffs, prefix=''))

    def changed(self):
        '''

@ -338,7 +338,7 @@ class RecursiveDictDiffer(DictDiffer):

            return keys

        return _changed(self._diffs, prefix='')
        return sorted(_changed(self._diffs, prefix=''))

    def unchanged(self):
        '''

@ -363,7 +363,7 @@ class RecursiveDictDiffer(DictDiffer):
                        prefix='{0}{1}.'.format(prefix, key)))

            return keys
        return _unchanged(self.current_dict, self._diffs, prefix='')
        return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))

    @property
    def diffs(self):

@ -966,6 +966,31 @@ class CkMinions(object):
            auth_list.append(matcher)
        return auth_list

    def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
        '''
        Returns a list of authorisation matchers that a user is eligible for.
        This list is a combination of the provided personal matchers plus the
        matchers of any group the user is in.
        '''
        if auth_list is None:
            auth_list = []
        if permissive is None:
            permissive = self.opts.get('permissive_acl')
        name_matched = False
        for match in auth_provider:
            if match == '*' and not permissive:
                continue
            if match.endswith('%'):
                if match.rstrip('%') in groups:
                    auth_list.extend(auth_provider[match])
            else:
                if salt.utils.expr_match(match, name):
                    name_matched = True
                    auth_list.extend(auth_provider[match])
        if not permissive and not name_matched and '*' in auth_provider:
            auth_list.extend(auth_provider['*'])
        return auth_list

    def wheel_check(self, auth_list, fun, args):
        '''
        Check special API permissions

@ -982,6 +1007,8 @@ class CkMinions(object):
        '''
        Check special API permissions
        '''
        if not auth_list:
            return False
        if form != 'cloud':
            comps = fun.split('.')
            if len(comps) != 2:

@ -981,6 +981,333 @@ def get_network_adapter_type(adapter_type):
        return vim.vm.device.VirtualE1000e()


def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving DVSs in datacenter \'{0}\', dvs_names=\'{1}\', '
              'get_all_dvss={2}'.format(dc_name,
                                        ','.join(dvs_names) if dvs_names
                                        else None,
                                        get_all_dvss))
    properties = ['name']
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
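A minimal usage sketch (dc_ref is assumed to be a vim.Datacenter reference obtained elsewhere):

# Illustrative only.
all_dvss = get_dvss(dc_ref, get_all_dvss=True)
some_dvss = get_dvss(dc_ref, dvs_names=['dvs1', 'dvs2'])
# With neither argument set, the filter matches nothing and [] is returned.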
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter '
              '\'{0}\''.format(dc_name))
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'{0}\' in datacenter '
              '\'{1}\''.format(dvs_name, dc_name))
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, str(task.__class__))
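A minimal usage sketch (dc_ref is an assumed datacenter reference; the DVS name is illustrative):

# Illustrative only.
spec = vim.DVSCreateSpec()
spec.configSpec = vim.VMwareDVSConfigSpec()
spec.configSpec.name = 'dvs1'
create_dvs(dc_ref, 'dvs1', dvs_create_spec=spec)
# create_dvs(dc_ref, 'dvs1') without a spec builds the same minimal spec itself.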
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'{0}\''.format(dvs_name))
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, str(task.__class__))
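The try/except ladder above recurs for every vSphere call in this module; a generic sketch of the idea (the helper below is hypothetical and not part of this patch):

def _translate_vmware_faults(fn, *args, **kwargs):
    # Hypothetical helper, illustrative only: maps pyVmomi faults to Salt errors.
    try:
        return fn(*args, **kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)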
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to {0} on '
              'dvs \'{1}\''.format(enabled, dvs_name))
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref, vim.Datacenter) or
            isinstance(parent_ref, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in {0} \'{1}\', portgroups_names=\'{2}\', '
              'get_all_portgroups={3}'.format(
                  type(parent_ref).__name__, parent_name,
                  ','.join(portgroup_names) if portgroup_names else None,
                  get_all_portgroups))
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)

    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
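A minimal usage sketch (dc_ref and dvs_ref are assumed references of the respective types):

# Illustrative only.
pgs_on_dvs = get_dvportgroups(dvs_ref, get_all_portgroups=True)
named_pgs = get_dvportgroups(dc_ref, portgroup_names=['pg1', 'pg2'])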
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'{0}\''.format(dvs_name))
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
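The uplink portgroup is identified purely by its system tag; a sketch of the filter applied above (entry shape taken from the code, values illustrative):

# Each property-collector entry looks like {'object': pg_ref, 'tag': [...]};
# an entry is the uplink portgroup iff one of its tags carries this key:
is_uplink = any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in (entry['tag'] or []))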
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup {0} to dvs '
              '\'{1}\''.format(spec.name, dvs_name))
    log.trace('spec = {0}'.format(spec))
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, str(task.__class__))
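A minimal usage sketch (dvs_ref is an assumed DVS reference; the name, binding type, and port count are illustrative):

# Illustrative only.
pg_spec = vim.DVPortgroupConfigSpec()
pg_spec.name = 'pg1'
pg_spec.type = 'earlyBinding'
pg_spec.numPorts = 128
create_dvportgroup(dvs_ref, pg_spec)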
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Updating portgroup {0}'.format(pg_name))
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, str(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup {0}'.format(pg_name))
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, str(task.__class__))
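A minimal sketch of the reconfigure/remove lifecycle (pg_ref is an assumed portgroup reference; the description value is illustrative):

# Illustrative only.
spec = vim.DVPortgroupConfigSpec()
spec.configVersion = pg_ref.config.configVersion  # reconfigure requires the current version
spec.description = 'managed by salt'
update_dvportgroup(pg_ref, spec)
remove_dvportgroup(pg_ref)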
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.
@ -98,13 +98,13 @@ class Nilrt_ipModuleTest(ModuleCase):
    def test_static_all(self):
        interfaces = self.__interfaces()
        for interface in interfaces:
            result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 my.dns.com'])
            result = self.run_function('ip.set_static_all', [interface, '192.168.10.4', '255.255.255.0', '192.168.10.1', '8.8.4.4 8.8.8.8'])
            self.assertTrue(result)

        info = self.run_function('ip.get_interfaces_details')
        for interface in info['interfaces']:
            self.assertIn('8.8.4.4', interface['ipv4']['dns'])
            self.assertIn('my.dns.com', interface['ipv4']['dns'])
            self.assertIn('8.8.8.8', interface['ipv4']['dns'])
            self.assertEqual(interface['ipv4']['requestmode'], 'static')
            self.assertEqual(interface['ipv4']['address'], '192.168.10.4')
            self.assertEqual(interface['ipv4']['netmask'], '255.255.255.0')
@ -63,7 +63,7 @@ class LocalFuncsTestCase(TestCase):
                                  u'message': u'A command invocation error occurred: Check syntax.'}}

        with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.local_funcs.runner(load)

        self.assertDictEqual(mock_ret, ret)

@ -93,7 +93,7 @@ class LocalFuncsTestCase(TestCase):

        self.assertDictEqual(mock_ret, ret)

    def test_runner_eauth_salt_invocation_errpr(self):
    def test_runner_eauth_salt_invocation_error(self):
        '''
        Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
        command is malformed.

@ -102,7 +102,7 @@ class LocalFuncsTestCase(TestCase):
        mock_ret = {u'error': {u'name': u'SaltInvocationError',
                               u'message': u'A command invocation error occurred: Check syntax.'}}
        with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.local_funcs.runner(load)

        self.assertDictEqual(mock_ret, ret)

@ -146,7 +146,7 @@ class LocalFuncsTestCase(TestCase):
                                  u'message': u'A command invocation error occurred: Check syntax.'}}

        with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.local_funcs.wheel(load)

        self.assertDictEqual(mock_ret, ret)

@ -176,7 +176,7 @@ class LocalFuncsTestCase(TestCase):

        self.assertDictEqual(mock_ret, ret)

    def test_wheel_eauth_salt_invocation_errpr(self):
    def test_wheel_eauth_salt_invocation_error(self):
        '''
        Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
        command is malformed.

@ -185,7 +185,7 @@ class LocalFuncsTestCase(TestCase):
        mock_ret = {u'error': {u'name': u'SaltInvocationError',
                               u'message': u'A command invocation error occurred: Check syntax.'}}
        with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.local_funcs.wheel(load)

        self.assertDictEqual(mock_ret, ret)
@ -63,7 +63,7 @@ class ClearFuncsTestCase(TestCase):
                                  u'message': u'A command invocation error occurred: Check syntax.'}}

        with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.clear_funcs.runner(clear_load)

        self.assertDictEqual(mock_ret, ret)

@ -93,7 +93,7 @@ class ClearFuncsTestCase(TestCase):

        self.assertDictEqual(mock_ret, ret)

    def test_runner_eauth_salt_invocation_errpr(self):
    def test_runner_eauth_salt_invocation_error(self):
        '''
        Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
        command is malformed.

@ -102,7 +102,7 @@ class ClearFuncsTestCase(TestCase):
        mock_ret = {u'error': {u'name': u'SaltInvocationError',
                               u'message': u'A command invocation error occurred: Check syntax.'}}
        with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.clear_funcs.runner(clear_load)

        self.assertDictEqual(mock_ret, ret)

@ -155,7 +155,7 @@ class ClearFuncsTestCase(TestCase):
                                  u'message': u'A command invocation error occurred: Check syntax.'}}

        with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.clear_funcs.wheel(clear_load)

        self.assertDictEqual(mock_ret, ret)

@ -185,7 +185,7 @@ class ClearFuncsTestCase(TestCase):

        self.assertDictEqual(mock_ret, ret)

    def test_wheel_eauth_salt_invocation_errpr(self):
    def test_wheel_eauth_salt_invocation_error(self):
        '''
        Asserts that an EauthAuthenticationError is returned when the user authenticates, but the
        command is malformed.

@ -194,7 +194,7 @@ class ClearFuncsTestCase(TestCase):
        mock_ret = {u'error': {u'name': u'SaltInvocationError',
                               u'message': u'A command invocation error occurred: Check syntax.'}}
        with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])):
                patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=['testing'])):
            ret = self.clear_funcs.wheel(clear_load)

        self.assertDictEqual(mock_ret, ret)
@ -18,6 +18,7 @@ import salt.utils.event as event
from salt.exceptions import SaltSystemExit
import salt.syspaths
import tornado
from salt.ext.six.moves import range

__opts__ = {}


@ -69,7 +70,7 @@ class MinionTestCase(TestCase):
        mock_jid_queue = [123]
        try:
            minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue), io_loop=tornado.ioloop.IOLoop())
            ret = minion._handle_decoded_payload(mock_data)
            ret = minion._handle_decoded_payload(mock_data).result()
            self.assertEqual(minion.jid_queue, mock_jid_queue)
            self.assertIsNone(ret)
        finally:

@ -98,7 +99,7 @@ class MinionTestCase(TestCase):
            # Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
            # mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
            # previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
            minion._handle_decoded_payload(mock_data)
            minion._handle_decoded_payload(mock_data).result()
            mock_jid_queue.append(mock_jid)
            self.assertEqual(minion.jid_queue, mock_jid_queue)
        finally:

@ -126,8 +127,54 @@ class MinionTestCase(TestCase):

            # Call the _handle_decoded_payload function and check that the queue is smaller by one item
            # and contains the new jid
            minion._handle_decoded_payload(mock_data)
            minion._handle_decoded_payload(mock_data).result()
            self.assertEqual(len(minion.jid_queue), 2)
            self.assertEqual(minion.jid_queue, [456, 789])
        finally:
            minion.destroy()
    def test_process_count_max(self):
        '''
        Tests that the _handle_decoded_payload function does not spawn more than the configured amount of processes,
        as per process_count_max.
        '''
        with patch('salt.minion.Minion.ctx', MagicMock(return_value={})), \
                patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True)), \
                patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True)), \
                patch('salt.utils.minion.running', MagicMock(return_value=[])), \
                patch('tornado.gen.sleep', MagicMock(return_value=tornado.concurrent.Future())):
            process_count_max = 10
            mock_opts = salt.config.DEFAULT_MINION_OPTS
            mock_opts['minion_jid_queue_hwm'] = 100
            mock_opts["process_count_max"] = process_count_max

            try:
                io_loop = tornado.ioloop.IOLoop()
                minion = salt.minion.Minion(mock_opts, jid_queue=[], io_loop=io_loop)

                # mock gen.sleep to throw a special Exception when called, so that we detect it
                class SleepCalledException(Exception):
                    """Thrown when sleep is called"""
                    pass
                tornado.gen.sleep.return_value.set_exception(SleepCalledException())

                # up until process_count_max: gen.sleep does not get called, processes are started normally
                for i in range(process_count_max):
                    mock_data = {'fun': 'foo.bar',
                                 'jid': i}
                    io_loop.run_sync(lambda data=mock_data: minion._handle_decoded_payload(data))
                    self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count, i + 1)
                    self.assertEqual(len(minion.jid_queue), i + 1)
                    salt.utils.minion.running.return_value += [i]

                # above process_count_max: gen.sleep does get called, JIDs are created but no new processes are started
                mock_data = {'fun': 'foo.bar',
                             'jid': process_count_max + 1}

                self.assertRaises(SleepCalledException,
                                  lambda: io_loop.run_sync(lambda: minion._handle_decoded_payload(mock_data)))
                self.assertEqual(salt.utils.process.SignalHandlingMultiprocessingProcess.start.call_count,
                                 process_count_max)
                self.assertEqual(len(minion.jid_queue), process_count_max + 1)
            finally:
                minion.destroy()
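For reference, the run_sync pattern the tests above use to drive a coroutine from synchronous code, in isolation (a minimal sketch, independent of Salt):

# Illustrative only.
import tornado.gen
import tornado.ioloop

@tornado.gen.coroutine
def work(value):
    raise tornado.gen.Return(value * 2)

io_loop = tornado.ioloop.IOLoop()
assert io_loop.run_sync(lambda: work(21)) == 42  # blocks until the coroutine resolves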
@ -49,7 +49,7 @@ class RecursiveDictDifferTestCase(TestCase):
    def test_changed_without_ignore_unset_values(self):
        self.recursive_diff.ignore_unset_values = False
        self.assertEqual(self.recursive_diff.changed(),
                         ['a.c', 'a.e', 'a.g', 'a.f', 'h', 'i'])
                         ['a.c', 'a.e', 'a.f', 'a.g', 'h', 'i'])

    def test_unchanged(self):
        self.assertEqual(self.recursive_diff.unchanged(),

@ -89,7 +89,7 @@ class RecursiveDictDifferTestCase(TestCase):
                         'a:\n'
                         '  c from 2 to 4\n'
                         '  e from \'old_value\' to \'new_value\'\n'
                         '  g from nothing to \'new_key\'\n'
                         '  f from \'old_key\' to nothing\n'
                         '  g from nothing to \'new_key\'\n'
                         'h from nothing to \'new_key\'\n'
                         'i from nothing to None')
@ -32,34 +32,43 @@ class ListDictDifferTestCase(TestCase):
            continue

    def test_added(self):
        self.assertEqual(self.list_diff.added,
                         [{'key': 5, 'value': 'foo5', 'int_value': 105}])
        self.assertEqual(len(self.list_diff.added), 1)
        self.assertDictEqual(self.list_diff.added[0],
                             {'key': 5, 'value': 'foo5', 'int_value': 105})

    def test_removed(self):
        self.assertEqual(self.list_diff.removed,
                         [{'key': 3, 'value': 'foo3', 'int_value': 103}])
        self.assertEqual(len(self.list_diff.removed), 1)
        self.assertDictEqual(self.list_diff.removed[0],
                             {'key': 3, 'value': 'foo3', 'int_value': 103})

    def test_diffs(self):
        self.assertEqual(self.list_diff.diffs,
                         [{2: {'int_value': {'new': 112, 'old': 102}}},
                          # Added items
                          {5: {'int_value': {'new': 105, 'old': NONE},
                               'key': {'new': 5, 'old': NONE},
                               'value': {'new': 'foo5', 'old': NONE}}},
                          # Removed items
                          {3: {'int_value': {'new': NONE, 'old': 103},
                               'key': {'new': NONE, 'old': 3},
                               'value': {'new': NONE, 'old': 'foo3'}}}])
        self.assertEqual(len(self.list_diff.diffs), 3)
        self.assertDictEqual(self.list_diff.diffs[0],
                             {2: {'int_value': {'new': 112, 'old': 102}}})
        self.assertDictEqual(self.list_diff.diffs[1],
                             # Added items
                             {5: {'int_value': {'new': 105, 'old': NONE},
                                  'key': {'new': 5, 'old': NONE},
                                  'value': {'new': 'foo5', 'old': NONE}}})
        self.assertDictEqual(self.list_diff.diffs[2],
                             # Removed items
                             {3: {'int_value': {'new': NONE, 'old': 103},
                                  'key': {'new': NONE, 'old': 3},
                                  'value': {'new': NONE, 'old': 'foo3'}}})

    def test_new_values(self):
        self.assertEqual(self.list_diff.new_values,
                         [{'key': 2, 'int_value': 112},
                          {'key': 5, 'value': 'foo5', 'int_value': 105}])
        self.assertEqual(len(self.list_diff.new_values), 2)
        self.assertDictEqual(self.list_diff.new_values[0],
                             {'key': 2, 'int_value': 112})
        self.assertDictEqual(self.list_diff.new_values[1],
                             {'key': 5, 'value': 'foo5', 'int_value': 105})

    def test_old_values(self):
        self.assertEqual(self.list_diff.old_values,
                         [{'key': 2, 'int_value': 102},
                          {'key': 3, 'value': 'foo3', 'int_value': 103}])
        self.assertEqual(len(self.list_diff.old_values), 2)
        self.assertDictEqual(self.list_diff.old_values[0],
                             {'key': 2, 'int_value': 102})
        self.assertDictEqual(self.list_diff.old_values[1],
                             {'key': 3, 'value': 'foo3', 'int_value': 103})

    def test_changed_all(self):
        self.assertEqual(self.list_diff.changed(selection='all'),

@ -78,11 +87,3 @@ class ListDictDifferTestCase(TestCase):
                         '\twill be removed\n'
                         '\tidentified by key 5:\n'
                         '\twill be added\n')

    def test_changes_str2(self):
        self.assertEqual(self.list_diff.changes_str2,
                         '  key=2 (updated):\n'
                         '    int_value from 102 to 112\n'
                         '  key=3 (removed)\n'
                         '  key=5 (added): {\'int_value\': 105, \'key\': 5, '
                         '\'value\': \'foo5\'}')
@ -958,5 +958,47 @@ class SaltAPIParserTestCase(LogSettingsParserTests):
        self.addCleanup(delattr, self, 'parser')


@skipIf(NO_MOCK, NO_MOCK_REASON)
class DaemonMixInTestCase(TestCase):
    '''
    Tests the PIDfile deletion in the DaemonMixIn.
    '''

    def setUp(self):
        '''
        Setting up
        '''
        # Set PID
        self.pid = '/some/fake.pid'

        # Setup mixin
        self.mixin = salt.utils.parsers.DaemonMixIn()
        self.mixin.info = None
        self.mixin.config = {}
        self.mixin.config['pidfile'] = self.pid

    def test_pid_file_deletion(self):
        '''
        PIDfile deletion without exception.
        '''
        with patch('os.unlink', MagicMock()) as os_unlink:
            with patch('os.path.isfile', MagicMock(return_value=True)):
                with patch.object(self.mixin, 'info', MagicMock()):
                    self.mixin._mixin_before_exit()
                    assert self.mixin.info.call_count == 0
                    assert os_unlink.call_count == 1

    def test_pid_file_deletion_with_oserror(self):
        '''
        PIDfile deletion with exception
        '''
        with patch('os.unlink', MagicMock(side_effect=OSError())) as os_unlink:
            with patch('os.path.isfile', MagicMock(return_value=True)):
                with patch.object(self.mixin, 'info', MagicMock()):
                    self.mixin._mixin_before_exit()
                    assert os_unlink.call_count == 1
                    self.mixin.info.assert_called_with(
                        'PIDfile could not be deleted: {0}'.format(self.pid))

# Hide the class from unittest framework when it searches for TestCase classes in the module
del LogSettingsParserTests
784
tests/unit/utils/vmware/test_dvs.py
Normal file
@ -0,0 +1,784 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstanley.com>`

Tests for dvs related functions in salt.utils.vmware
'''

# Import python libraries
from __future__ import absolute_import
import logging

# Import Salt testing libraries
from tests.support.unit import TestCase, skipIf
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call
from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \
    ArgumentValueError, VMwareRuntimeError

# Import Salt libraries
import salt.utils.vmware as vmware
# Import Third Party Libs
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False

# Get Logging Started
log = logging.getLogger(__name__)


class FakeTaskClass(object):
    pass
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetDvssTestCase(TestCase):
    def setUp(self):
        self.mock_si = MagicMock()
        self.mock_dc_ref = MagicMock()
        self.mock_traversal_spec = MagicMock()
        self.mock_items = [{'object': MagicMock(),
                            'name': 'fake_dvs1'},
                           {'object': MagicMock(),
                            'name': 'fake_dvs2'},
                           {'object': MagicMock(),
                            'name': 'fake_dvs3'}]
        self.mock_get_mors = MagicMock(return_value=self.mock_items)

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock()),
            ('salt.utils.vmware.get_mors_with_properties',
             self.mock_get_mors),
            ('salt.utils.vmware.get_service_instance_from_managed_object',
             MagicMock(return_value=self.mock_si)),
            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
             MagicMock(return_value=self.mock_traversal_spec)))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec',
                     'mock_items', 'mock_get_mors'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.get_dvss(self.mock_dc_ref)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)

    def test_traversal_spec(self):
        mock_traversal_spec = MagicMock(return_value='traversal_spec')
        with patch(
            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
            mock_traversal_spec):

            vmware.get_dvss(self.mock_dc_ref)
            mock_traversal_spec.assert_has_calls(
                [call(path='childEntity', skip=False, type=vim.Folder),
                 call(path='networkFolder', skip=True, type=vim.Datacenter,
                      selectSet=['traversal_spec'])])

    def test_get_mors_with_properties(self):
        vmware.get_dvss(self.mock_dc_ref)
        self.mock_get_mors.assert_called_once_with(
            self.mock_si, vim.DistributedVirtualSwitch,
            container_ref=self.mock_dc_ref, property_list=['name'],
            traversal_spec=self.mock_traversal_spec)

    def test_get_no_dvss(self):
        ret = vmware.get_dvss(self.mock_dc_ref)
        self.assertEqual(ret, [])

    def test_get_all_dvss(self):
        ret = vmware.get_dvss(self.mock_dc_ref, get_all_dvss=True)
        self.assertEqual(ret, [i['object'] for i in self.mock_items])

    def test_filtered_all_dvss(self):
        ret = vmware.get_dvss(self.mock_dc_ref,
                              dvs_names=['fake_dvs1', 'fake_dvs3', 'no_dvs'])
        self.assertEqual(ret, [self.mock_items[0]['object'],
                               self.mock_items[2]['object']])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetNetworkFolderTestCase(TestCase):
    def setUp(self):
        self.mock_si = MagicMock()
        self.mock_dc_ref = MagicMock()
        self.mock_traversal_spec = MagicMock()
        self.mock_entries = [{'object': MagicMock(),
                              'name': 'fake_netw_folder'}]
        self.mock_get_mors = MagicMock(return_value=self.mock_entries)

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dc')),
            ('salt.utils.vmware.get_service_instance_from_managed_object',
             MagicMock(return_value=self.mock_si)),
            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
             MagicMock(return_value=self.mock_traversal_spec)),
            ('salt.utils.vmware.get_mors_with_properties',
             self.mock_get_mors))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_si', 'mock_dc_ref', 'mock_traversal_spec',
                     'mock_entries', 'mock_get_mors'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.get_network_folder(self.mock_dc_ref)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)

    def test_traversal_spec(self):
        mock_traversal_spec = MagicMock(return_value='traversal_spec')
        with patch(
            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
            mock_traversal_spec):

            vmware.get_network_folder(self.mock_dc_ref)
            mock_traversal_spec.assert_called_once_with(
                path='networkFolder', skip=False, type=vim.Datacenter)

    def test_get_mors_with_properties(self):
        vmware.get_network_folder(self.mock_dc_ref)
        self.mock_get_mors.assert_called_once_with(
            self.mock_si, vim.Folder, container_ref=self.mock_dc_ref,
            property_list=['name'], traversal_spec=self.mock_traversal_spec)

    def test_get_no_network_folder(self):
        with patch('salt.utils.vmware.get_mors_with_properties',
                   MagicMock(return_value=[])):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vmware.get_network_folder(self.mock_dc_ref)
        self.assertEqual(excinfo.exception.strerror,
                         'Network folder in datacenter \'fake_dc\' wasn\'t '
                         'retrieved')

    def test_get_network_folder(self):
        ret = vmware.get_network_folder(self.mock_dc_ref)
        self.assertEqual(ret, self.mock_entries[0]['object'])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class CreateDvsTestCase(TestCase):
    def setUp(self):
        self.mock_dc_ref = MagicMock()
        self.mock_dvs_create_spec = MagicMock()
        self.mock_task = MagicMock(spec=FakeTaskClass)
        self.mock_netw_folder = \
            MagicMock(CreateDVS_Task=MagicMock(
                return_value=self.mock_task))
        self.mock_wait_for_task = MagicMock()

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dc')),
            ('salt.utils.vmware.get_network_folder',
             MagicMock(return_value=self.mock_netw_folder)),
            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_dc_ref', 'mock_dvs_create_spec',
                     'mock_task', 'mock_netw_folder', 'mock_wait_for_task'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
        mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)

    def test_no_dvs_create_spec(self):
        mock_spec = MagicMock(configSpec=None)
        mock_config_spec = MagicMock()
        mock_dvs_create_spec = MagicMock(return_value=mock_spec)
        mock_vmware_dvs_config_spec = \
            MagicMock(return_value=mock_config_spec)
        with patch('salt.utils.vmware.vim.DVSCreateSpec',
                   mock_dvs_create_spec):
            with patch('salt.utils.vmware.vim.VMwareDVSConfigSpec',
                       mock_vmware_dvs_config_spec):
                vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
        mock_dvs_create_spec.assert_called_once_with()
        mock_vmware_dvs_config_spec.assert_called_once_with()
        self.assertEqual(mock_spec.configSpec, mock_config_spec)
        self.assertEqual(mock_config_spec.name, 'fake_dvs')
        self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(mock_spec)

    def test_get_network_folder(self):
        mock_get_network_folder = MagicMock()
        with patch('salt.utils.vmware.get_network_folder',
                   mock_get_network_folder):
            vmware.create_dvs(self.mock_dc_ref, 'fake_dvs')
        mock_get_network_folder.assert_called_once_with(self.mock_dc_ref)

    def test_create_dvs_task_passed_in_spec(self):
        vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
                          dvs_create_spec=self.mock_dvs_create_spec)
        self.mock_netw_folder.CreateDVS_Task.assert_called_once_with(
            self.mock_dvs_create_spec)

    def test_create_dvs_task_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
                              dvs_create_spec=self.mock_dvs_create_spec)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_create_dvs_task_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
                              dvs_create_spec=self.mock_dvs_create_spec)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_create_dvs_task_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_netw_folder.CreateDVS_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
                              dvs_create_spec=self.mock_dvs_create_spec)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')

    def test_wait_for_tasks(self):
        vmware.create_dvs(self.mock_dc_ref, 'fake_dvs',
                          dvs_create_spec=self.mock_dvs_create_spec)
        self.mock_wait_for_task.assert_called_once_with(
            self.mock_task, 'fake_dvs',
            '<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class UpdateDvsTestCase(TestCase):
    def setUp(self):
        self.mock_task = MagicMock(spec=FakeTaskClass)
        self.mock_dvs_ref = MagicMock(
            ReconfigureDvs_Task=MagicMock(return_value=self.mock_task))
        self.mock_dvs_spec = MagicMock()
        self.mock_wait_for_task = MagicMock()

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dvs')),
            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_dvs_ref', 'mock_task', 'mock_dvs_spec',
                     'mock_wait_for_task'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)

    def test_reconfigure_dvs_task(self):
        vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        self.mock_dvs_ref.ReconfigureDvs_Task.assert_called_once_with(
            self.mock_dvs_spec)

    def test_reconfigure_dvs_task_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_reconfigure_dvs_task_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_reconfigure_dvs_task_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_dvs_ref.ReconfigureDvs_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')

    def test_wait_for_tasks(self):
        vmware.update_dvs(self.mock_dvs_ref, self.mock_dvs_spec)
        self.mock_wait_for_task.assert_called_once_with(
            self.mock_task, 'fake_dvs',
            '<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class SetDvsNetworkResourceManagementEnabledTestCase(TestCase):
    def setUp(self):
        self.mock_enabled = MagicMock()
        self.mock_dvs_ref = MagicMock(
            EnableNetworkResourceManagement=MagicMock())

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dvs')),)
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_dvs_ref', 'mock_enabled'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.set_dvs_network_resource_management_enabled(
                self.mock_dvs_ref, self.mock_enabled)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)

    def test_enable_network_resource_management(self):
        vmware.set_dvs_network_resource_management_enabled(
            self.mock_dvs_ref, self.mock_enabled)
        self.mock_dvs_ref.EnableNetworkResourceManagement.assert_called_once_with(
            enable=self.mock_enabled)

    def test_enable_network_resource_management_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_dvs_ref.EnableNetworkResourceManagement = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.set_dvs_network_resource_management_enabled(
                self.mock_dvs_ref, self.mock_enabled)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_enable_network_resource_management_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_dvs_ref.EnableNetworkResourceManagement = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.set_dvs_network_resource_management_enabled(
                self.mock_dvs_ref, self.mock_enabled)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_enable_network_resource_management_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_dvs_ref.EnableNetworkResourceManagement = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.set_dvs_network_resource_management_enabled(
                self.mock_dvs_ref, self.mock_enabled)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetDvportgroupsTestCase(TestCase):
    def setUp(self):
        self.mock_si = MagicMock()
        self.mock_dc_ref = MagicMock(spec=vim.Datacenter)
        self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
        self.mock_traversal_spec = MagicMock()
        self.mock_items = [{'object': MagicMock(),
                            'name': 'fake_pg1'},
                           {'object': MagicMock(),
                            'name': 'fake_pg2'},
                           {'object': MagicMock(),
                            'name': 'fake_pg3'}]
        self.mock_get_mors = MagicMock(return_value=self.mock_items)

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock()),
            ('salt.utils.vmware.get_mors_with_properties',
             self.mock_get_mors),
            ('salt.utils.vmware.get_service_instance_from_managed_object',
             MagicMock(return_value=self.mock_si)),
            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
             MagicMock(return_value=self.mock_traversal_spec)))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_si', 'mock_dc_ref', 'mock_dvs_ref',
                     'mock_traversal_spec', 'mock_items', 'mock_get_mors'):
            delattr(self, attr)

    def test_unsupported_parent(self):
        with self.assertRaises(ArgumentValueError) as excinfo:
            vmware.get_dvportgroups(MagicMock())
        self.assertEqual(excinfo.exception.strerror,
                         'Parent has to be either a datacenter, or a '
                         'distributed virtual switch')

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.get_dvportgroups(self.mock_dc_ref)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dc_ref)

    def test_traversal_spec_datacenter_parent(self):
        mock_traversal_spec = MagicMock(return_value='traversal_spec')
        with patch(
            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
            mock_traversal_spec):

            vmware.get_dvportgroups(self.mock_dc_ref)
            mock_traversal_spec.assert_has_calls(
                [call(path='childEntity', skip=False, type=vim.Folder),
                 call(path='networkFolder', skip=True, type=vim.Datacenter,
                      selectSet=['traversal_spec'])])

    def test_traversal_spec_dvs_parent(self):
        mock_traversal_spec = MagicMock(return_value='traversal_spec')
        with patch(
            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
            mock_traversal_spec):

            vmware.get_dvportgroups(self.mock_dvs_ref)
            mock_traversal_spec.assert_called_once_with(
                path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)

    def test_get_mors_with_properties(self):
        vmware.get_dvportgroups(self.mock_dvs_ref)
        self.mock_get_mors.assert_called_once_with(
            self.mock_si, vim.DistributedVirtualPortgroup,
            container_ref=self.mock_dvs_ref, property_list=['name'],
            traversal_spec=self.mock_traversal_spec)

    def test_get_no_pgs(self):
        ret = vmware.get_dvportgroups(self.mock_dvs_ref)
        self.assertEqual(ret, [])

    def test_get_all_pgs(self):
        ret = vmware.get_dvportgroups(self.mock_dvs_ref,
                                      get_all_portgroups=True)
        self.assertEqual(ret, [i['object'] for i in self.mock_items])

    def test_filtered_pgs(self):
        ret = vmware.get_dvportgroups(
            self.mock_dc_ref, portgroup_names=['fake_pg1', 'fake_pg3', 'no_pg'])
        self.assertEqual(ret, [self.mock_items[0]['object'],
                               self.mock_items[2]['object']])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class GetUplinkDvportgroupTestCase(TestCase):
    def setUp(self):
        self.mock_si = MagicMock()
        self.mock_dvs_ref = MagicMock(spec=vim.DistributedVirtualSwitch)
        self.mock_traversal_spec = MagicMock()
        self.mock_items = [{'object': MagicMock(),
                            'tag': [MagicMock(key='fake_tag')]},
                           {'object': MagicMock(),
                            'tag': [MagicMock(key='SYSTEM/DVS.UPLINKPG')]}]
        self.mock_get_mors = MagicMock(return_value=self.mock_items)

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dvs')),
            ('salt.utils.vmware.get_mors_with_properties',
             self.mock_get_mors),
            ('salt.utils.vmware.get_service_instance_from_managed_object',
             MagicMock(return_value=self.mock_si)),
            ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
             MagicMock(return_value=self.mock_traversal_spec)))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_si', 'mock_dvs_ref', 'mock_traversal_spec',
                     'mock_items', 'mock_get_mors'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)

    def test_traversal_spec(self):
        mock_traversal_spec = MagicMock(return_value='traversal_spec')
        with patch(
            'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec',
            mock_traversal_spec):

            vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
            mock_traversal_spec.assert_called_once_with(
                path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)

    def test_get_mors_with_properties(self):
        vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
        self.mock_get_mors.assert_called_once_with(
            self.mock_si, vim.DistributedVirtualPortgroup,
            container_ref=self.mock_dvs_ref, property_list=['tag'],
            traversal_spec=self.mock_traversal_spec)

    def test_get_no_uplink_pg(self):
        with patch('salt.utils.vmware.get_mors_with_properties',
                   MagicMock(return_value=[])):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
        self.assertEqual(excinfo.exception.strerror,
                         'Uplink portgroup of DVS \'fake_dvs\' wasn\'t found')

    def test_get_uplink_pg(self):
        ret = vmware.get_uplink_dvportgroup(self.mock_dvs_ref)
        self.assertEqual(ret, self.mock_items[1]['object'])
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class CreateDvportgroupTestCase(TestCase):
    def setUp(self):
        self.mock_pg_spec = MagicMock()
        self.mock_task = MagicMock(spec=FakeTaskClass)
        self.mock_dvs_ref = \
            MagicMock(CreateDVPortgroup_Task=MagicMock(
                return_value=self.mock_task))
        self.mock_wait_for_task = MagicMock()

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_dvs')),
            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_pg_spec', 'mock_dvs_ref', 'mock_task',
                     'mock_wait_for_task'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        mock_get_managed_object_name.assert_called_once_with(self.mock_dvs_ref)

    def test_create_dvportgroup_task(self):
        vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        self.mock_dvs_ref.CreateDVPortgroup_Task.assert_called_once_with(
            self.mock_pg_spec)

    def test_create_dvportgroup_task_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_create_dvportgroup_task_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_create_dvportgroup_task_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_dvs_ref.CreateDVPortgroup_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')

    def test_wait_for_tasks(self):
        vmware.create_dvportgroup(self.mock_dvs_ref, self.mock_pg_spec)
        self.mock_wait_for_task.assert_called_once_with(
            self.mock_task, 'fake_dvs',
            '<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class UpdateDvportgroupTestCase(TestCase):
    def setUp(self):
        self.mock_pg_spec = MagicMock()
        self.mock_task = MagicMock(spec=FakeTaskClass)
        self.mock_pg_ref = \
            MagicMock(ReconfigureDVPortgroup_Task=MagicMock(
                return_value=self.mock_task))
        self.mock_wait_for_task = MagicMock()

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_pg')),
            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_pg_spec', 'mock_pg_ref', 'mock_task',
                     'mock_wait_for_task'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)

    def test_reconfigure_dvportgroup_task(self):
        vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        self.mock_pg_ref.ReconfigureDVPortgroup_Task.assert_called_once_with(
            self.mock_pg_spec)

    def test_reconfigure_dvportgroup_task_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_reconfigure_dvportgroup_task_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_reconfigure_dvportgroup_task_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_pg_ref.ReconfigureDVPortgroup_Task = \
            MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')

    def test_wait_for_tasks(self):
        vmware.update_dvportgroup(self.mock_pg_ref, self.mock_pg_spec)
        self.mock_wait_for_task.assert_called_once_with(
            self.mock_task, 'fake_pg',
            '<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing')
class RemoveDvportgroupTestCase(TestCase):
    def setUp(self):
        self.mock_task = MagicMock(spec=FakeTaskClass)
        self.mock_pg_ref = \
            MagicMock(Destroy_Task=MagicMock(
                return_value=self.mock_task))
        self.mock_wait_for_task = MagicMock()

        patches = (
            ('salt.utils.vmware.get_managed_object_name',
             MagicMock(return_value='fake_pg')),
            ('salt.utils.vmware.wait_for_task', self.mock_wait_for_task))
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    def tearDown(self):
        for attr in ('mock_pg_ref', 'mock_task', 'mock_wait_for_task'):
            delattr(self, attr)

    def test_get_managed_object_name_call(self):
        mock_get_managed_object_name = MagicMock()
        with patch('salt.utils.vmware.get_managed_object_name',
                   mock_get_managed_object_name):
            vmware.remove_dvportgroup(self.mock_pg_ref)
        mock_get_managed_object_name.assert_called_once_with(self.mock_pg_ref)

    def test_destroy_task(self):
        vmware.remove_dvportgroup(self.mock_pg_ref)
        self.mock_pg_ref.Destroy_Task.assert_called_once_with()

    def test_destroy_task_raises_no_permission(self):
        exc = vim.fault.NoPermission()
        exc.privilegeId = 'Fake privilege'
        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.remove_dvportgroup(self.mock_pg_ref)
        self.assertEqual(excinfo.exception.strerror,
                         'Not enough permissions. Required privilege: '
                         'Fake privilege')

    def test_destroy_task_raises_vim_fault(self):
        exc = vim.fault.VimFault()
        exc.msg = 'VimFault msg'
        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareApiError) as excinfo:
            vmware.remove_dvportgroup(self.mock_pg_ref)
        self.assertEqual(excinfo.exception.strerror, 'VimFault msg')

    def test_destroy_task_raises_runtime_fault(self):
        exc = vmodl.RuntimeFault()
        exc.msg = 'RuntimeFault msg'
        self.mock_pg_ref.Destroy_Task = MagicMock(side_effect=exc)
        with self.assertRaises(VMwareRuntimeError) as excinfo:
            vmware.remove_dvportgroup(self.mock_pg_ref)
        self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg')

    def test_wait_for_tasks(self):
        vmware.remove_dvportgroup(self.mock_pg_ref)
        self.mock_wait_for_task.assert_called_once_with(
            self.mock_task, 'fake_pg',
            '<class \'unit.utils.vmware.test_dvs.FakeTaskClass\'>')
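A note on the '<class ...FakeTaskClass'>' string asserted in each test_wait_for_tasks: MagicMock(spec=FakeTaskClass) wires the mock's __class__ up to the spec class, so the code under test can presumably pass str(task.__class__) through to wait_for_task and get the spec class's repr. A quick self-contained illustration (the module name differs outside the test file):

    from mock import MagicMock

    class FakeTaskClass(object):
        pass

    task = MagicMock(spec=FakeTaskClass)
    assert isinstance(task, FakeTaskClass)  # spec= makes isinstance() pass
    print(str(task.__class__))
    # -> "<class '__main__.FakeTaskClass'>" when run as a script; the tests
    #    see "<class 'unit.utils.vmware.test_dvs.FakeTaskClass'>" because
    #    the class is defined in that test module.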
@@ -264,14 +264,14 @@ class GetDatastoresTestCase(TestCase):
                 mock_reference,
                 get_all_datastores=True)
 
-        mock_traversal_spec_init.assert_called([
+        mock_traversal_spec_init.assert_has_calls([
             call(path='datastore',
                  skip=False,
                  type=vim.Datacenter),
             call(path='childEntity',
                  selectSet=['traversal'],
                  skip=False,
                  type=vim.Folder),
             call(path='datastore',
                  skip=False,
-                 type=vim.Datacenter)])
+                 type=vim.Folder)])
 
     def test_unsupported_reference_type(self):
         class FakeClass(object):
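The first change in this hunk is a real bug fix, not a rename: on the mock releases Salt supported at the time, Mock objects had no assert_called method, so mock_traversal_spec_init.assert_called([...]) merely auto-created a child mock and always passed. assert_has_calls actually checks the recorded call list, e.g.:

    from mock import MagicMock, call

    m = MagicMock()
    m(path='datastore', skip=False)
    m(path='childEntity', skip=False)

    # Passes only if these calls appear, consecutively and in this
    # order, within m.mock_calls:
    m.assert_has_calls([call(path='datastore', skip=False),
                        call(path='childEntity', skip=False)])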
@@ -379,7 +379,7 @@ class RenameDatastoreTestCase(TestCase):
         with self.assertRaises(VMwareApiError) as excinfo:
             salt.utils.vmware.rename_datastore(self.mock_ds_ref,
                                                'fake_new_name')
-        self.assertEqual(excinfo.exception.message, 'vim_fault')
+        self.assertEqual(excinfo.exception.strerror, 'vim_fault')
 
     def test_rename_datastore_raise_runtime_fault(self):
         exc = vmodl.RuntimeFault()
@@ -388,7 +388,7 @@ class RenameDatastoreTestCase(TestCase):
         with self.assertRaises(VMwareRuntimeError) as excinfo:
             salt.utils.vmware.rename_datastore(self.mock_ds_ref,
                                                'fake_new_name')
-        self.assertEqual(excinfo.exception.message, 'runtime_fault')
+        self.assertEqual(excinfo.exception.strerror, 'runtime_fault')
 
     def test_rename_datastore(self):
         salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name')
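Both rename hunks make the assertions match how these exceptions actually carry their text: BaseException.message was deprecated in Python 2.6 and removed in Python 3, while Salt's VMware exceptions expose the text as strerror. A hedged sketch of the pattern the tests rely on (illustrative only; the real classes live in salt.exceptions):

    class VMwareApiError(Exception):
        def __init__(self, message=''):
            super(VMwareApiError, self).__init__(message)
            self.strerror = message  # what the updated assertions read

    try:
        raise VMwareApiError('vim_fault')
    except VMwareApiError as exc:
        assert exc.strerror == 'vim_fault'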