This commit is contained in:
Alexandru Bleotu 2017-09-26 17:28:06 -04:00
parent ac3a3bdda5
commit 951d43e0a9
6 changed files with 40 additions and 41 deletions

View file

@ -17,7 +17,6 @@ from salt.utils.schema import (DefinitionsSchema,
Schema,
ComplexSchemaItem,
ArrayItem,
DictItem,
IntegerItem,
BooleanItem,
StringItem,
@ -36,7 +35,6 @@ class DiskGroupDiskScsiAddressItem(ComplexSchemaItem):
title = 'Diskgroup Disk Scsi Address Item'
description = 'ESXi host diskgroup item containing disk SCSI addresses'
cache_scsi_addr = VMwareScsiAddressItem(
title='Cache Disk Scsi Address',
description='Specifies the SCSI address of the cache disk',
@ -57,7 +55,6 @@ class DiskGroupDiskIdItem(ComplexSchemaItem):
title = 'Diskgroup Disk Id Item'
description = 'ESXi host diskgroup item containing disk ids'
cache_id = StringItem(
title='Cache Disk Id',
description='Specifies the id of the cache disk',
@ -80,7 +77,7 @@ class DiskGroupsDiskScsiAddressSchema(DefinitionsSchema):
diskgroups = ArrayItem(
title='Diskgroups',
description='List of diskgroups in an ESXi host',
min_items = 1,
min_items=1,
items=DiskGroupDiskScsiAddressItem(),
required=True)
erase_disks = BooleanItem(
@ -98,7 +95,7 @@ class DiskGroupsDiskIdSchema(DefinitionsSchema):
diskgroups = ArrayItem(
title='DiskGroups',
description='List of disk groups in an ESXi host',
min_items = 1,
min_items=1,
items=DiskGroupDiskIdItem(),
required=True)
@ -207,8 +204,8 @@ class EsxiProxySchema(Schema):
additional_properties = False
proxytype = StringItem(required=True,
enum=['esxi'])
host = StringItem(pattern=r'[^\s]+') # Used when connecting directly
vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter
host = StringItem(pattern=r'[^\s]+') # Used when connecting directly
vcenter = StringItem(pattern=r'[^\s]+') # Used when connecting via a vCenter
esxi_host = StringItem()
username = StringItem()
passwords = ArrayItem(min_items=1,

View file

@ -180,7 +180,7 @@ import salt.utils.vsan
import salt.utils.pbm
from salt.exceptions import CommandExecutionError, VMwareSaltError, \
ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \
VMwareApiError, InvalidEntityError
VMwareApiError, InvalidEntityError, VMwareObjectExistsError
from salt.utils.decorators import depends, ignores_kwargs
from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \
ESXClusterEntitySchema
@ -5992,7 +5992,7 @@ def list_disks(disk_ids=None, scsi_addresses=None, service_instance=None):
host_ref, hostname=hostname)
canonical_name_to_scsi_address = {
lun.canonicalName: scsi_addr
for scsi_addr, lun in scsi_address_to_lun.iteritems()}
for scsi_addr, lun in six.iteritems(scsi_address_to_lun)}
for d in salt.utils.vmware.get_disks(host_ref, disk_ids, scsi_addresses,
get_all_disks):
ret_list.append({'id': d.canonicalName,
@ -6052,7 +6052,7 @@ def erase_disk_partitions(disk_id=None, scsi_address=None,
host_ref, disk_id,
hostname=hostname)
log.info('Erased disk partitions on disk \'{0}\' on host \'{1}\''
''.format(disk_id, esxi_host))
''.format(disk_id, hostname))
return True
@ -6220,7 +6220,7 @@ def create_diskgroup(cache_disk_id, capacity_disk_ids, safety_checks=True,
for id in disk_ids:
if not [d for d in disks if d.canonicalName == id]:
raise VMwareObjectRetrievalError(
'No disk with id \'{0}\' was found in ESXi host \'{0}\''
'No disk with id \'{0}\' was found in ESXi host \'{1}\''
''.format(id, hostname))
cache_disk = [d for d in disks if d.canonicalName == cache_disk_id][0]
capacity_disks = [d for d in disks if d.canonicalName in capacity_disk_ids]
@ -6287,7 +6287,7 @@ def add_capacity_to_diskgroup(cache_disk_id, capacity_disk_ids,
if not diskgroups:
raise VMwareObjectRetrievalError(
'No diskgroup with cache disk id \'{0}\' was found in ESXi '
'host \'{1}\''.format(cache_disk_id, esxi_host))
'host \'{1}\''.format(cache_disk_id, hostname))
vsan_disk_mgmt_system = \
salt.utils.vsan.get_vsan_disk_management_system(service_instance)
salt.utils.vsan.add_capacity_to_diskgroup(service_instance,
@ -6490,7 +6490,7 @@ def configure_host_cache(enabled, datastore=None, swap_size_MiB=None,
if not ds_refs:
raise VMwareObjectRetrievalError(
'Datastore \'{0}\' was not found on host '
'\'{1}\''.format(datastore_name, hostname))
'\'{1}\''.format(datastore, hostname))
ds_ref = ds_refs[0]
salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB)
return True

View file

@ -276,7 +276,7 @@ import logging
import os
# Import Salt Libs
from salt.exceptions import SaltSystemExit
from salt.exceptions import SaltSystemExit, InvalidConfigError
from salt.config.schemas.esxi import EsxiProxySchema
from salt.utils.dictupdate import merge
@ -300,6 +300,7 @@ log = logging.getLogger(__file__)
# Define the module's virtual name
__virtualname__ = 'esxi'
def __virtual__():
'''
Only load if the ESXi execution module is available.
@ -309,6 +310,7 @@ def __virtual__():
return False, 'The ESXi Proxy Minion module did not load.'
def init(opts):
'''
This function gets called when the proxy starts up. For
@ -325,7 +327,7 @@ def init(opts):
try:
jsonschema.validate(proxy_conf, schema)
except jsonschema.exceptions.ValidationError as exc:
raise excs.InvalidProxyInputError(exc)
raise InvalidConfigError(exc)
DETAILS['proxytype'] = proxy_conf['proxytype']
if ('host' not in proxy_conf) and ('vcenter' not in proxy_conf):
@ -345,7 +347,7 @@ def init(opts):
# Get the correct login details
try:
username, password = find_credentials(host)
except excs.SaltSystemExit as err:
except SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
@ -366,7 +368,7 @@ def init(opts):
if 'mechanism' not in proxy_conf:
log.critical('No \'mechanism\' key found in pillar for this proxy.')
return False
mechanism = proxy_conf['mechanism']
mechanism = proxy_conf['mechanism']
# Save mandatory fields in cache
for key in ('vcenter', 'mechanism'):
DETAILS[key] = proxy_conf[key]
@ -376,7 +378,7 @@ def init(opts):
log.critical('No \'username\' key found in pillar for this '
'proxy.')
return False
if not 'passwords' in proxy_conf and \
if 'passwords' not in proxy_conf and \
len(proxy_conf['passwords']) > 0:
log.critical('Mechanism is set to \'userpass\' , but no '
@ -386,11 +388,11 @@ def init(opts):
for key in ('username', 'passwords'):
DETAILS[key] = proxy_conf[key]
elif mechanism == 'sspi':
if not 'domain' in proxy_conf:
if 'domain' not in proxy_conf:
log.critical('Mechanism is set to \'sspi\' , but no '
'\'domain\' key found in pillar for this proxy.')
return False
if not 'principal' in proxy_conf:
if 'principal' not in proxy_conf:
log.critical('Mechanism is set to \'sspi\' , but no '
'\'principal\' key found in pillar for this '
'proxy.')
@ -405,7 +407,7 @@ def init(opts):
try:
username, password = find_credentials()
DETAILS['password'] = password
except excs.SaltSystemExit as err:
except SaltSystemExit as err:
log.critical('Error: {0}'.format(err))
return False
@ -456,7 +458,7 @@ def ping():
__salt__['vsphere.system_info'](host=DETAILS['host'],
username=DETAILS['username'],
password=DETAILS['password'])
except excs.SaltSystemExit as err:
except SaltSystemExit as err:
log.warning(err)
return False
return True

View file

@ -117,7 +117,7 @@ except ImportError:
log = logging.getLogger(__name__)
try:
from pyVmomi import vim, vmodl, VmomiSupport
from pyVmomi import VmomiSupport
# We check the supported vim versions to infer the pyVmomi version
if 'vim25/6.0' in VmomiSupport.versionMap and \
@ -1122,7 +1122,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
capacity_disk_ids = []
capacity_disk_displays = []
for scsi_addr in dg['capacity_scsi_addrs']:
if not scsi_addr in scsi_addr_to_disk_map:
if scsi_addr not in scsi_addr_to_disk_map:
bad_scsi_addrs.append(scsi_addr)
continue
capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id'])
@ -1153,7 +1153,7 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
capacity_disk_displays])))
else:
# Erase disk group disks
for disk_id in ([cache_disk_id] + capacity_disk_ids):
for disk_id in [cache_disk_id] + capacity_disk_ids:
__salt__['vsphere.erase_disk_partitions'](
disk_id=disk_id, service_instance=si)
comments.append('Erased disks of diskgroup #{0}; '
@ -1287,9 +1287,9 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
__salt__['vsphere.disconnect'](si)
#Build the final return message
result = (True if not (changes or errors) else # no changes/errors
None if __opts__['test'] else # running in test mode
False if errors else True) # found errors; defaults to True
result = (True if not (changes or errors) else # no changes/errors
None if __opts__['test'] else # running in test mode
False if errors else True) # found errors; defaults to True
ret.update({'result': result,
'comment': '\n'.join(comments)})
if changes:
@ -1385,7 +1385,7 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
'\'{0}\''.format(hostname))
ret = {'name': hostname, 'comment': 'Default comments',
'result': None, 'changes': {}, 'pchanges': {}}
result = None if __opts__['test'] else True #We assume success
result = None if __opts__['test'] else True # We assume success
needs_setting = False
comments = []
changes = {}
@ -1518,7 +1518,6 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
log.trace('existing_datastore = {0}'.format(existing_datastore))
log.info(comments[-1])
if existing_datastore:
# The following comparisons can be done if the existing_datastore
# is set; it may not be set if running in test mode
@ -1533,23 +1532,22 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
else:
raw_size_MiB = swap_size_value * 1024
log.trace('raw_size = {0}MiB'.format(raw_size_MiB))
swap_size_MiB= int(raw_size_MiB/1024)*1024
swap_size_MiB = int(raw_size_MiB/1024)*1024
log.trace('adjusted swap_size = {0}MiB'.format(swap_size_MiB))
existing_swap_size_MiB = 0
m = re.match('(\d+)MiB', host_cache.get('swap_size')) if \
m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \
host_cache.get('swap_size') else None
if m:
# if swap_size from the host is set and has an expected value
# we are going to parse it to get the number of MiBs
existing_swap_size_MiB = int(m.group(1))
if not (existing_swap_size_MiB == swap_size_MiB):
if not existing_swap_size_MiB == swap_size_MiB:
needs_setting = True
changes.update(
{'swap_size':
{'old': '{}GiB'.format(existing_swap_size_MiB/1024),
'new': '{}GiB'.format(swap_size_MiB/1024)}})
if needs_setting:
if __opts__['test']:
comments.append('State {0} will configure '

View file

@ -2471,7 +2471,7 @@ def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
luns_to_key_map = {d.key: d for d in
get_all_luns(host_ref, storage_system, hostname)}
return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
lun_ids_to_scsi_addr_map.iteritems()}
six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
@ -2513,8 +2513,9 @@ def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
storage_system,
hostname)
disk_keys = [key for scsi_addr, key in lun_key_by_scsi_addr.iteritems()
if scsi_addr in scsi_addresses]
disk_keys = [key for scsi_addr, key
in six.iteritems(lun_key_by_scsi_addr)
if scsi_addr in scsi_addresses]
log.trace('disk_keys based on scsi_addresses = {0}'.format(disk_keys))
scsi_luns = get_all_luns(host_ref, storage_system)
@ -2695,8 +2696,8 @@ def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
vsan_disk_mappings = vsan_storage_info.diskMapping
if not vsan_disk_mappings:
return []
disk_groups = [dm for dm in vsan_disk_mappings if \
(get_all_disk_groups or \
disk_groups = [dm for dm in vsan_disk_mappings if
(get_all_disk_groups or
(dm.ssd.canonicalName in cache_disk_ids))]
log.trace('Retrieved disk groups on host \'{0}\', with cache disk ids : '
'{1}'.format(hostname,

View file

@ -49,7 +49,8 @@ import logging
import ssl
# Import Salt Libs
from salt.exceptions import VMwareApiError, VMwareRuntimeError
from salt.exceptions import VMwareApiError, VMwareRuntimeError, \
VMwareObjectRetrievalError
import salt.utils.vmware
try:
@ -282,7 +283,7 @@ def add_capacity_to_diskgroup(service_instance, vsan_disk_mgmt_system,
spec.host = host_ref
try:
task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
except fault.NoPermission as exc:
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))