Merge pull request #26923 from rallytime/cloud-cleanup

Code cleanup of cloud drivers and files
C. R. Oldham 2015-09-10 10:37:26 -06:00
commit 68eb508e6c
18 changed files with 167 additions and 174 deletions
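
Most of the changes below apply two recurring patterns: nested if statements are collapsed into single compound conditionals, and redundant parentheses are dropped from for-loop tuples and return values. A minimal illustrative sketch of both patterns (hypothetical names, not lines taken from this diff):

    # Before: nested conditions and a parenthesized loop tuple.
    profiles = {'web': 'running'}
    profile = 'web'
    state = None
    if profile:
        if profile in profiles:
            state = profiles[profile]
    for key in ('profile', 'state'):
        print(key)

    # After: one compound condition; the tuple parentheses are
    # redundant in a for statement and are removed.
    if profile and profile in profiles:
        state = profiles[profile]
    for key in 'profile', 'state':
        print(key)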

@@ -637,9 +637,9 @@ class Cloud(object):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information. Otherwise, still use query param.
if 'selected_query_option' not in opts:
if '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
if 'selected_query_option' not in opts and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error(
@@ -697,12 +697,8 @@ class Cloud(object):
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if profile:
if alias not in \
self.opts['profiles'][profile]['provider'].split(
':'
)[0]:
continue
if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
@@ -1048,17 +1044,15 @@ class Cloud(object):
salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
continue
if not os.path.isfile(key_file) and globbed_key_file:
# Since we have globbed matches, there are probably
# some keys for which their minion configuration has
# append_domain set.
if len(globbed_key_file) == 1:
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts['pki_dir'],
os.path.basename(globbed_key_file[0])
)
continue
# Since we have globbed matches, there are probably some keys for which their minion
# configuration has append_domain set.
if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts['pki_dir'],
os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we can't also get the append_domain setting.
@@ -1208,22 +1202,21 @@ class Cloud(object):
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True:
if 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug(
'Generating the master keys for {0[name]!r}'.format(
vm_
)
)
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug(
'Generating the master keys for {0[name]!r}'.format(
vm_
)
)
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
@@ -1662,7 +1655,7 @@ class Map(Cloud):
'Only \'start\', \'stop\', and \'reboot\' are supported options.'.format(action)
)
raise SaltCloudException()
if (vm_details != 'Absent') and (vm_details['state'].lower() in state_action):
if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
vm_names.append(vm_name)
return vm_names
@@ -1845,16 +1838,17 @@ class Map(Cloud):
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if 'minion' in overrides and 'minion' in nodedata:
if 'grains' in overrides['minion']:
if 'grains' in nodedata['minion']:
nodedata['minion']['grains'].update(
overrides['minion']['grains']
)
del overrides['minion']['grains']
# remove minion key if it is now an empty dict
if len(overrides['minion']) == 0:
del overrides['minion']
if 'minion' in overrides and \
'minion' in nodedata and \
'grains' in overrides['minion'] and \
'grains' in nodedata['minion']:
nodedata['minion']['grains'].update(
overrides['minion']['grains']
)
del overrides['minion']['grains']
# remove minion key if it is now an empty dict
if len(overrides['minion']) == 0:
del overrides['minion']
nodedata.update(overrides)
# Add the computed information to the return data
@@ -1868,9 +1862,8 @@ class Map(Cloud):
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for vm_name, details in six.iteritems(vms):
if vm_name == name:
if driver not in matches:
matches[driver] = details['state']
if vm_name == name and driver not in matches:
matches[driver] = details['state']
return matches
for alias, drivers in six.iteritems(pmap):
@@ -2282,7 +2275,7 @@ def run_parallel_map_providers_query(data, queue=None):
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any nodes
return (data['alias'], data['driver'], ())
return data['alias'], data['driver'], ()
# for pickle and multiprocessing, we can't use directly decorators

@@ -716,7 +716,7 @@ def _compute_signature(parameters, access_key_secret):
sortedParameters = sorted(list(parameters.items()), key=lambda items: items[0])
canonicalizedQueryString = ''
for (k, v) in sortedParameters:
for k, v in sortedParameters:
canonicalizedQueryString += '&' + percent_encode(k) \
+ '=' + percent_encode(v)
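
For context, the canonicalized query string assembled above is the input to an HMAC-SHA1 request signature. A minimal self-contained sketch of that scheme, assuming an Aliyun-style string-to-sign ('GET&%2F&' prefix) and a simple RFC 3986 percent_encode; neither detail comes from this diff:

    import base64
    import hashlib
    import hmac
    from urllib.parse import quote

    def percent_encode(value):
        # Encode everything except unreserved characters; '~' stays bare.
        return quote(str(value), safe='~')

    def compute_signature(parameters, access_key_secret):
        # Sort parameters by key, percent-encode keys and values,
        # and join them into k1=v1&k2=v2 form.
        pairs = sorted(parameters.items(), key=lambda item: item[0])
        canonicalized = '&'.join(
            percent_encode(k) + '=' + percent_encode(v) for k, v in pairs
        )
        string_to_sign = 'GET&%2F&' + percent_encode(canonicalized)
        digest = hmac.new((access_key_secret + '&').encode('utf-8'),
                          string_to_sign.encode('utf-8'),
                          hashlib.sha1).digest()
        return base64.b64encode(digest).decode('utf-8')

    print(compute_signature({'Action': 'DescribeRegions'}, 'secret'))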

@@ -725,9 +725,11 @@ def destroy(name, call=None):
delete_record = config.get_cloud_config_value(
'delete_dns_record', get_configured_provider(), __opts__, search_global=False, default=None,
)
if delete_record is not None:
if not isinstance(delete_record, bool):
raise SaltCloudConfigError("'delete_dns_record' should be a boolean value.")
if delete_record and not isinstance(delete_record, bool):
raise SaltCloudConfigError(
'\'delete_dns_record\' should be a boolean value.'
)
if delete_record:
delete_dns_record(name)

@@ -1177,10 +1177,9 @@ def _create_eni_if_necessary(interface):
if 'item' in subnet_query_result:
if isinstance(subnet_query_result['item'], dict):
for key, value in subnet_query_result['item'].iteritems():
if key == "subnetId":
if value == interface['SubnetId']:
found = True
break
if key == "subnetId" and value == interface['SubnetId']:
found = True
break
else:
for subnet in subnet_query_result['item']:
if subnet['subnetId'] == interface['SubnetId']:
@@ -1194,12 +1193,11 @@ def _create_eni_if_necessary(interface):
params = {'SubnetId': interface['SubnetId']}
for k in ('Description', 'PrivateIpAddress',
'SecondaryPrivateIpAddressCount'):
for k in 'Description', 'PrivateIpAddress', 'SecondaryPrivateIpAddressCount':
if k in interface:
params[k] = interface[k]
for k in ('PrivateIpAddresses', 'SecurityGroupId'):
for k in 'PrivateIpAddresses', 'SecurityGroupId':
if k in interface:
params.update(_param_from_config(k, interface[k]))
@@ -1233,7 +1231,7 @@ def _create_eni_if_necessary(interface):
_new_eip = _request_eip(interface)
_associate_eip_with_interface(eni_id, _new_eip)
elif interface.get('allocate_new_eips'):
addr_list = _list_interface_private_addresses(eni_desc)
addr_list = _list_interface_private_addrs(eni_desc)
eip_list = []
for idx, addr in enumerate(addr_list):
eip_list.append(_request_eip(interface))
@@ -1258,7 +1256,7 @@ def _create_eni_if_necessary(interface):
'NetworkInterfaceId': eni_id}
def _list_interface_private_addresses(eni_desc):
def _list_interface_private_addrs(eni_desc):
'''
Returns a list of all of the private IP addresses attached to a
network interface. The 'primary' address will be listed first.
@@ -1606,7 +1604,7 @@ def request_instance(vm_=None, call=None):
if not isinstance(ex_securitygroupid, list):
params[spot_prefix + 'SecurityGroupId.1'] = ex_securitygroupid
else:
for (counter, sg_) in enumerate(ex_securitygroupid):
for counter, sg_ in enumerate(ex_securitygroupid):
params[
spot_prefix + 'SecurityGroupId.{0}'.format(counter)
] = sg_
@@ -1651,11 +1649,10 @@ def request_instance(vm_=None, call=None):
'del_root_vol_on_destroy', vm_, __opts__, search_global=False
)
if set_del_root_vol_on_destroy is not None:
if not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_root_vol_on_destroy\' should be a boolean value.'
)
vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
@@ -1737,11 +1734,10 @@ def request_instance(vm_=None, call=None):
'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
)
if set_del_all_vols_on_destroy is not None:
if not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
if set_del_all_vols_on_destroy and not isinstance(set_del_all_vols_on_destroy, bool):
raise SaltCloudConfigError(
'\'del_all_vols_on_destroy\' should be a boolean value.'
)
salt.utils.cloud.fire_event(
'event',

@@ -1580,32 +1580,36 @@ def create_disk(kwargs=None, call=None):
'The create_disk function must be called with -f or --function.'
)
if not kwargs or 'location' not in kwargs:
if kwargs is None:
kwargs = {}
name = kwargs.get('disk_name', None)
image = kwargs.get('image', None)
location = kwargs.get('location', None)
size = kwargs.get('size', None)
snapshot = kwargs.get('snapshot', None)
if location is None:
log.error(
'A location (zone) must be specified when creating a disk.'
)
return False
if 'disk_name' not in kwargs:
if name is None:
log.error(
'A disk_name must be specified when creating a disk.'
)
return False
if 'size' not in kwargs:
if 'image' not in kwargs and 'snapshot' not in kwargs:
log.error(
'Must specify image, snapshot, or size.'
)
return False
if size is None and image is None and snapshot is None:
log.error(
'Must specify image, snapshot, or size.'
)
return False
conn = get_conn()
size = kwargs.get('size', None)
name = kwargs.get('disk_name')
location = conn.ex_get_zone(kwargs['location'])
snapshot = kwargs.get('snapshot', None)
image = kwargs.get('image', None)
use_existing = True
salt.utils.cloud.fire_event(
@@ -1960,9 +1964,9 @@ def destroy(vm_name, call=None):
profile = md['value']
vm_ = get_configured_provider()
delete_boot_pd = False
if profile is not None and profile in vm_['profiles']:
if 'delete_boot_pd' in vm_['profiles'][profile]:
delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']
if profile and profile in vm_['profiles'] and 'delete_boot_pd' in vm_['profiles'][profile]:
delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']
try:
inst_deleted = conn.destroy_node(node)

@@ -220,7 +220,7 @@ def list_nodes(full=False, call=None):
for node in nodes:
ret[node] = {}
for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'):
for item in 'id', 'image', 'size', 'public_ips', 'private_ips', 'state':
ret[node][item] = nodes[node][item]
return ret

@@ -258,7 +258,7 @@ def ssh_username(vm_):
initial = usernames[:]
# Add common usernames to the list to be tested
for name in ('ec2-user', 'ubuntu', 'admin', 'bitnami', 'root'):
for name in 'ec2-user', 'ubuntu', 'admin', 'bitnami', 'root':
if name not in usernames:
usernames.append(name)
# Add the user provided usernames to the end of the list since enough time

@@ -1362,9 +1362,8 @@ def _query(action=None,
if 'api_key' not in args.keys():
args['api_key'] = apikey
if action:
if 'api_action' not in args.keys():
args['api_action'] = '{0}.{1}'.format(action, command)
if action and 'api_action' not in args.keys():
args['api_action'] = '{0}.{1}'.format(action, command)
if header_dict is None:
header_dict = {}
@@ -1421,9 +1420,8 @@ def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
jobs_result = _query('linode',
'job.list',
args={'LinodeID': linode_id})['DATA']
if jobs_result[0]['JOBID'] == job_id:
if jobs_result[0]['HOST_SUCCESS'] == 1:
return True
if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
if not quiet:

@@ -222,7 +222,7 @@ def list_nodes(conn=None, call=None):
nodes = list_nodes_full(conn, call)
for node in nodes:
ret[node] = {}
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips':
ret[node][prop] = nodes[node][prop]
return ret

@@ -110,7 +110,7 @@ except NameError as exc:
HAS_NOVA = False
# Import Salt Cloud Libs
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
import salt.utils.cloud
import salt.utils.pycrypto as sup
import salt.config as config
@@ -129,12 +129,6 @@ try:
except ImportError:
HAS_NETADDR = False
try:
import requests # pylint: disable=unused-import
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
# Get logging started
log = logging.getLogger(__name__)
request_log = logging.getLogger('requests')
@@ -183,7 +177,6 @@ def get_dependencies():
'libcloud': HAS_LIBCLOUD,
'netaddr': HAS_NETADDR,
'nova': HAS_NOVA,
'requests': HAS_REQUESTS
}
return config.check_driver_dependencies(
__virtualname__,
@@ -404,10 +397,11 @@ def destroy(name, conn=None, call=None):
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
flush_mine_on_destroy = False
if profile is not None and profile in profiles:
if 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: {0}'.format(name))
salt_client = salt.client.get_local_client(__opts__['conf_file'])
@@ -724,10 +718,9 @@ def create(vm_):
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if rackconnect(vm_) is True:
if ssh_interface(vm_) != 'private_ips' or rackconnectv3:
data.public_ips = access_ip
return data
if rackconnect(vm_) is True and (ssh_interface(vm_) != 'private_ips' or rackconnectv3):
data.public_ips = access_ip
return data
if cloudnetwork(vm_) is True:
data.public_ips = access_ip

@@ -707,10 +707,9 @@ def create(vm_):
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if rackconnect(vm_) is True:
if ssh_interface(vm_) != 'private_ips':
data.public_ips = access_ip
return data
if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
data.public_ips = access_ip
return data
# populate return data with private_ips
# when ssh_interface is set to private_ips and public_ips exist

@@ -188,7 +188,7 @@ def query(conn_type, option, post_data=None):
log.error(response)
def _getVmByName(name, allDetails=False):
def _get_vm_by_name(name, allDetails=False):
'''
Since Proxmox works based on IDs rather than names as identifiers, this
requires some filtering to retrieve the required information.
@@ -201,7 +201,7 @@ def _getVmByName(name, allDetails=False):
return False
def _getVmById(vmid, allDetails=False):
def _get_vm_by_id(vmid, allDetails=False):
'''
Retrieve a VM based on the ID.
'''
@@ -641,7 +641,7 @@ def create_node(vm_):
vmhost = vm_['host']
newnode['vmid'] = _get_next_vmid()
for prop in ('cpuunits', 'description', 'memory', 'onboot'):
for prop in 'cpuunits', 'description', 'memory', 'onboot':
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
@@ -651,12 +651,12 @@ def create_node(vm_):
newnode['ostemplate'] = vm_['image']
# optional VZ settings
for prop in ('cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid'):
for prop in 'cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid':
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
elif vm_['technology'] == 'qemu':
# optional Qemu settings
for prop in ('acpi', 'cores', 'cpu', 'pool'):
for prop in 'acpi', 'cores', 'cpu', 'pool':
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
@@ -777,7 +777,7 @@ def destroy(name, call=None):
transport=__opts__['transport']
)
vmobj = _getVmByName(name)
vmobj = _get_vm_by_name(name)
if vmobj is not None:
# stop the vm
if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
@@ -812,11 +812,11 @@ def set_vm_status(status, name=None, vmid=None):
if vmid is not None:
log.debug('set_vm_status: via ID - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _getVmById(vmid)
vmobj = _get_vm_by_id(vmid)
else:
log.debug('set_vm_status: via name - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _getVmByName(name)
vmobj = _get_vm_by_name(name)
if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj:
log.error('Unable to set status {0} for {1} ({2})'.format(
@@ -842,10 +842,10 @@ def get_vm_status(vmid=None, name=None):
'''
if vmid is not None:
log.debug('get_vm_status: VMID {0}'.format(vmid))
vmobj = _getVmById(vmid)
vmobj = _get_vm_by_id(vmid)
elif name is not None:
log.debug('get_vm_status: name {0}'.format(name))
vmobj = _getVmByName(name)
vmobj = _get_vm_by_name(name)
else:
log.debug("get_vm_status: No ID or NAME given")
raise SaltCloudExecutionFailure

@@ -414,9 +414,10 @@ def create(vm_):
'''
node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
for node in node_info:
if node['id'] == response['id']:
if 'passwords' in node['operatingSystem'] and len(node['operatingSystem']['passwords']) > 0:
return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
if node['id'] == response['id'] and \
'passwords' in node['operatingSystem'] and \
len(node['operatingSystem']['passwords']) > 0:
return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False

@@ -372,9 +372,10 @@ def create(vm_):
'''
node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
for node in node_info:
if node['id'] == response['id']:
if 'passwords' in node['operatingSystem'] and len(node['operatingSystem']['passwords']) > 0:
return node['operatingSystem']['passwords'][0]['password']
if node['id'] == response['id'] \
and 'passwords' in node['operatingSystem'] \
and len(node['operatingSystem']['passwords']) > 0:
return node['operatingSystem']['passwords'][0]['password']
time.sleep(5)
return False

@@ -103,6 +103,8 @@ except ImportError:
except ImportError:
HAS_SIX = False
IP_RE = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
# Get logging started
log = logging.getLogger(__name__)
@@ -365,7 +367,7 @@ def _get_network_adapter_type(adapter_type):
return vim.vm.device.VirtualE1000e()
def _edit_existing_network_adapter_helper(network_adapter, new_network_name, adapter_type, switch_type):
def _edit_existing_network_adapter(network_adapter, new_network_name, adapter_type, switch_type):
adapter_type.strip().lower()
switch_type.strip().lower()
@@ -470,7 +472,7 @@ def _add_new_network_adapter_helper(network_adapter_label, network_name, adapter
return network_spec
def _edit_existing_scsi_adapter_helper(scsi_adapter, bus_sharing):
def _edit_existing_scsi_adapter(scsi_adapter, bus_sharing):
scsi_adapter.sharedBus = bus_sharing
scsi_spec = vim.vm.device.VirtualDeviceSpec()
scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
@@ -549,7 +551,7 @@ def _set_cd_or_dvd_backing_type(drive, device_type, mode, iso_path):
return drive
def _edit_existing_cd_or_dvd_drive_helper(drive, device_type, mode, iso_path):
def _edit_existing_cd_or_dvd_drive(drive, device_type, mode, iso_path):
device_type.strip().lower()
mode.strip().lower()
@@ -592,7 +594,7 @@ def _add_new_cd_or_dvd_drive_helper(drive_label, controller_key, device_type, mo
return drive_spec
def _set_network_adapter_mapping_helper(adapter_specs):
def _set_network_adapter_mapping(adapter_specs):
adapter_mapping = vim.vm.customization.AdapterMapping()
adapter_mapping.adapter = vim.vm.customization.IPSettings()
@@ -650,8 +652,8 @@ def _manage_devices(devices, vm):
network_name = devices['network'][device.deviceInfo.label]['name']
adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else ''
switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else ''
network_spec = _edit_existing_network_adapter_helper(device, network_name, adapter_type, switch_type)
adapter_mapping = _set_network_adapter_mapping_helper(devices['network'][device.deviceInfo.label])
network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type)
adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label])
device_specs.append(network_spec)
nics_map.append(adapter_mapping)
@@ -669,7 +671,7 @@ def _manage_devices(devices, vm):
bus_sharing = '{0}Sharing'.format(bus_sharing)
if bus_sharing != device.sharedBus:
# Only edit the SCSI adapter if bus_sharing is different
scsi_spec = _edit_existing_scsi_adapter_helper(device, bus_sharing)
scsi_spec = _edit_existing_scsi_adapter(device, bus_sharing)
device_specs.append(scsi_spec)
elif isinstance(device, vim.vm.device.VirtualCdrom):
@@ -681,7 +683,7 @@ def _manage_devices(devices, vm):
device_type = devices['cd'][device.deviceInfo.label]['device_type'] if 'device_type' in devices['cd'][device.deviceInfo.label] else ''
mode = devices['cd'][device.deviceInfo.label]['mode'] if 'mode' in devices['cd'][device.deviceInfo.label] else ''
iso_path = devices['cd'][device.deviceInfo.label]['iso_path'] if 'iso_path' in devices['cd'][device.deviceInfo.label] else ''
cd_drive_spec = _edit_existing_cd_or_dvd_drive_helper(device, device_type, mode, iso_path)
cd_drive_spec = _edit_existing_cd_or_dvd_drive(device, device_type, mode, iso_path)
device_specs.append(cd_drive_spec)
elif isinstance(device, vim.vm.device.VirtualIDEController):
@@ -709,7 +711,7 @@ def _manage_devices(devices, vm):
switch_type = devices['network'][network_adapter_label]['switch_type'] if 'switch_type' in devices['network'][network_adapter_label] else ''
# create the network adapter
network_spec = _add_new_network_adapter_helper(network_adapter_label, network_name, adapter_type, switch_type)
adapter_mapping = _set_network_adapter_mapping_helper(devices['network'][network_adapter_label])
adapter_mapping = _set_network_adapter_mapping(devices['network'][network_adapter_label])
device_specs.append(network_spec)
nics_map.append(adapter_mapping)
@@ -783,15 +785,14 @@ def _wait_for_ip(vm_ref, max_wait):
if time_counter % 5 == 0:
log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter))
if vm_ref.summary.guest.ipAddress:
if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1':
log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
return vm_ref.summary.guest.ipAddress
if vm_ref.summary.guest.ipAddress and match(IP_RE, vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1':
log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
return vm_ref.summary.guest.ipAddress
for net in vm_ref.guest.net:
if net.ipConfig.ipAddress:
for current_ip in net.ipConfig.ipAddress:
if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1':
if match(IP_RE, current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1':
log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
return current_ip.ipAddress
time.sleep(1.0 - ((time.time() - starttime) % 1.0))
@@ -2381,24 +2382,24 @@ def create(vm_):
config=config_spec
)
if devices and 'network' in list(devices.keys()):
if "Windows" not in object_ref.config.guestFullName:
global_ip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in list(vm_.keys()):
global_ip.dnsServerList = vm_['dns_servers']
identity = vim.vm.customization.LinuxPrep()
hostName = vm_name.split('.')[0]
domainName = vm_name.split('.', 1)[-1]
identity.hostName = vim.vm.customization.FixedName(name=hostName)
identity.domain = domainName if hostName != domainName else domain
custom_spec = vim.vm.customization.Specification(
globalIPSettings=global_ip,
identity=identity,
nicSettingMap=specs['nics_map']
)
clone_spec.customization = custom_spec
if devices and 'network' in list(devices.keys()) and 'Windows' not in object_ref.config.guestFullName:
global_ip = vim.vm.customization.GlobalIPSettings()
identity = vim.vm.customization.LinuxPrep()
hostName = vm_name.split('.')[0]
domainName = vm_name.split('.', 1)[-1]
identity.hostName = vim.vm.customization.FixedName(name=hostName)
identity.domain = domainName if hostName != domainName else domain
if 'dns_servers' in list(vm_.keys()):
global_ip.dnsServerList = vm_['dns_servers']
custom_spec = vim.vm.customization.Specification(
globalIPSettings=global_ip,
identity=identity,
nicSettingMap=specs['nics_map']
)
clone_spec.customization = custom_spec
if not template:
clone_spec.powerOn = power
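
The IP_RE constant added in this file replaces two copies of the same inline IPv4 regex. A standalone sketch of the match-and-filter logic it supports, mirroring the checks in _wait_for_ip (the candidate list here is made up):

    import re

    IP_RE = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'

    def first_usable_ipv4(candidates):
        # Return the first valid, non-loopback IPv4 literal, or None.
        for addr in candidates:
            if addr and re.match(IP_RE, addr) and addr != '127.0.0.1':
                return addr
        return None

    print(first_usable_ipv4(['fe80::1', '127.0.0.1', '10.0.0.5']))  # 10.0.0.5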

@@ -363,9 +363,11 @@ def wait_for_ip(vm_):
Wait for the IP address to become available
'''
instance = show_instance(name=vm_['name'], call='action')
if 'ip_address' in instance:
if instance['ip_address'] is not None:
return instance['ip_address']
ip_addrs = instance.get('ip_address', None)
if ip_addrs is not None:
return ip_addrs
time.sleep(1)
return False
@@ -514,10 +516,12 @@ def _get_instance_properties(instance, from_cache=True):
for device in ret['devices']:
if '_obj' in ret['devices'][device]:
del ret['devices'][device]['_obj']
# TODO: this is a workaround because the net does not return mac...?
if ret['mac_address'] is None:
if 'macAddress' in ret['devices'][device]:
ret['mac_address'] = ret['devices'][device]['macAddress']
mac_address = ret.get('mac_address', None)
if mac_address is None and 'macAddress' in ret['devices'][device]:
ret['mac_address'] = ret['devices'][device]['macAddress']
ret['status'] = instance.get_status()
ret['tools_status'] = instance.get_tools_status()

@@ -343,10 +343,11 @@ def destroy(name, conn=None, call=None):
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
flush_mine_on_destroy = False
if profile is not None and profile in profiles:
if 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: {0}'.format(name))

@@ -173,7 +173,7 @@ def accept_key(pki_dir, pub, id_):
the opts directory, this method places the pub key in the accepted
keys dir and removes it from the unaccepted keys dir if that is the case.
'''
for key_dir in ('minions', 'minions_pre', 'minions_rejected'):
for key_dir in 'minions', 'minions_pre', 'minions_rejected':
key_path = os.path.join(pki_dir, key_dir)
if not os.path.exists(key_path):
os.makedirs(key_path)
@@ -2455,7 +2455,7 @@ def delete_minion_cachedir(minion_id, provider, opts, base=None):
driver = next(six.iterkeys(opts['providers'][provider]))
fname = '{0}.p'.format(minion_id)
for cachedir in ('requested', 'active'):
for cachedir in 'requested', 'active':
path = os.path.join(base, cachedir, driver, provider, fname)
log.debug('path: {0}'.format(path))
if os.path.exists(path):
@@ -2525,7 +2525,7 @@ def update_bootstrap(config, url=None):
url = default_url
if not url:
raise ValueError('Cant get any source to update')
if (url.startswith('http')) or ('://' in url):
if url.startswith('http') or '://' in url:
log.debug('Updating the bootstrap-salt.sh script to latest stable')
try:
import requests