Mirror of https://github.com/saltstack/salt.git
Code cleanup of cloud drivers and files
SonarQube!
parent d513acb3e5
commit eee0291ff8
14 changed files with 137 additions and 143 deletions
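
The changes below all follow the same SonarQube-driven pattern: nested if statements are collapsed into a single compound condition. A minimal, runnable sketch of the before/after shape, with illustrative values (the profile/profiles names are borrowed from the driver hunks below):

    # Minimal sketch of the pattern; the values are illustrative.
    profiles = {'web': {'flush_mine_on_destroy': True}}
    profile = 'web'

    # Before: nested ifs, flagged by SonarQube as collapsible.
    flush_mine_on_destroy = False
    if profile is not None and profile in profiles:
        if 'flush_mine_on_destroy' in profiles[profile]:
            flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']

    # After: one compound condition; `and` short-circuits left to right,
    # so profiles[profile] is only indexed after the earlier tests pass.
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']

    print(flush_mine_on_destroy)  # True under either form for this input

Note that several rewrites also swap `x is not None` for plain truthiness (`if profile`, `if delete_record`), which is not a pure refactor for falsy non-None values; one such case is sketched after the destroy() hunk below.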
@@ -637,9 +637,9 @@ class Cloud(object):
         # If driver has function list_nodes_min, just replace it
         # with query param to check existing vms on this driver
         # for minimum information, Otherwise still use query param.
-        if 'selected_query_option' not in opts:
-            if '{0}.list_nodes_min'.format(driver) in self.clouds:
-                this_query = 'list_nodes_min'
+        if 'selected_query_option' not in opts and '{0}.list_nodes_min'.format(driver) in self.clouds:
+            this_query = 'list_nodes_min'
 
         fun = '{0}.{1}'.format(driver, this_query)
         if fun not in self.clouds:
             log.error(
@@ -697,12 +697,8 @@ class Cloud(object):
             # the search returns the same instance for each provider
             # because amazon returns all instances in a region, not
             # availability zone.
-            if profile:
-                if alias not in \
-                        self.opts['profiles'][profile]['provider'].split(
-                            ':'
-                        )[0]:
-                    continue
+            if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
+                continue
 
             for vm_name, details in six.iteritems(vms):
                 # XXX: The logic below can be removed once the aws driver
@@ -1048,17 +1044,15 @@ class Cloud(object):
                     salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                     continue
 
-                if not os.path.isfile(key_file) and globbed_key_file:
-                    # Since we have globbed matches, there are probably
-                    # some keys for which their minion configuration has
-                    # append_domain set.
-                    if len(globbed_key_file) == 1:
-                        # Single entry, let's remove it!
-                        salt.utils.cloud.remove_key(
-                            self.opts['pki_dir'],
-                            os.path.basename(globbed_key_file[0])
-                        )
-                        continue
+                # Since we have globbed matches, there are probably some keys for which their minion
+                # configuration has append_domain set.
+                if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
+                    # Single entry, let's remove it!
+                    salt.utils.cloud.remove_key(
+                        self.opts['pki_dir'],
+                        os.path.basename(globbed_key_file[0])
+                    )
+                    continue
 
                 # Since we can't get the profile or map entry used to create
                 # the VM, we can't also get the append_domain setting.
@@ -1208,22 +1202,21 @@ class Cloud(object):
         if 'append_domain' in minion_dict:
             key_id = '.'.join([key_id, minion_dict['append_domain']])
 
-        if make_master is True:
-            if 'master_pub' not in vm_ and 'master_pem' not in vm_:
-                log.debug(
-                    'Generating the master keys for {0[name]!r}'.format(
-                        vm_
-                    )
-                )
-                master_priv, master_pub = salt.utils.cloud.gen_keys(
-                    salt.config.get_cloud_config_value(
-                        'keysize',
-                        vm_,
-                        self.opts
-                    )
-                )
-                vm_['master_pub'] = master_pub
-                vm_['master_pem'] = master_priv
+        if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
+            log.debug(
+                'Generating the master keys for {0[name]!r}'.format(
+                    vm_
+                )
+            )
+            master_priv, master_pub = salt.utils.cloud.gen_keys(
+                salt.config.get_cloud_config_value(
+                    'keysize',
+                    vm_,
+                    self.opts
+                )
+            )
+            vm_['master_pub'] = master_pub
+            vm_['master_pem'] = master_priv
 
         if local_master is True and deploy is True:
             # Accept the key on the local master
@@ -1662,7 +1655,7 @@ class Map(Cloud):
                     'Only \'start\', \'stop\', and \'reboot\' are supported options.'.format(action)
                 )
                 raise SaltCloudException()
-            if (vm_details != 'Absent') and (vm_details['state'].lower() in state_action):
+            if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
                 vm_names.append(vm_name)
         return vm_names
 
@@ -1845,16 +1838,17 @@ class Map(Cloud):
                 overrides[setting] = overrides.pop(deprecated)
 
             # merge minion grains from map file
-            if 'minion' in overrides and 'minion' in nodedata:
-                if 'grains' in overrides['minion']:
-                    if 'grains' in nodedata['minion']:
-                        nodedata['minion']['grains'].update(
-                            overrides['minion']['grains']
-                        )
-                        del overrides['minion']['grains']
-                        # remove minion key if now is empty dict
-                        if len(overrides['minion']) == 0:
-                            del overrides['minion']
+            if 'minion' in overrides and \
+                    'minion' in nodedata and \
+                    'grains' in overrides['minion'] and \
+                    'grains' in nodedata['minion']:
+                nodedata['minion']['grains'].update(
+                    overrides['minion']['grains']
+                )
+                del overrides['minion']['grains']
+                # remove minion key if now is empty dict
+                if len(overrides['minion']) == 0:
+                    del overrides['minion']
 
             nodedata.update(overrides)
             # Add the computed information to the return data
@@ -1868,9 +1862,8 @@ class Map(Cloud):
         for alias, drivers in six.iteritems(pmap):
             for driver, vms in six.iteritems(drivers):
                 for vm_name, details in six.iteritems(vms):
-                    if vm_name == name:
-                        if driver not in matches:
-                            matches[driver] = details['state']
+                    if vm_name == name and driver not in matches:
+                        matches[driver] = details['state']
         return matches
 
         for alias, drivers in six.iteritems(pmap):
@@ -725,9 +725,11 @@ def destroy(name, call=None):
     delete_record = config.get_cloud_config_value(
         'delete_dns_record', get_configured_provider(), __opts__, search_global=False, default=None,
     )
-    if delete_record is not None:
-        if not isinstance(delete_record, bool):
-            raise SaltCloudConfigError("'delete_dns_record' should be a boolean value.")
+
+    if delete_record and not isinstance(delete_record, bool):
+        raise SaltCloudConfigError(
+            '\'delete_dns_record\' should be a boolean value.'
+        )
 
     if delete_record:
         delete_dns_record(name)
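
Worth noting: the rewritten guard above tests truthiness (`if delete_record and ...`) where the old code tested `delete_record is not None`, so a falsy non-boolean such as 0 or '' no longer raises. A quick, self-contained sketch of the difference (values are illustrative):

    for delete_record in (None, False, 0, 'yes', True):
        # Old guard: raise for any non-None value that is not a bool.
        old_raises = delete_record is not None and not isinstance(delete_record, bool)
        # New guard: raise only for truthy values that are not a bool.
        new_raises = bool(delete_record) and not isinstance(delete_record, bool)
        print(delete_record, old_raises, new_raises)
    # 0 raised under the old check but passes silently under the new one.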
@@ -1177,10 +1177,9 @@ def _create_eni_if_necessary(interface):
     if 'item' in subnet_query_result:
         if isinstance(subnet_query_result['item'], dict):
             for key, value in subnet_query_result['item'].iteritems():
-                if key == "subnetId":
-                    if value == interface['SubnetId']:
-                        found = True
-                        break
+                if key == "subnetId" and value == interface['SubnetId']:
+                    found = True
+                    break
         else:
             for subnet in subnet_query_result['item']:
                 if subnet['subnetId'] == interface['SubnetId']:
@@ -1651,11 +1650,10 @@ def request_instance(vm_=None, call=None):
         'del_root_vol_on_destroy', vm_, __opts__, search_global=False
     )
 
-    if set_del_root_vol_on_destroy is not None:
-        if not isinstance(set_del_root_vol_on_destroy, bool):
-            raise SaltCloudConfigError(
-                '\'del_root_vol_on_destroy\' should be a boolean value.'
-            )
+    if set_del_root_vol_on_destroy and not isinstance(set_del_root_vol_on_destroy, bool):
+        raise SaltCloudConfigError(
+            '\'del_root_vol_on_destroy\' should be a boolean value.'
+        )
 
     vm_['set_del_root_vol_on_destroy'] = set_del_root_vol_on_destroy
 
@@ -1737,11 +1735,10 @@ def request_instance(vm_=None, call=None):
         'del_all_vols_on_destroy', vm_, __opts__, search_global=False, default=False
     )
 
-    if set_del_all_vols_on_destroy is not None:
-        if not isinstance(set_del_all_vols_on_destroy, bool):
-            raise SaltCloudConfigError(
-                '\'del_all_vols_on_destroy\' should be a boolean value.'
-            )
+    if set_del_all_vols_on_destroy and not isinstance(set_del_all_vols_on_destroy, bool):
+        raise SaltCloudConfigError(
+            '\'del_all_vols_on_destroy\' should be a boolean value.'
+        )
 
     salt.utils.cloud.fire_event(
         'event',
@@ -1580,32 +1580,36 @@ def create_disk(kwargs=None, call=None):
             'The create_disk function must be called with -f or --function.'
         )
 
-    if not kwargs or 'location' not in kwargs:
-        log.error(
-            'A location (zone) must be specified when creating a disk.'
-        )
-        return False
-
-    if 'disk_name' not in kwargs:
-        log.error(
-            'A disk_name must be specified when creating a disk.'
-        )
-        return False
-
-    if 'size' not in kwargs:
-        if 'image' not in kwargs and 'snapshot' not in kwargs:
-            log.error(
-                'Must specify image, snapshot, or size.'
-            )
-            return False
+    if kwargs is None:
+        kwargs = {}
+
+    name = kwargs.get('disk_name', None)
+    image = kwargs.get('image', None)
+    location = kwargs.get('location', None)
+    size = kwargs.get('size', None)
+    snapshot = kwargs.get('snapshot', None)
+
+    if location is None:
+        log.error(
+            'A location (zone) must be specified when creating a disk.'
+        )
+        return False
+
+    if name is None:
+        log.error(
+            'A disk_name must be specified when creating a disk.'
+        )
+        return False
+
+    if size is None and (image is None and snapshot is None):
+        log.error(
+            'Must specify image, snapshot, or size.'
+        )
+        return False
 
     conn = get_conn()
 
-    size = kwargs.get('size', None)
-    name = kwargs.get('disk_name')
     location = conn.ex_get_zone(kwargs['location'])
-    snapshot = kwargs.get('snapshot', None)
-    image = kwargs.get('image', None)
     use_existing = True
 
     salt.utils.cloud.fire_event(
@@ -1960,9 +1964,9 @@ def destroy(vm_name, call=None):
             profile = md['value']
     vm_ = get_configured_provider()
     delete_boot_pd = False
-    if profile is not None and profile in vm_['profiles']:
-        if 'delete_boot_pd' in vm_['profiles'][profile]:
-            delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']
+
+    if profile and profile in vm_['profiles'] and 'delete_boot_pd' in vm_['profiles'][profile]:
+        delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']
 
     try:
         inst_deleted = conn.destroy_node(node)
@@ -220,7 +220,7 @@ def list_nodes(full=False, call=None):
 
     for node in nodes:
         ret[node] = {}
-        for item in ('id', 'image', 'size', 'public_ips', 'private_ips', 'state'):
+        for item in 'id', 'image', 'size', 'public_ips', 'private_ips', 'state':
            ret[node][item] = nodes[node][item]
 
    return ret
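
The list_nodes change above (and the matching one further down) only drops the parentheses around the tuple in the for statement; both spellings iterate the same tuple. A trivial, runnable check:

    # Parentheses in a for statement are optional (though they are
    # required inside comprehensions and generator expressions).
    a, b = [], []
    for item in ('id', 'image', 'size'):
        a.append(item)
    for item in 'id', 'image', 'size':
        b.append(item)
    print(a == b)  # True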
@@ -1362,9 +1362,8 @@ def _query(action=None,
     if 'api_key' not in args.keys():
         args['api_key'] = apikey
 
-    if action:
-        if 'api_action' not in args.keys():
-            args['api_action'] = '{0}.{1}'.format(action, command)
+    if action and 'api_action' not in args.keys():
+        args['api_action'] = '{0}.{1}'.format(action, command)
 
     if header_dict is None:
         header_dict = {}
@@ -1421,9 +1420,8 @@ def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
         jobs_result = _query('linode',
                              'job.list',
                              args={'LinodeID': linode_id})['DATA']
-        if jobs_result[0]['JOBID'] == job_id:
-            if jobs_result[0]['HOST_SUCCESS'] == 1:
-                return True
+        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
+            return True
 
         time.sleep(interval)
         if not quiet:
@@ -222,7 +222,7 @@ def list_nodes(conn=None, call=None):
     nodes = list_nodes_full(conn, call)
     for node in nodes:
         ret[node] = {}
-        for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
+        for prop in 'id', 'image', 'size', 'state', 'private_ips', 'public_ips':
             ret[node][prop] = nodes[node][prop]
     return ret
 
@@ -110,7 +110,7 @@ except NameError as exc:
     HAS_NOVA = False
 
 # Import Salt Cloud Libs
-from salt.cloud.libcloudfuncs import *  # pylint: disable=W0614,W0401
+from salt.cloud.libcloudfuncs import *  # pylint: disable=W0614,W0401
 import salt.utils.cloud
 import salt.utils.pycrypto as sup
 import salt.config as config
@@ -129,12 +129,6 @@ try:
 except ImportError:
     HAS_NETADDR = False
 
-try:
-    import requests  # pylint: disable=unused-import
-    HAS_REQUESTS = True
-except ImportError:
-    HAS_REQUESTS = False
-
 # Get logging started
 log = logging.getLogger(__name__)
 request_log = logging.getLogger('requests')
@@ -183,7 +177,6 @@ def get_dependencies():
         'libcloud': HAS_LIBCLOUD,
         'netaddr': HAS_NETADDR,
         'nova': HAS_NOVA,
-        'requests': HAS_REQUESTS
     }
     return config.check_driver_dependencies(
         __virtualname__,
@@ -404,10 +397,11 @@ def destroy(name, conn=None, call=None):
     profile = None
     if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
         profile = node.extra['metadata']['profile']
 
     flush_mine_on_destroy = False
-    if profile is not None and profile in profiles:
-        if 'flush_mine_on_destroy' in profiles[profile]:
-            flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
+
+    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
+        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
 
     if flush_mine_on_destroy:
         log.info('Clearing Salt Mine: {0}'.format(name))
         salt_client = salt.client.get_local_client(__opts__['conf_file'])
@@ -724,10 +718,9 @@ def create(vm_):
             if private_ip not in data.private_ips and not ignore_ip:
                 result.append(private_ip)
 
-        if rackconnect(vm_) is True:
-            if ssh_interface(vm_) != 'private_ips' or rackconnectv3:
-                data.public_ips = access_ip
-                return data
+        if rackconnect(vm_) is True and (ssh_interface(vm_) != 'private_ips' or rackconnectv3):
+            data.public_ips = access_ip
+            return data
 
         if cloudnetwork(vm_) is True:
             data.public_ips = access_ip
@@ -707,10 +707,9 @@ def create(vm_):
             if private_ip not in data.private_ips and not ignore_ip:
                 result.append(private_ip)
 
-        if rackconnect(vm_) is True:
-            if ssh_interface(vm_) != 'private_ips':
-                data.public_ips = access_ip
-                return data
+        if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
+            data.public_ips = access_ip
+            return data
 
         # populate return data with private_ips
         # when ssh_interface is set to private_ips and public_ips exist
@@ -414,9 +414,10 @@ def create(vm_):
         '''
         node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
         for node in node_info:
-            if node['id'] == response['id']:
-                if 'passwords' in node['operatingSystem'] and len(node['operatingSystem']['passwords']) > 0:
-                    return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
+            if node['id'] == response['id'] and \
+                    'passwords' in node['operatingSystem'] and \
+                    len(node['operatingSystem']['passwords']) > 0:
+                return node['operatingSystem']['passwords'][0]['username'], node['operatingSystem']['passwords'][0]['password']
         time.sleep(5)
     return False
@@ -372,9 +372,10 @@ def create(vm_):
         '''
         node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
         for node in node_info:
-            if node['id'] == response['id']:
-                if 'passwords' in node['operatingSystem'] and len(node['operatingSystem']['passwords']) > 0:
-                    return node['operatingSystem']['passwords'][0]['password']
+            if node['id'] == response['id'] \
+                    and 'passwords' in node['operatingSystem'] \
+                    and len(node['operatingSystem']['passwords']) > 0:
+                return node['operatingSystem']['passwords'][0]['password']
         time.sleep(5)
     return False
@@ -103,6 +103,8 @@ except ImportError:
 except ImportError:
     HAS_SIX = False
 
+IP_RE = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
+
 # Get logging started
 log = logging.getLogger(__name__)
@@ -783,15 +785,14 @@ def _wait_for_ip(vm_ref, max_wait):
         if time_counter % 5 == 0:
             log.info("[ {0} ] Waiting to retrieve IPv4 information [{1} s]".format(vm_ref.name, time_counter))
 
-        if vm_ref.summary.guest.ipAddress:
-            if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1':
-                log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
-                return vm_ref.summary.guest.ipAddress
+        if vm_ref.summary.guest.ipAddress and match(IP_RE, vm_ref.summary.guest.ipAddress) and vm_ref.summary.guest.ipAddress != '127.0.0.1':
+            log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
+            return vm_ref.summary.guest.ipAddress
 
         for net in vm_ref.guest.net:
             if net.ipConfig.ipAddress:
                 for current_ip in net.ipConfig.ipAddress:
-                    if match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1':
+                    if match(IP_RE, current_ip.ipAddress) and current_ip.ipAddress != '127.0.0.1':
                         log.info("[ {0} ] Successfully retrieved IPv4 information in {1} seconds".format(vm_ref.name, time_counter))
                         return current_ip.ipAddress
         time.sleep(1.0 - ((time.time() - starttime) % 1.0))
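
The _wait_for_ip hunk above replaces two copies of the same IPv4 regex literal with the module-level IP_RE constant introduced earlier in this file. A self-contained sketch of the pattern, with an illustrative helper name (the driver calls match() inline rather than through a helper):

    import re

    # Hoisted module-level constant: one source of truth for the pattern
    # instead of two diverging copies at the call sites.
    IP_RE = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'

    def is_usable_ipv4(addr):
        # Same test as the driver: syntactically valid IPv4, not loopback.
        return bool(re.match(IP_RE, addr)) and addr != '127.0.0.1'

    print(is_usable_ipv4('10.0.0.5'))   # True
    print(is_usable_ipv4('127.0.0.1'))  # False
    print(is_usable_ipv4('999.1.1.1'))  # False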
@@ -2381,24 +2382,24 @@ def create(vm_):
             config=config_spec
         )
 
-        if devices and 'network' in list(devices.keys()):
-            if "Windows" not in object_ref.config.guestFullName:
-                global_ip = vim.vm.customization.GlobalIPSettings()
-                if 'dns_servers' in list(vm_.keys()):
-                    global_ip.dnsServerList = vm_['dns_servers']
-
-                identity = vim.vm.customization.LinuxPrep()
-                hostName = vm_name.split('.')[0]
-                domainName = vm_name.split('.', 1)[-1]
-                identity.hostName = vim.vm.customization.FixedName(name=hostName)
-                identity.domain = domainName if hostName != domainName else domain
-
-                custom_spec = vim.vm.customization.Specification(
-                    globalIPSettings=global_ip,
-                    identity=identity,
-                    nicSettingMap=specs['nics_map']
-                )
-                clone_spec.customization = custom_spec
+        if devices and 'network' in list(devices.keys()) and 'Windows' not in object_ref.config.guestFullName:
+            global_ip = vim.vm.customization.GlobalIPSettings()
+            if 'dns_servers' in list(vm_.keys()):
+                global_ip.dnsServerList = vm_['dns_servers']
+
+            identity = vim.vm.customization.LinuxPrep()
+            hostName = vm_name.split('.')[0]
+            domainName = vm_name.split('.', 1)[-1]
+            identity.hostName = vim.vm.customization.FixedName(name=hostName)
+            identity.domain = domainName if hostName != domainName else domain
+
+            custom_spec = vim.vm.customization.Specification(
+                globalIPSettings=global_ip,
+                identity=identity,
+                nicSettingMap=specs['nics_map']
+            )
+            clone_spec.customization = custom_spec
 
         if not template:
             clone_spec.powerOn = power
@@ -363,9 +363,11 @@ def wait_for_ip(vm_):
        Wait for the IP address to become available
        '''
        instance = show_instance(name=vm_['name'], call='action')
-        if 'ip_address' in instance:
-            if instance['ip_address'] is not None:
-                return instance['ip_address']
+        ip_addrs = instance.get('ip_address', None)
+
+        if ip_addrs is not None:
+            return ip_addrs
+
        time.sleep(1)
        return False
@@ -514,10 +516,12 @@ def _get_instance_properties(instance, from_cache=True):
     for device in ret['devices']:
         if '_obj' in ret['devices'][device]:
             del ret['devices'][device]['_obj']
 
         # TODO: this is a workaround because the net does not return mac...?
-        if ret['mac_address'] is None:
-            if 'macAddress' in ret['devices'][device]:
-                ret['mac_address'] = ret['devices'][device]['macAddress']
+        mac_address = ret.get('mac_address', None)
+        if mac_address is None and 'macAddress' in ret['devices'][device]:
+            ret['mac_address'] = ret['devices'][device]['macAddress']
 
     ret['status'] = instance.get_status()
     ret['tools_status'] = instance.get_tools_status()
@@ -343,10 +343,11 @@ def destroy(name, conn=None, call=None):
     profile = None
     if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
         profile = node.extra['metadata']['profile']
 
     flush_mine_on_destroy = False
-    if profile is not None and profile in profiles:
-        if 'flush_mine_on_destroy' in profiles[profile]:
-            flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
+
+    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
+        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
 
     if flush_mine_on_destroy:
         log.info('Clearing Salt Mine: {0}'.format(name))