Merge branch 'nitrogen' into 'develop'

No conflicts.
rallytime 2017-04-27 10:37:05 -06:00
commit a876bcdb8a
13 changed files with 452 additions and 741 deletions

View file

@@ -30,7 +30,6 @@ Full list of Salt Cloud modules
proxmox
pyrax
qingcloud
rackspace
saltify
scaleway
softlayer

View file

@@ -1,6 +0,0 @@
===========================
salt.cloud.clouds.rackspace
===========================
.. automodule:: salt.cloud.clouds.rackspace
:members:

View file

@@ -3,13 +3,7 @@ Getting Started With Rackspace
==============================
Rackspace is a major public cloud platform which may be configured using either
the `rackspace` or the `openstack` driver, depending on your needs.
Please note that the `rackspace` driver is intended only for 1st gen instances,
aka, "the old cloud" at Rackspace. It is required for 1st gen instances, but
will *not* work with OpenStack-based instances. Unless you explicitly have a
reason to use it, it is highly recommended that you use the `openstack` driver
instead.
the `openstack` driver.
Dependencies
@@ -50,22 +44,6 @@ To use the `openstack` driver (recommended), set up the cloud configuration at
driver: openstack
To use the `rackspace` driver, set up the cloud configuration at
``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/rackspace.conf``:
.. code-block:: yaml
my-rackspace-config:
driver: rackspace
# The Rackspace login user
user: fred
# The Rackspace user's apikey
apikey: 901d3f579h23c8v73q9
The settings that follow are for using Rackspace with the `openstack` driver,
and will not work with the `rackspace` driver.
.. note::
.. versionchanged:: 2015.8.0

View file

@@ -133,6 +133,60 @@ def get_dependencies():
)
def _query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action')
running = (node['state'] == NodeState.RUNNING)
log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(node['name']), node['state'])
except Exception as err:
log.error(
'Failed to get nodes list: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return running
if not running:
# Still not running, trigger another iteration
return
private = node['private_ips']
public = node['public_ips']
if private and not public:
log.warning('Private IPs returned, but not public. Checking for misidentified IPs.')
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)
else:
log.warning('%s is a private IP', private_ip)
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
log.debug('Contents of the node data:')
log.debug(data)
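The return values above follow the polling contract of salt.utils.cloud.wait_for_ip, which create() wires up further down in this hunk: False aborts the wait, a bare return (None) schedules another poll, and a truthy result ends the wait. A simplified sketch of that contract, not the real implementation; the function name and exceptions are stand-ins:

import time

def wait_for_ip_sketch(update, update_args, timeout=25 * 60, interval=10):
    # Poll `update` until it returns node data. False means a hard
    # failure; None means "not ready yet, poll again".
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = update(*update_args)
        if result is False:
            raise RuntimeError('failed to query node data')  # stand-in for SaltCloudExecutionFailure
        if result:
            return result
        time.sleep(interval)
    raise RuntimeError('timed out waiting for an IP')  # stand-in for SaltCloudExecutionTimeout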
def create(vm_):
'''
Create a single VM from a data dict
@@ -220,69 +274,9 @@ def create(vm_):
)
return False
def __query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action')
running = (node['state'] == NodeState.RUNNING)
log.debug(
'Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'],
pprint.pformat(node['name']),
node['state']
)
except Exception as err:
log.error(
'Failed to get nodes list: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
if not running:
# Still not running, trigger another iteration
return
private = node['private_ips']
public = node['public_ips']
if private and not public:
log.warning(
'Private IPs returned, but not public... Checking for '
'misidentified IPs'
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)
else:
log.warning('%s is a private IP', private_ip)
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
log.debug('DATA')
log.debug(data)
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),

View file

@@ -764,6 +764,145 @@ def request_instance(vm_=None, call=None):
return data, vm_
def _query_node_data(vm_, data, conn):
try:
node = show_instance(vm_['name'], 'action')
log.debug('Loaded node data for {0}:'
'\n{1}'.format(vm_['name'], pprint.pformat(node)))
except Exception as err:
# Show the traceback if the debug logging level is enabled
log.error('Failed to get nodes list: {0}'.format(err),
exc_info_on_loglevel=logging.DEBUG)
# Trigger a failure in the wait for IP function
return False
running = node['state'] == 'ACTIVE'
if not running:
# Still not running, trigger another iteration
return
if rackconnect(vm_) is True:
extra = node.get('extra', {})
rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '')
if rc_status != 'DEPLOYED':
log.debug('Waiting for Rackconnect automation to complete')
return
if managedcloud(vm_) is True:
extra = conn.server_show_libcloud(node['id']).extra
mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '')
if mc_status != 'Complete':
log.debug('Waiting for managed cloud automation to complete')
return
access_ip = node.get('extra', {}).get('access_ip', '')
rcv3 = rackconnectv3(vm_) in node['addresses']
sshif = ssh_interface(vm_) in node['addresses']
if any((rcv3, sshif)):
networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
for network in node['addresses'].get(networkname, []):
if network['version'] == 4:
access_ip = network['addr']
break
vm_['cloudnetwork'] = True
# Conditions to pass this
#
# Rackconnect v2: vm_['rackconnect'] = True
# If this is True, then the server will not be accessible from the ipv4 address in public_ips.
# That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
# server. In this case we can use the private_ips for ssh_interface, or the access_ip.
#
# Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
# If this is the case, salt will need to use the cloud network to login to the server. There
# is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud
# also cannot use the private_ips, because that traffic is dropped at the hypervisor.
#
# CloudNetwork: vm['cloudnetwork'] = True
# If this is True, then we should have an access_ip at this point set to the ip on the cloud
# network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will
# use the initial access_ip, and not overwrite anything.
if (any((cloudnetwork(vm_), rackconnect(vm_)))
and (ssh_interface(vm_) != 'private_ips' or rcv3)
and access_ip != ''):
data.public_ips = [access_ip]
return data
result = []
if ('private_ips' not in node
and 'public_ips' not in node
and 'floating_ips' not in node
and 'fixed_ips' not in node
and 'access_ip' in node.get('extra', {})):
result = [node['extra']['access_ip']]
private = node.get('private_ips', [])
public = node.get('public_ips', [])
fixed = node.get('fixed_ips', [])
floating = node.get('floating_ips', [])
if private and not public:
log.warning('Private IPs returned, but not public. '
'Checking for misidentified IPs')
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)
log.warning('Public IP address was not ready when we last checked. '
'Appending public IP address now.')
public = data.public_ips
else:
log.warning('{0} is a private IP'.format(private_ip))
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
# populate return data with private_ips
# when ssh_interface is set to private_ips and public_ips exist
if not result and ssh_interface(vm_) == 'private_ips':
for private_ip in private:
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
non_private_ips = []
if public:
data.public_ips = public
if ssh_interface(vm_) == 'public_ips':
non_private_ips.append(public)
if floating:
data.floating_ips = floating
if ssh_interface(vm_) == 'floating_ips':
non_private_ips.append(floating)
if fixed:
data.fixed_ips = fixed
if ssh_interface(vm_) == 'fixed_ips':
non_private_ips.append(fixed)
if non_private_ips:
log.debug('result = {0}'.format(non_private_ips))
data.private_ips = result
if ssh_interface(vm_) != 'private_ips':
return data
if result:
log.debug('result = {0}'.format(result))
data.private_ips = result
if ssh_interface(vm_) == 'private_ips':
return data
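To make the comment block above concrete, the guard it describes can be read in isolation like this (a sketch with the driver calls flattened into plain arguments; all names here are stand-ins):

def use_access_ip(cloudnetwork, rackconnect, ssh_iface, rcv3, access_ip):
    # RackConnect v2 and cloud networks route reachable traffic through
    # access_ip; preferring private_ips normally opts out of it, except
    # under RackConnect v3, where neither the public nor the private
    # interface is usable.
    return ((cloudnetwork or rackconnect)
            and (ssh_iface != 'private_ips' or rcv3)
            and access_ip != '')

# RackConnect v3 forces the access IP even when ssh_interface is 'private_ips':
assert use_access_ip(True, False, 'private_ips', True, '192.168.100.5') is True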
def create(vm_):
'''
Create a single VM from a data dict
@@ -825,162 +964,10 @@ def create(vm_):
# Pull the instance ID, valid for both spot and normal instances
vm_['instance_id'] = data.id
def __query_node_data(vm_, data):
try:
node = show_instance(vm_['name'], 'action')
log.debug(
'Loaded node data for {0}:\n{1}'.format(
vm_['name'],
pprint.pformat(node)
)
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
running = node['state'] == 'ACTIVE'
if not running:
# Still not running, trigger another iteration
return
if rackconnect(vm_) is True:
extra = node.get('extra', {})
rc_status = extra.get('metadata', {}).get(
'rackconnect_automation_status', '')
if rc_status != 'DEPLOYED':
log.debug('Waiting for Rackconnect automation to complete')
return
if managedcloud(vm_) is True:
extra = conn.server_show_libcloud(
node['id']
).extra
mc_status = extra.get('metadata', {}).get(
'rax_service_level_automation', '')
if mc_status != 'Complete':
log.debug('Waiting for managed cloud automation to complete')
return
access_ip = node.get('extra', {}).get('access_ip', '')
rcv3 = rackconnectv3(vm_) in node['addresses']
sshif = ssh_interface(vm_) in node['addresses']
if any((rcv3, sshif)):
networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
for network in node['addresses'].get(networkname, []):
if network['version'] == 4:
access_ip = network['addr']
break
vm_['cloudnetwork'] = True
# Conditions to pass this
#
# Rackconnect v2: vm_['rackconnect'] = True
# If this is True, then the server will not be accessible from the ipv4 address in public_ips.
# That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
# server. In this case we can use the private_ips for ssh_interface, or the access_ip.
#
# Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
# If this is the case, salt will need to use the cloud network to login to the server. There
# is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud
# also cannot use the private_ips, because that traffic is dropped at the hypervisor.
#
# CloudNetwork: vm['cloudnetwork'] = True
# If this is True, then we should have an access_ip at this point set to the ip on the cloud
# network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will
# use the initial access_ip, and not overwrite anything.
if any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != '':
data.public_ips = [access_ip, ]
return data
result = []
if 'private_ips' not in node and 'public_ips' not in node and \
'floating_ips' not in node and 'fixed_ips' not in node and \
'access_ip' in node.get('extra', {}):
result = [node['extra']['access_ip']]
private = node.get('private_ips', [])
public = node.get('public_ips', [])
fixed = node.get('fixed_ips', [])
floating = node.get('floating_ips', [])
if private and not public:
log.warning(
'Private IPs returned, but not public... Checking for '
'misidentified IPs'
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)
log.warning(
(
'Public IP address was not ready when we last'
' checked. Appending public IP address now.'
)
)
public = data.public_ips
else:
log.warning('{0} is a private IP'.format(private_ip))
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
# populate return data with private_ips
# when ssh_interface is set to private_ips and public_ips exist
if not result and ssh_interface(vm_) == 'private_ips':
for private_ip in private:
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
non_private_ips = []
if public:
data.public_ips = public
if ssh_interface(vm_) == 'public_ips':
non_private_ips.append(public)
if floating:
data.floating_ips = floating
if ssh_interface(vm_) == 'floating_ips':
non_private_ips.append(floating)
if fixed:
data.fixed_ips = fixed
if ssh_interface(vm_) == 'fixed_ips':
non_private_ips.append(fixed)
if non_private_ips:
log.debug('result = {0}'.format(non_private_ips))
data.private_ips = result
if ssh_interface(vm_) != 'private_ips':
return data
if result:
log.debug('result = {0}'.format(result))
data.private_ips = result
if ssh_interface(vm_) == 'private_ips':
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(vm_, data),
_query_node_data,
update_args=(vm_, data, conn),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(

View file

@@ -587,6 +587,119 @@ def request_instance(vm_=None, call=None):
return data, vm_
def _query_node_data(vm_, data, floating, conn):
try:
node = show_instance(vm_['name'], 'action')
log.debug(
'Loaded node data for {0}:\n{1}'.format(
vm_['name'],
pprint.pformat(node)
)
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
running = node['state'] == NodeState.RUNNING
if not running:
# Still not running, trigger another iteration
return
if rackconnect(vm_) is True:
check_libcloud_version((0, 14, 0), why='rackconnect: True')
extra = node.get('extra')
rc_status = extra.get('metadata', {}).get(
'rackconnect_automation_status', '')
access_ip = extra.get('access_ip', '')
if rc_status != 'DEPLOYED':
log.debug('Waiting for Rackconnect automation to complete')
return
if managedcloud(vm_) is True:
extra = node.get('extra')
mc_status = extra.get('metadata', {}).get(
'rax_service_level_automation', '')
if mc_status != 'Complete':
log.debug('Waiting for managed cloud automation to complete')
return
public = node['public_ips']
if floating:
try:
name = data.name
ip = floating[0].ip_address
conn.ex_attach_floating_ip_to_node(data, ip)
log.info(
'Attaching floating IP \'{0}\' to node \'{1}\''.format(
ip, name
)
)
data.public_ips.append(ip)
public = data.public_ips
except Exception:
# Note(pabelanger): Because we loop, we only want to attach the
# floating IP address once. So, expect failures if the IP is
# already attached.
pass
result = []
private = node['private_ips']
if private and not public:
log.warning(
'Private IPs returned, but not public... Checking for '
'misidentified IPs'
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)
log.warning(
'Public IP address was not ready when we last checked.'
' Appending public IP address now.'
)
public = data.public_ips
else:
log.warning('{0} is a private IP'.format(private_ip))
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
data.public_ips = access_ip
return data
# populate return data with private_ips
# when ssh_interface is set to private_ips and public_ips exist
if not result and ssh_interface(vm_) == 'private_ips':
for private_ip in private:
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if result:
log.debug('result = {0}'.format(result))
data.private_ips = result
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
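One detail worth calling out in the new function: the floating-IP attach is deliberately not guarded by any state. Because wait_for_ip re-invokes _query_node_data on every poll, the pattern is simply to try the attach each time and swallow the provider error once the IP is already attached. Reduced to a sketch (conn is assumed to be a libcloud OpenStack driver; the method is the one used above):

def attach_floating_ip_once(conn, node, floating):
    # Re-entered on every poll; after the first success the provider
    # raises because the IP is already attached, which is expected.
    try:
        conn.ex_attach_floating_ip_to_node(node, floating[0].ip_address)
    except Exception:
        pass  # assume it was attached on an earlier iteration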
def create(vm_):
'''
Create a single VM from a data dict
@@ -652,122 +765,10 @@ def create(vm_):
# Pull the instance ID, valid for both spot and normal instances
vm_['instance_id'] = data.id
def __query_node_data(vm_, data, floating):
try:
node = show_instance(vm_['name'], 'action')
log.debug(
'Loaded node data for {0}:\n{1}'.format(
vm_['name'],
pprint.pformat(node)
)
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
running = node['state'] == NodeState.RUNNING
if not running:
# Still not running, trigger another iteration
return
if rackconnect(vm_) is True:
check_libcloud_version((0, 14, 0), why='rackconnect: True')
extra = node.get('extra')
rc_status = extra.get('metadata', {}).get(
'rackconnect_automation_status', '')
access_ip = extra.get('access_ip', '')
if rc_status != 'DEPLOYED':
log.debug('Waiting for Rackconnect automation to complete')
return
if managedcloud(vm_) is True:
extra = node.get('extra')
mc_status = extra.get('metadata', {}).get(
'rax_service_level_automation', '')
if mc_status != 'Complete':
log.debug('Waiting for managed cloud automation to complete')
return
public = node['public_ips']
if floating:
try:
name = data.name
ip = floating[0].ip_address
conn.ex_attach_floating_ip_to_node(data, ip)
log.info(
'Attaching floating IP \'{0}\' to node \'{1}\''.format(
ip, name
)
)
data.public_ips.append(ip)
public = data.public_ips
except Exception:
# Note(pabelanger): Because we loop, we only want to attach the
# floating IP address once. So, expect failures if the IP is
# already attached.
pass
result = []
private = node['private_ips']
if private and not public:
log.warning(
'Private IPs returned, but not public... Checking for '
'misidentified IPs'
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)
log.warning(
'Public IP address was not ready when we last checked.'
' Appending public IP address now.'
)
public = data.public_ips
else:
log.warning('{0} is a private IP'.format(private_ip))
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
data.public_ips = access_ip
return data
# populate return data with private_ips
# when ssh_interface is set to private_ips and public_ips exist
if not result and ssh_interface(vm_) == 'private_ips':
for private_ip in private:
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
if result:
log.debug('result = {0}'.format(result))
data.private_ips = result
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(vm_, data, vm_['floating']),
_query_node_data,
update_args=(vm_, data, vm_['floating'], conn),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(

View file

@@ -1,378 +0,0 @@
# -*- coding: utf-8 -*-
'''
Rackspace Cloud Module
======================
The Rackspace cloud module. This module uses the preferred means to set up a
libcloud based cloud module and should be used as the general template for
setting up additional libcloud based modules.
:depends: libcloud >= 0.13.2
Please note that the `rackspace` driver is only intended for 1st gen instances,
aka, "the old cloud" at Rackspace. It is required for 1st gen instances, but
will *not* work with OpenStack-based instances. Unless you explicitly have a
reason to use it, it is highly recommended that you use the `openstack` driver
instead.
The rackspace cloud module interfaces with the Rackspace public cloud service
and requires that two configuration parameters be set for use, ``user`` and
``apikey``.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/rackspace.conf``:
.. code-block:: yaml
my-rackspace-config:
driver: rackspace
# The Rackspace login user
user: fred
# The Rackspace user's apikey
apikey: 901d3f579h23c8v73q9
'''
# Import python libs
from __future__ import absolute_import
import logging
import socket
import pprint
# Import salt libs
import salt.utils
import salt.config as config
from salt.utils import namespaced_function
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401
import salt.utils.cloud
# Import Third Party Libs
try:
from libcloud.compute.base import NodeState
# See https://github.com/saltstack/salt/issues/32743
import libcloud.security
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'rackspace'
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
get_size = namespaced_function(get_size, globals())
get_image = namespaced_function(get_image, globals())
avail_locations = namespaced_function(avail_locations, globals())
avail_images = namespaced_function(avail_images, globals())
avail_sizes = namespaced_function(avail_sizes, globals())
script = namespaced_function(script, globals())
destroy = namespaced_function(destroy, globals())
list_nodes = namespaced_function(list_nodes, globals())
list_nodes_full = namespaced_function(list_nodes_full, globals())
list_nodes_select = namespaced_function(list_nodes_select, globals())
show_instance = namespaced_function(show_instance, globals())
# Only load in this module if the RACKSPACE configurations are in place
def __virtual__():
'''
Set up the libcloud functions and check for Rackspace configuration.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return True
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('user', 'apikey')
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'libcloud': HAS_LIBCLOUD}
)
def get_conn():
'''
Return a conn object for the passed VM data
'''
force_first_gen = config.get_cloud_config_value(
'force_first_gen',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
if force_first_gen:
log.info('Rackspace driver will only have access to first-gen images')
driver = get_driver(Provider.RACKSPACE_FIRST_GEN)
else:
driver = get_driver(Provider.RACKSPACE)
return driver(
config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
),
config.get_cloud_config_value(
'apikey',
get_configured_provider(),
__opts__,
search_global=False
),
region=config.get_cloud_config_value(
'compute_region',
get_configured_provider(),
__opts__,
search_global=False,
default='dfw'
).lower()
)
def preferred_ip(vm_, ips):
'''
Return the preferred Internet protocol. Either 'ipv4' (default) or 'ipv6'.
'''
proto = config.get_cloud_config_value(
'protocol', vm_, __opts__, default='ipv4', search_global=False
)
family = socket.AF_INET
if proto == 'ipv6':
family = socket.AF_INET6
for ip in ips:
try:
socket.inet_pton(family, ip)
return ip
except Exception:
continue
return False
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'rackspace',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
conn = get_conn()
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_)
}
event_kwargs = {
'name': kwargs['name'],
'image': kwargs['image'].name,
'size': kwargs['size'].name,
}
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', event_kwargs, event_kwargs.keys()),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating {0} on RACKSPACE\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], exc
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
def __query_node_data(vm_, data):
running = False
try:
node = show_instance(vm_['name'], 'action')
running = (node['state'] == NodeState.RUNNING)
log.debug(
'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
vm_['name'],
pprint.pformat(node['name']),
node['state']
)
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
if not running:
# Still not running, trigger another iteration
return
private = node['private_ips']
public = node['public_ips']
if private and not public:
log.warning(
'Private IPs returned, but not public... Checking for '
'misidentified IPs'
)
for private_ip in private:
private_ip = preferred_ip(vm_, [private_ip])
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
data.public_ips.append(private_ip)
else:
log.warning('{0} is a private IP'.format(private_ip))
if private_ip not in data.private_ips:
data.private_ips.append(private_ip)
if ssh_interface(vm_) == 'private_ips' and data.private_ips:
return data
if private:
data.private_ips = private
if ssh_interface(vm_) == 'private_ips':
return data
if public:
data.public_ips = public
if ssh_interface(vm_) != 'private_ips':
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
log.debug('VM is now running')
if ssh_interface(vm_) == 'private_ips':
ip_address = preferred_ip(vm_, data.private_ips)
else:
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address {0}'.format(ip_address))
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: {0}'.format(salt_ip_address))
else:
salt_ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Salt interface set to: {0}'.format(salt_ip_address))
if not ip_address:
raise SaltCloudSystemExit(
'No IP addresses could be found.'
)
vm_['salt_host'] = salt_ip_address
vm_['ssh_host'] = ip_address
vm_['password'] = data.extra['password']
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret

View file

@@ -4696,6 +4696,9 @@ def manage_file(name,
'changes': {},
'comment': '',
'result': True}
# Ensure that user-provided hash string is lowercase
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
# File is not present, cache it
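The normalization matters because checksum verification is a plain string comparison, so an uppercase hash pasted from another tool would never match hashlib's lowercase hexdigest. A self-contained illustration (the values are the well-known MD5 of b'hello'):

import hashlib

user_hsum = '5D41402ABC4B2A76B9719D911017C592'  # as a user might supply it
computed = hashlib.md5(b'hello').hexdigest()    # hexdigest() is always lowercase
assert user_hsum != computed                    # raw comparison would fail
assert user_hsum.lower() == computed            # normalized comparison matches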

View file

@@ -244,11 +244,16 @@ def _get_key_dir():
'''
return the location of the GPG key directory
'''
gpg_keydir = None
if 'config.get' in __salt__:
gpg_keydir = __salt__['config.get']('gpg_keydir')
else:
if not gpg_keydir:
gpg_keydir = __opts__.get('gpg_keydir')
return gpg_keydir or os.path.join(__opts__['config_dir'], 'gpgkeys')
if not gpg_keydir and 'config_dir' in __opts__:
gpg_keydir = os.path.join(__opts__['config_dir'], 'gpgkeys')
else:
gpg_keydir = os.path.join(os.path.split(__opts__['conf_file'])[0], 'gpgkeys')
return gpg_keydir
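Since the hunk above interleaves the removed and added lines, here is one way to read the new fallback chain as a standalone sketch, with __salt__ and __opts__ passed in as plain dicts; the nesting of the final two branches is inferred from context, not confirmed:

import os

def get_key_dir_sketch(salt_funcs, opts):
    # Lookup order: config.get, then an explicit __opts__ entry, then
    # 'gpgkeys' under config_dir, finally 'gpgkeys' beside the conf_file.
    gpg_keydir = None
    if 'config.get' in salt_funcs:
        gpg_keydir = salt_funcs['config.get']('gpg_keydir')
    if not gpg_keydir:
        gpg_keydir = opts.get('gpg_keydir')
    if not gpg_keydir:
        if 'config_dir' in opts:
            gpg_keydir = os.path.join(opts['config_dir'], 'gpgkeys')
        else:
            gpg_keydir = os.path.join(os.path.split(opts['conf_file'])[0], 'gpgkeys')
    return gpg_keydir

# e.g. get_key_dir_sketch({}, {'config_dir': '/etc/salt'}) -> '/etc/salt/gpgkeys'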
def _decrypt_ciphertext(cipher, translate_newlines=False):

View file

@@ -1 +1,18 @@
# -*- coding: utf-8 -*-
def _preferred_ip(ip_set, preferred=None):
'''
Returns a function that decides which IP is preferred
:param ip_set:
:param preferred:
:return:
'''
def _ip_decider(vm, ips):
for ip in ips:
if ip in preferred:
return ip
return False
return _ip_decider
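A quick usage sketch of this helper as the driver tests below consume it (the IPs are arbitrary):

# Build a picker that only ever accepts 10.0.0.5.
picker = _preferred_ip(['10.0.0.5', '10.0.0.6'], preferred=['10.0.0.5'])
assert picker({'name': 'vm1'}, ['10.0.0.6']) is False  # no preferred IP offered
assert picker({'name': 'vm1'}, ['10.0.0.6', '10.0.0.5']) == '10.0.0.5'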

View file

@@ -24,6 +24,7 @@ from salt.utils.versions import LooseVersion
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch, __version__ as mock_version
from tests.unit.cloud.clouds import _preferred_ip
VM_NAME = 'winterfell'
@@ -32,7 +33,7 @@
if HAS_LIBCLOUD:
import certifi
libcloud.security.CA_CERTS_PATH.append(certifi.where())
except ImportError:
except (ImportError, NameError):
pass
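The widened except clause matters because the guarded import can leave the name libcloud unbound: if the module was never imported, referencing it raises NameError rather than ImportError. A minimal standalone reproduction:

try:
    libcloud.security.CA_CERTS_PATH  # 'libcloud' was never bound here
except (ImportError, NameError):
    pass  # NameError is what actually fires in this snippet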
@@ -132,7 +133,7 @@ class DimensionDataTestCase(ExtendedTestCase, LoaderModuleMockMixin):
call='function'
)
@skipIf(HAS_LIBCLOUD is False, 'libcloud not found')
@skipIf(HAS_LIBCLOUD is False, "Install 'libcloud' to be able to run this unit test.")
def test_avail_sizes(self):
'''
Tests that avail_sizes returns an empty dictionary.
@@ -163,3 +164,26 @@ class DimensionDataTestCase(ExtendedTestCase, LoaderModuleMockMixin):
"""
p = dimensiondata.get_configured_provider()
self.assertNotEqual(p, None)
def test_query_node_data_filter_preferred_ip_addresses(self):
'''
Test if query node data is filtering out unpreferred IP addresses.
'''
zero_ip = '0.0.0.0'
private_ips = [zero_ip, '1.1.1.1', '2.2.2.2']
vm = {'name': None}
data = MagicMock()
data.public_ips = []
dimensiondata.NodeState = MagicMock() # pylint: disable=blacklisted-unmocked-patching
dimensiondata.NodeState.RUNNING = True
with patch('salt.cloud.clouds.dimensiondata.show_instance',
MagicMock(return_value={'state': True,
'name': 'foo',
'public_ips': [],
'private_ips': private_ips})):
with patch('salt.cloud.clouds.dimensiondata.preferred_ip',
_preferred_ip(private_ips, [zero_ip])):
with patch('salt.cloud.clouds.dimensiondata.ssh_interface',
MagicMock(return_value='private_ips')):
self.assertEqual(dimensiondata._query_node_data(vm, data).public_ips, [zero_ip])

View file

@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.unit import TestCase
from tests.support.mock import MagicMock, patch
from tests.unit.cloud.clouds import _preferred_ip
# Import Salt libs
from salt.cloud.clouds import nova
class NovaTestCase(TestCase):
'''
Test case for nova
'''
PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']
@patch('salt.cloud.clouds.nova.show_instance',
MagicMock(return_value={'state': 'ACTIVE',
'public_ips': [],
'addresses': [],
'private_ips': PRIVATE_IPS}))
@patch('salt.cloud.clouds.nova.rackconnect', MagicMock(return_value=False))
@patch('salt.cloud.clouds.nova.rackconnectv3', MagicMock(return_value={'mynet': ['1.1.1.1']}))
@patch('salt.cloud.clouds.nova.cloudnetwork', MagicMock(return_value=False))
@patch('salt.cloud.clouds.nova.managedcloud', MagicMock(return_value=False))
@patch('salt.cloud.clouds.nova.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
@patch('salt.cloud.clouds.nova.ssh_interface', MagicMock(return_value='public_ips'))
def test_query_node_data_filter_preferred_ip_addresses(self):
'''
Test if query node data is filtering out unpreferred IP addresses.
'''
vm = {'name': None}
data = MagicMock()
data.public_ips = []
assert nova._query_node_data(vm, data, MagicMock()).public_ips == ['0.0.0.0']

View file

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.unit import TestCase
from tests.support.mock import MagicMock, patch
from tests.unit.cloud.clouds import _preferred_ip
# Import Salt libs
from salt.cloud.clouds import openstack
class OpenstackTestCase(TestCase):
'''
Test case for openstack
'''
PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']
@patch('salt.cloud.clouds.openstack.show_instance',
MagicMock(return_value={'state': True,
'public_ips': [],
'private_ips': PRIVATE_IPS}))
@patch('salt.cloud.clouds.openstack.rackconnect', MagicMock(return_value=False))
@patch('salt.cloud.clouds.openstack.managedcloud', MagicMock(return_value=False))
@patch('salt.cloud.clouds.openstack.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
@patch('salt.cloud.clouds.openstack.ssh_interface', MagicMock(return_value=False))
def test_query_node_data_filter_preferred_ip_addresses(self):
'''
Test if query node data is filtering out unpreferred IP addresses.
'''
openstack.NodeState = MagicMock() # pylint: disable=blacklisted-unmocked-patching
openstack.NodeState.RUNNING = True
vm = {'name': None}
data = MagicMock()
data.public_ips = []
with patch('salt.utils.cloud.is_public_ip', MagicMock(return_value=True)):
assert openstack._query_node_data(vm, data, False, MagicMock()).public_ips == ['0.0.0.0']