mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
Revert "cloud.clouds.ec2: cache each named node (#33164)"
This reverts commit b3805d825a
("cloud.clouds.ec2: cache each named node (#33164)"). Commit 25771fc030
("_get_node return instance info directly, not a dict of instances") from the 2016.3 feature release changed how the result of _get_node should be interpreted. Before, the usage was `res = _get_node(...); node = res[name]`, and after that commit it became `node = _get_node(...)`. Commit b3805d825a
("cloud.clouds.ec2: cache each named node (#33164)"), submitted into 2015.8 as a bugfix for #33162, added loops over the keys of the dictionary returned by _get_node, fixing the salt.utils.cloud.cache_node calls in queue_instances() and show_instance(). But after being merged into 2016.3 with merge commit 679200aeb2
("Merge branch '2015.8' into '2016.3'"), the commit in question reintroduced the bug on 2016.3 because of the changed return value of _get_node. Fixes #39782
This commit is contained in:
parent
4ee59be22c
commit
b71c3fe13c
1 changed file with 2 additions and 9 deletions
|
@ -2647,11 +2647,7 @@ def queue_instances(instances):
|
|||
'''
|
||||
for instance_id in instances:
|
||||
node = _get_node(instance_id=instance_id)
|
||||
for name in node:
|
||||
if instance_id == node[name]['instanceId']:
|
||||
__utils__['cloud.cache_node'](node[name],
|
||||
__active_provider_name__,
|
||||
__opts__)
|
||||
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
|
||||
|
||||
|
||||
def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
|
||||
|
@ -3200,10 +3196,7 @@ def show_instance(name=None, instance_id=None, call=None, kwargs=None):
|
|||
)
|
||||
|
||||
node = _get_node(name=name, instance_id=instance_id)
|
||||
for name in node:
|
||||
__utils__['cloud.cache_node'](node[name],
|
||||
__active_provider_name__,
|
||||
__opts__)
|
||||
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
|
||||
return node
|
||||
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue