Mirror of https://github.com/saltstack/salt.git, synced 2025-04-17 10:10:20 +00:00

Merge pull request #31287 from joejulian/2015.8_30932_peer_probe_by_ip

Rework tests and fix reverse peering with gluster 3.7

This commit is contained in commit 5d31714b44.

4 changed files with 475 additions and 135 deletions
salt/modules/glusterfs.py

@@ -68,9 +68,10 @@ def _gluster_xml(cmd):
 def _etree_to_dict(t):
-    if len(t.getchildren()) > 0:
+    list_t = list(t)
+    if len(list_t) > 0:
         d = {}
-        for child in t.getchildren():
+        for child in list_t:
             d[child.tag] = _etree_to_dict(child)
     else:
         d = t.text
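
For context, `getchildren()` was deprecated in Python's `xml.etree.ElementTree` (and later removed in Python 3.9); iterating the element directly via `list(t)` is the supported replacement. A minimal sketch of the converter after this change, assuming the function returns the accumulated value (the `return` falls outside the hunk):

import xml.etree.ElementTree as ElementTree


def _etree_to_dict(t):
    # list(t) yields an element's direct children; it replaces the
    # deprecated getchildren() call with identical behavior.
    list_t = list(t)
    if len(list_t) > 0:
        d = {}
        for child in list_t:
            d[child.tag] = _etree_to_dict(child)
    else:
        d = t.text
    return d


root = ElementTree.fromstring('<peer><hostname>node02</hostname></peer>')
print(_etree_to_dict(root))  # {'hostname': 'node02'}
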
@@ -117,7 +118,10 @@ def list_peers():
     '''
     root = _gluster_xml('peer status')
-    result = [x.find('hostname').text for x in _iter(root, 'peer')]
+    result = {}
+    for et_peer in _iter(root, 'peer'):
+        result.update({et_peer.find('hostname').text: [
+            x.text for x in _iter(et_peer.find('hostnames'), 'hostname')]})
     if len(result) == 0:
         return None
     else:
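
`list_peers` now returns a dict mapping each peer's primary hostname to every name gluster knows it by, instead of a flat list of hostnames; this is what lets the state layer recognize a peer that was probed under a different name or address. A hedged sketch of the new return shape, using `Element.iter` as a stand-in for the module's `_iter` compatibility helper (an assumption about its behavior) and the `peer status` XML from the test fixtures:

import xml.etree.ElementTree as ElementTree

xml = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <peer>
    <hostname>node02</hostname>
    <hostnames>
      <hostname>node02.domain.dom</hostname>
      <hostname>10.0.0.2</hostname>
    </hostnames>
  </peer>
</cliOutput>'''

root = ElementTree.fromstring(xml)
result = {}
for et_peer in root.iter('peer'):
    result[et_peer.find('hostname').text] = [
        x.text for x in et_peer.find('hostnames').iter('hostname')]
print(result)  # {'node02': ['node02.domain.dom', '10.0.0.2']}
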
@@ -161,7 +165,7 @@ def peer(name):
     cmd = 'peer probe {0}'.format(name)
 
     op_result = {
-        "exitval": _gluster_xml(cmd).find('opRet').text,
+        "exitval": _gluster_xml(cmd).find('opErrno').text,
         "output": _gluster_xml(cmd).find('output').text
     }
     return op_result
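
The switch from `opRet` to `opErrno` matters because gluster reports `opRet` as 0 for every non-fatal probe outcome (freshly added, already in peer list, and localhost alike), while `opErrno` distinguishes the cases and is the index the state's RESULT_CODES table consumes. A quick sketch built from two of the fixtures in this PR:

import xml.etree.ElementTree as ElementTree

already_peered = ('<cliOutput><opRet>0</opRet><opErrno>2</opErrno>'
                  '<output>Host server2 port 24007 already in peer list</output>'
                  '</cliOutput>')
added = '<cliOutput><opRet>0</opRet><opErrno>0</opErrno><output/></cliOutput>'

for doc in (already_peered, added):
    root = ElementTree.fromstring(doc)
    # opRet is '0' in both cases; only opErrno tells them apart.
    print(root.find('opRet').text, root.find('opErrno').text)
# 0 2  -> already in peer list
# 0 0  -> peer added
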
@@ -349,7 +353,7 @@ def info(name):
         for i, brick in enumerate(_iter(volume, 'brick'), start=1):
             brickkey = 'brick{0}'.format(i)
             bricks[brickkey] = {'path': brick.text}
-            for child in brick.getchildren():
+            for child in list(brick):
                 if not child.tag == 'name':
                     bricks[brickkey].update({child.tag: child.text})
             for k, v in brick.items():
salt/states/glusterfs.py

@@ -11,9 +11,24 @@ import socket
 
 # Import salt libs
 import salt.utils.cloud as suc
+from salt.exceptions import SaltCloudException
 
 log = logging.getLogger(__name__)
 
+RESULT_CODES = [
+    'Peer {0} added successfully.',
+    'Probe on localhost not needed',
+    'Host {0} is already in the peer group',
+    'Host {0} is already part of another cluster',
+    'Volume on {0} conflicts with existing volumes',
+    'UUID of {0} is the same as local uuid',
+    '{0} responded with "unknown peer". This could happen if {0} doesn\'t have localhost defined',
+    'Failed to add peer. Information on {0}\'s logs',
+    'Cluster quorum is not met. Changing peers is not allowed.',
+    'Failed to update list of missed snapshots from {0}',
+    'Conflict comparing list of snapshots from {0}',
+    'Peer is already being detached from cluster.']
+
 
 def __virtual__():
     '''
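
RESULT_CODES is indexed directly by the probe's `opErrno` (the 'exitval' that `glusterfs.peer` now returns), so the state can turn gluster's numeric result into a human-readable comment. A minimal sketch with the table trimmed to three entries and a strict bound check for safety:

RESULT_CODES = [
    'Peer {0} added successfully.',
    'Probe on localhost not needed',
    'Host {0} is already in the peer group',
]  # trimmed; the real table above has twelve entries

result = {'exitval': '2',
          'output': 'Host server2 port 24007 already in peer list'}
if int(result['exitval']) < len(RESULT_CODES):
    print(RESULT_CODES[int(result['exitval'])].format('server2'))
# Host server2 is already in the peer group
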
@@ -48,38 +63,40 @@ def peered(name):
            'comment': '',
            'result': False}
 
-    peers = __salt__['glusterfs.list_peers']()
-
-    if peers:
-        if name in peers:
-            ret['result'] = True
-            ret['comment'] = 'Host {0} already peered'.format(name)
-            return ret
-        elif __opts__['test']:
-            ret['comment'] = 'Peer {0} will be added.'.format(name)
-            ret['result'] = None
-            return ret
-
-    if suc.check_name(name, 'a-zA-Z0-9._-'):
+    try:
+        suc.check_name(name, 'a-zA-Z0-9._-')
+    except SaltCloudException as e:
         ret['comment'] = 'Invalid characters in peer name.'
         ret['result'] = False
         return ret
 
-    if 'output' in __salt__['glusterfs.peer'](name):
-        ret['comment'] = __salt__['glusterfs.peer'](name)['output']
-    else:
-        ret['comment'] = ''
+    peers = __salt__['glusterfs.list_peers']()
+
+    if peers:
+        if name in peers or any([name in peers[x] for x in peers]):
+            ret['result'] = True
+            ret['comment'] = 'Host {0} already peered'.format(name)
+            return ret
+
+    result = __salt__['glusterfs.peer'](name)
+    ret['comment'] = ''
+    if 'exitval' in result:
+        if int(result['exitval']) <= len(RESULT_CODES):
+            ret['comment'] = RESULT_CODES[int(result['exitval'])].format(name)
+        else:
+            if 'comment' in result:
+                ret['comment'] = result['comment']
 
     newpeers = __salt__['glusterfs.list_peers']()
-    #if newpeers was null, we know something didn't work.
-    if newpeers and name in newpeers:
+    # if newpeers was null, we know something didn't work.
+    if newpeers and name in newpeers or any([name in newpeers[x] for x in newpeers]):
         ret['result'] = True
         ret['changes'] = {'new': newpeers, 'old': peers}
-    #In case the hostname doesn't have any periods in it
+    # In case the hostname doesn't have any periods in it
     elif name == socket.gethostname():
         ret['result'] = True
         return ret
-    #In case they have a hostname like "example.com"
+    # In case they have a hostname like "example.com"
     elif name == socket.gethostname().split('.')[0]:
         ret['result'] = True
         return ret
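
The repeated membership test `name in peers or any([name in peers[x] for x in peers])` is what makes reverse peering work: since `list_peers` now returns `{primary_name: [aliases...]}`, a host probed by IP is still found even when gluster lists it under its hostname. A small sketch of the same check in isolation:

# Alias-aware membership test used twice in peered().
peers = {'server1': ['server1.domain.dom', '10.0.0.1']}


def is_peered(name, peers):
    # True if name is a primary hostname or any known alias of a peer.
    return name in peers or any([name in peers[x] for x in peers])


print(is_peered('server1', peers))   # True  (primary name)
print(is_peered('10.0.0.1', peers))  # True  (known alias)
print(is_peered('server2', peers))   # False
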
@@ -234,9 +251,9 @@ def add_volume_bricks(name, bricks):
         - host2:/srv/gluster/drive3
     '''
     ret = {'name': name,
-           'changes': {},
-           'comment': '',
-           'result': False}
+           'changes': {},
+           'comment': '',
+           'result': False}
 
     current_bricks = __salt__['glusterfs.status'](name)
 
@@ -257,7 +274,8 @@ def add_volume_bricks(name, bricks):
         old_bricks = current_bricks
         new_bricks = __salt__['glusterfs.status'](name)
         ret['result'] = True
-        ret['changes'] = {'new': list(new_bricks['bricks'].keys()), 'old': list(old_bricks['bricks'].keys())}
+        ret['changes'] = {'new': list(new_bricks['bricks'].keys()), 'old': list(
+            old_bricks['bricks'].keys())}
         return ret
 
     if 'Bricks already in volume' in add_bricks:
tests/unit/modules/glusterfs_test.py

@@ -24,12 +24,176 @@ from salt.exceptions import CommandExecutionError, SaltInvocationError
 # Globals
 glusterfs.__salt__ = {}
 
 
+class GlusterResults(object):
+    ''' This class holds the xml results from gluster cli transactions '''
+
+    class v34(object):
+        ''' This is for version 3.4 results '''
+
+        class list_peers(object):
+            ''' results from "peer status" '''
+            pass
+
+        class peer_probe(object):
+            fail_cant_connect = fail_bad_hostname = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '<cliOutput>',
+                ' <opRet>-1</opRet>',
+                ' <opErrno>107</opErrno>',
+                ' <opErrstr>Probe returned with unknown errno 107</opErrstr>',
+                '</cliOutput>',
+                ''])
+
+            success_self = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                ' <cliOutput>',
+                ' <opRet>0</opRet>',
+                ' <opErrno>1</opErrno>',
+                ' <opErrstr>(null)</opErrstr>',
+                ' <output>success: on localhost not needed</output>',
+                '</cliOutput>',
+                ''])
+            success_other = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                ' <cliOutput>',
+                ' <opRet>0</opRet>',
+                ' <opErrno>0</opErrno>',
+                ' <opErrstr>(null)</opErrstr>',
+                ' <output>success</output>',
+                '</cliOutput>',
+                ''])
+            success_hostname_after_ip = success_other
+            success_ip_after_hostname = success_other
+            success_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr>(null)</opErrstr>',
+                    ' <output>success: host 10.0.0.2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr>(null)</opErrstr>',
+                    ' <output>success: host server2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_reverse_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr>(null)</opErrstr>',
+                    ' <output>success: host 10.0.0.1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr>(null)</opErrstr>',
+                    ' <output>success: host server1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_first_hostname_from_second_first_time = success_other
+            success_first_hostname_from_second_second_time = success_reverse_already_peer[
+                'hostname']
+            success_first_ip_from_second_first_time = success_reverse_already_peer[
+                'ip']
+
+    class v37(object):
+
+        class peer_probe(object):
+            fail_cant_connect = fail_bad_hostname = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
+                '<cliOutput>',
+                ' <opRet>-1</opRet>',
+                ' <opErrno>107</opErrno>',
+                ' <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>',
+                '</cliOutput>',
+                ''])
+            success_self = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                ' <cliOutput>',
+                ' <opRet>0</opRet>',
+                ' <opErrno>1</opErrno>',
+                ' <opErrstr/>',
+                ' <output>Probe on localhost not needed</output>',
+                '</cliOutput>',
+                ''])
+            success_other = '\n'.join([
+                '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                ' <cliOutput>',
+                ' <opRet>0</opRet>',
+                ' <opErrno>0</opErrno>',
+                ' <opErrstr/>',
+                ' <output/>',
+                '</cliOutput>',
+                ''])
+            success_hostname_after_ip = success_other
+            success_ip_after_hostname = success_other
+            success_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr/>',
+                    ' <output>Host 10.0.0.2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr/>',
+                    ' <output>Host server2 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_reverse_already_peer = {
+                'ip': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr/>',
+                    ' <output>Host 10.0.0.1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    '']),
+                'hostname': '\n'.join([
+                    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+                    ' <cliOutput>',
+                    ' <opRet>0</opRet>',
+                    ' <opErrno>2</opErrno>',
+                    ' <opErrstr/>',
+                    ' <output>Host server1 port 24007 already in peer list</output>',
+                    '</cliOutput>',
+                    ''])}
+            success_first_hostname_from_second_first_time = success_reverse_already_peer[
+                'hostname']
+            success_first_ip_from_second_first_time = success_other
+            success_first_ip_from_second_second_time = success_reverse_already_peer[
+                'ip']
+
 xml_peer_present = """
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <cliOutput>
   <opRet>0</opRet>
   <peer>
     <hostname>node02</hostname>
+    <hostnames>
+      <hostname>node02.domain.dom</hostname>
+      <hostname>10.0.0.2</hostname>
+    </hostnames>
   </peer>
 </cliOutput>
 """
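
These fixtures make the version split concrete: gluster 3.4 wraps messages in "success: ..." strings and literal "(null)" error fields, while 3.7 uses bare messages or an empty <output/>. A quick, hedged way to see what the module under test extracts from one of them (the fixture text is copied from the 3.7 "localhost" case above):

import xml.etree.ElementTree as ElementTree

success_self_37 = '\n'.join([
    '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
    '<cliOutput>',
    ' <opRet>0</opRet>',
    ' <opErrno>1</opErrno>',
    ' <opErrstr/>',
    ' <output>Probe on localhost not needed</output>',
    '</cliOutput>'])

root = ElementTree.fromstring(success_self_37)
print({'exitval': root.find('opErrno').text,
       'output': root.find('output').text})
# {'exitval': '1', 'output': 'Probe on localhost not needed'}
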
@@ -147,45 +311,6 @@ xml_volume_info_stopped = """
 </cliOutput>
 """
 
-xml_peer_probe_success = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>0</opErrno>
-  <opErrstr/>
-  <output/>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_already_member = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>2</opErrno>
-  <opErrstr/>
-  <output>Host salt port 24007 already in peer list</output>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_localhost = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>0</opRet>
-  <opErrno>1</opErrno>
-  <opErrstr/>
-  <output>Probe on localhost not needed</output>
-</cliOutput>
-"""
-
-xml_peer_probe_fail_cant_connect = """
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<cliOutput>
-  <opRet>-1</opRet>
-  <opErrno>107</opErrno>
-  <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>
-</cliOutput>
-"""
-
 xml_command_success = """
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <cliOutput>
@@ -211,13 +336,16 @@ class GlusterfsTestCase(TestCase):
     '''
     # 'list_peers' function tests: 1
 
+    maxDiff = None
+
     def test_list_peers(self):
         '''
         Test if it return a list of gluster peers
         '''
         mock = MagicMock(return_value=xml_peer_present)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertListEqual(glusterfs.list_peers(), ['node02'])
+            self.assertDictEqual(glusterfs.list_peers(), {
+                'node02': ['node02.domain.dom', '10.0.0.2']})
 
         mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -230,29 +358,139 @@ class GlusterfsTestCase(TestCase):
         Test if it adds another node into the peer list.
         '''
 
-        # Peers can be added successfully, already present, be the localhost, or not be connected.
-        mock = MagicMock(return_value=xml_peer_probe_success)
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.peer('salt'),
-                             {'exitval': '0', 'output': None})
-
-        mock = MagicMock(return_value=xml_peer_probe_fail_already_member)
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.peer('salt'),
-                             {'exitval': '0', 'output': 'Host salt port 24007 already in peer list'})
-
-        mock = MagicMock(return_value=xml_peer_probe_fail_localhost)
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.peer('salt'),
-                             {'exitval': '0', 'output': 'Probe on localhost not needed'})
-
-        mock = MagicMock(return_value=xml_peer_probe_fail_cant_connect)
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertRaises(CommandExecutionError, glusterfs.peer, 'salt')
-
         # invalid characters
         mock = MagicMock(return_value=True)
         with patch.object(suc, 'check_name', mock):
             self.assertRaises(SaltInvocationError, glusterfs.peer, 'a')
+        # version 3.4
+        # by hostname
+        # peer failed unknown hostname
+        # peer failed can't connect
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.fail_cant_connect)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2')
+        # peer self
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_self)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server1'),
+                             {'exitval': '1', 'output': 'success: on localhost not needed'})
+        # peer added
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_other)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server2'),
+                             {'exitval': '0', 'output': 'success'})
+        # peer already member
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server2'),
+                             {'exitval': '2', 'output': 'success: host server2 port 24007 already in peer list'})
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_already_peer['ip'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.2'),
+                             {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'})
+        # peer in reverse (probe server1 from server2)
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_first_time)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server1'),
+                             {'exitval': '0', 'output': 'success'})
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_first_hostname_from_second_second_time)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server1'),
+                             {'exitval': '2', 'output': 'success: host server1 port 24007 already in peer list'})
+        # peer in reverse using ip address instead of hostname
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_reverse_already_peer['ip'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.1'),
+                             {'exitval': '2', 'output': 'success: host 10.0.0.1 port 24007 already in peer list'})
+        # by ip address
+        # peer self
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_self)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.1'),
+                             {'exitval': '1', 'output': 'success: on localhost not needed'})
+        # peer added
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_other)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.2'),
+                             {'exitval': '0', 'output': 'success'})
+        # peer already member
+        mock = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_already_peer['ip'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.2'),
+                             {'exitval': '2', 'output': 'success: host 10.0.0.2 port 24007 already in peer list'})
+        # version 3.7
+        # peer failed unknown hostname
+        # peer failed can't connect
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.fail_cant_connect)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertRaises(CommandExecutionError, glusterfs.peer, 'server2')
+        # peer self
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_self)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server1'),
+                             {'exitval': '1', 'output': 'Probe on localhost not needed'})
+        # peer added
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_other)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server2'),
+                             {'exitval': '0', 'output': None})
+        # peer already member
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server2'),
+                             {'exitval': '2', 'output': 'Host server2 port 24007 already in peer list'})
+        # peer in reverse
+        # by ip address
+        # peer added
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_other)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.2'),
+                             {'exitval': '0', 'output': None})
+        # peer already member
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_already_peer['ip'])
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.2'),
+                             {'exitval': '2', 'output': 'Host 10.0.0.2 port 24007 already in peer list'})
+        # peer self
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_self)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.1'),
+                             {'exitval': '1', 'output': 'Probe on localhost not needed'})
+        # peer in reverse (probe server1 from server2)
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_first_hostname_from_second_first_time)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('server1'),
+                             {'exitval': '2', 'output': 'Host server1 port 24007 already in peer list'})
+        # peer in reverse using ip address instead of hostname first time
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.1'),
+                             {'exitval': '0', 'output': None})
+        mock = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertEqual(glusterfs.peer('10.0.0.1'),
+                             {'exitval': '2', 'output': 'Host 10.0.0.1 port 24007 already in peer list'})
 
     # 'create' function tests: 1
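
The pattern used throughout these tests is worth noting: because the execution module shells out through `__salt__['cmd.run']`, swapping that one dunder entry for a `MagicMock` that returns canned XML exercises the full parse path without a gluster daemon. A self-contained sketch of the same idea against a stand-in function (`probe` and the command string here are illustrative, not salt APIs):

import xml.etree.ElementTree as ElementTree
from unittest.mock import MagicMock

__salt__ = {}


def probe(name):
    # Parses whatever cmd.run hands back, just like glusterfs.peer().
    root = ElementTree.fromstring(__salt__['cmd.run'](
        'gluster --xml peer probe {0}'.format(name)))
    return {'exitval': root.find('opErrno').text,
            'output': root.find('output').text}


__salt__['cmd.run'] = MagicMock(return_value=(
    '<cliOutput><opRet>0</opRet><opErrno>2</opErrno>'
    '<output>Host server2 port 24007 already in peer list</output>'
    '</cliOutput>'))
assert probe('server2') == {
    'exitval': '2',
    'output': 'Host server2 port 24007 already in peer list'}
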
@@ -263,17 +501,31 @@ class GlusterfsTestCase(TestCase):
         mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.create, 'newvolume', 'host1:brick')
+                SaltInvocationError,
+                glusterfs.create,
+                'newvolume',
+                'host1:brick')
 
         mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.create, 'newvolume', 'host1/brick')
+                SaltInvocationError,
+                glusterfs.create,
+                'newvolume',
+                'host1/brick')
 
         mock = MagicMock(return_value=xml_command_fail)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertRaises(CommandExecutionError, glusterfs.create, 'newvolume', 'host1:/brick',
-                              True, True, True, 'tcp', True)
+            self.assertRaises(
+                CommandExecutionError,
+                glusterfs.create,
+                'newvolume',
+                'host1:/brick',
+                True,
+                True,
+                True,
+                'tcp',
+                True)
 
         mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -440,7 +692,10 @@ class GlusterfsTestCase(TestCase):
         # volume exists, should not be stopped, and is started
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                SaltInvocationError, glusterfs.delete, 'Newvolume1', False)
+                SaltInvocationError,
+                glusterfs.delete,
+                'Newvolume1',
+                False)
 
         # volume exists, should be stopped, and is started
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
@@ -465,7 +720,10 @@ class GlusterfsTestCase(TestCase):
         mock = MagicMock(return_value=xml_command_fail)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertRaises(
-                CommandExecutionError, glusterfs.add_volume_bricks, 'Newvolume1', ['bricks'])
+                CommandExecutionError,
+                glusterfs.add_volume_bricks,
+                'Newvolume1',
+                ['bricks'])
 
         ret = '1 bricks successfully added to the volume Newvolume1'
         # volume does exist
@@ -483,11 +741,17 @@ class GlusterfsTestCase(TestCase):
         # ... and the added brick does exist
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             # As a list
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', ['bricks']),
-                             'Bricks already in volume Newvolume1')
+            self.assertEqual(
+                glusterfs.add_volume_bricks(
+                    'Newvolume1',
+                    ['bricks']),
+                'Bricks already in volume Newvolume1')
             # As a string
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', 'bricks'),
-                             'Bricks already in volume Newvolume1')
+            self.assertEqual(
+                glusterfs.add_volume_bricks(
+                    'Newvolume1',
+                    'bricks'),
+                'Bricks already in volume Newvolume1')
             # And empty list
             self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', []),
                              'Bricks already in volume Newvolume1')
tests/unit/states/glusterfs_test.py

@@ -19,10 +19,13 @@ ensure_in_syspath('../../')
 
 # Import Salt Libs
 from salt.states import glusterfs
-import salt.utils.cloud
 import socket
+from tests.unit.modules.glusterfs_test import GlusterResults
 
-glusterfs.__salt__ = {}
+import salt.utils.cloud
+
+import salt.modules.glusterfs as mod_glusterfs
+glusterfs.__salt__ = {'glusterfs.peer': mod_glusterfs.peer}
 glusterfs.__opts__ = {}
@@ -37,47 +40,93 @@ class GlusterfsTestCase(TestCase):
         '''
         Test to verify if node is peered.
         '''
-        name = 'salt'
+        name = 'server1'
+        other_name = 'server1'
 
         ret = {'name': name,
                'result': True,
                'comment': '',
                'changes': {}}
 
-        mock = MagicMock(side_effect=[[name], [''], [], [], [], [], [], [],
-                                      [name]])
-        mock_lst = MagicMock(return_value=[])
-        with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock,
-                                             'glusterfs.peer': mock_lst}):
-            comt = ('Host {0} already peered'.format(name))
-            ret.update({'comment': comt})
-            self.assertDictEqual(glusterfs.peered(name), ret)
-
-            with patch.dict(glusterfs.__opts__, {'test': True}):
-                comt = ('Peer {0} will be added.'.format(name))
-                ret.update({'comment': comt, 'result': None})
-                self.assertDictEqual(glusterfs.peered(name), ret)
-
-            with patch.object(salt.utils.cloud, 'check_name',
-                              MagicMock(return_value=True)):
-                comt = ('Invalid characters in peer name.')
-                ret.update({'comment': comt, 'result': False})
-                self.assertDictEqual(glusterfs.peered(name), ret)
-
-            with patch.object(socket, 'gethostname',
-                              MagicMock(side_effect=[name, 'salt.host',
-                                                     'salt.host'])):
-                ret.update({'comment': '', 'result': True})
-                self.assertDictEqual(glusterfs.peered(name), ret)
-
-            comt = ('Host {0} already peered'.format(name))
-            ret.update({'comment': '', 'result': True,
-                        'changes': {'new': ['salt'], 'old': []}})
-            self.assertDictEqual(glusterfs.peered(name), ret)
+        # probe new peer server2 under gluster 3.4.x
+        comt = ('Peer {0} added successfully.'.format(name))
+        ret.update({'comment': comt, 'result': True,
+                    'changes': {'new': {name: []}, 'old': {}}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_other)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(name), ret)
+
+        # probe new peer server2 under gluster 3.7.x
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_other)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(name), ret)
+
+        # probe already existing server2 under gluster 3.4.x
+        comt = ('Host {0} already peered'.format(name))
+        ret.update({'comment': comt, 'changes': {}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname'])
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: []}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(name), ret)
+
+        # probe already existing server2 under gluster 3.7.x
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname'])
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: []}, {name: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(name), ret)
+
+        # Issue 30932: Peering an existing server by IP fails with gluster 3.7+
+        #
+        # server2 was probed by address, 10.0.0.2. Under 3.4, server1 would be
+        # known as 10.0.0.1 but starting with 3.7, its hostname of server1 would be
+        # known instead. Subsequent probing of server1 by server2 used to result in
+        # "success_already_peer" but now it should succeed in adding an alternate
+        # hostname entry.
+
+        name = 'server1'
+        ip = '10.0.0.1'
+        comt = ('Host {0} already peered'.format(ip))
+        ret.update({'name': ip, 'comment': comt, 'changes': {}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v34.peer_probe.success_first_ip_from_second_first_time)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{ip: []}, {ip: []}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(ip), ret)
+
+        comt = ('Peer {0} added successfully.'.format(ip))
+        ret.update({'name': ip, 'comment': comt, 'changes': {
+            'old': {name: []}, 'new': {name: [ip]}}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: []}, {name: [ip]}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(ip), ret)
+
+        comt = ('Host {0} already peered'.format(ip))
+        ret.update({'name': ip, 'comment': comt, 'changes': {}})
+        mock_xml = MagicMock(
+            return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time)
+        with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
+            mock = MagicMock(side_effect=[{name: [ip]}, {name: [ip]}])
+            with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
+                self.assertDictEqual(glusterfs.peered(ip), ret)
+
+        # test for invalid characters
+        comt = ('Invalid characters in peer name.')
+        ret.update({'name': '#badhostname', 'comment': comt, 'result': False})
+        self.assertDictEqual(glusterfs.peered('#badhostname'), ret)
 
     # 'created' function tests: 1
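
One detail that keeps these cases readable: each `list_peers` mock uses `side_effect` with exactly two return values, because `peered()` calls it once before the probe (old state) and once after (new state); the pair therefore encodes the expected `changes` dict. A hedged illustration of that two-call script:

from unittest.mock import MagicMock

# side_effect yields one value per call, matching the before/after
# list_peers calls that peered() makes.
list_peers = MagicMock(side_effect=[{}, {'server2': []}])

old = list_peers()   # first call, before the probe -> {}
new = list_peers()   # second call, after the probe -> {'server2': []}
print({'old': old, 'new': new})
# {'old': {}, 'new': {'server2': []}}
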
@@ -178,20 +227,25 @@ class GlusterfsTestCase(TestCase):
                                {'glusterfs.status': mock,
                                 'glusterfs.add_volume_bricks': mock_t}):
             ret.update({'comment': 'does not exist'})
-            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)
+            self.assertDictEqual(
+                glusterfs.add_volume_bricks(name, bricks), ret)
 
             ret.update({'comment': 'is not started'})
-            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)
+            self.assertDictEqual(
+                glusterfs.add_volume_bricks(name, bricks), ret)
 
             ret.update({'comment': 'bricks successfully added', 'result': True,
                         'changes': {'new': ['host1'], 'old': ['host1']}})
-            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)
+            self.assertDictEqual(
+                glusterfs.add_volume_bricks(name, bricks), ret)
 
             ret.update({'comment': 'Bricks already in volume', 'changes': {}})
-            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)
+            self.assertDictEqual(
+                glusterfs.add_volume_bricks(name, bricks), ret)
 
             ret.update({'comment': '', 'result': False})
-            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)
+            self.assertDictEqual(
+                glusterfs.add_volume_bricks(name, bricks), ret)
 
 
 if __name__ == '__main__':