virt: convert volumes to disks for xen

The libvirt xen driver does not handle disks of 'volume' type. We thus
need to convert them into their equivalents using the 'file' or 'block'
type (issue #58333).
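As a rough illustration of the rule this commit introduces (a simplified sketch of the mapping implemented by _disk_from_pool below, not the verbatim code):

def pool_type_to_disk_type(pool_type):
    """Map a libvirt storage pool type to the disk type emitted in domain XML."""
    if pool_type in ["dir", "fs", "netfs"]:
        return "file"     # <source file='...'/>, path taken from the volume XML
    if pool_type in ["logical", "disk", "iscsi", "scsi"]:
        return "block"    # <source dev='...'/>, path taken from the volume XML
    if pool_type in ["rbd", "gluster", "sheepdog"]:
        return "network"  # libvirt can't handle these pool types as volumes
    return "volume"       # anything else keeps the volume-based definition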
Cédric Bosdonnat, 2020-09-01 18:42:34 +02:00 (committed by Daniel Wozniak)
parent c798380a16
commit ec32aaa4ef
8 changed files with 474 additions and 60 deletions

changelog/58333.fixed (new file)

@@ -0,0 +1 @@
Convert disks of volume type to file or block disks on Xen


@@ -767,6 +767,62 @@ def _migrate(dom, dst_uri, **kwargs):
raise CommandExecutionError(err.get_error_message())
def _disk_from_pool(conn, pool, pool_xml, volume_name):
"""
Create a disk definition out of the pool XML and volume name.
The aim of this function is to replace the volume-based definition when it is not handled by libvirt.
It returns the disk Jinja context to be used when creating the VM.
"""
pool_type = pool_xml.get("type")
disk_context = {}
# handle dir, fs and netfs
if pool_type in ["dir", "netfs", "fs"]:
disk_context["type"] = "file"
volume = pool.storageVolLookupByName(volume_name)
volume_xml = ElementTree.fromstring(volume.XMLDesc())
disk_context["source_file"] = volume_xml.find("./target/path").text
elif pool_type in ["logical", "disk", "iscsi", "scsi"]:
disk_context["type"] = "block"
disk_context["format"] = "raw"
volume = pool.storageVolLookupByName(volume_name)
volume_xml = ElementTree.fromstring(volume.XMLDesc())
disk_context["source_file"] = volume_xml.find("./target/path").text
elif pool_type in ["rbd", "gluster", "sheepdog"]:
# libvirt can't handle rbd, gluster and sheepdog as volumes
disk_context["type"] = "network"
disk_context["protocol"] = pool_type
# Copy the hosts from the pool definition
disk_context["hosts"] = [
{"name": host.get("name"), "port": host.get("port")}
for host in pool_xml.findall(".//host")
]
dir_node = pool_xml.find("./source/dir")
# Gluster and RBD need pool/volume name
name_node = pool_xml.find("./source/name")
if name_node is not None:
disk_context["volume"] = "{}/{}".format(name_node.text, volume_name)
# Copy the authentication if any for RBD
auth_node = pool_xml.find("./source/auth")
if auth_node is not None:
username = auth_node.get("username")
secret_node = auth_node.find("./secret")
usage = secret_node.get("usage")
if not usage:
# Get the usage from the UUID
uuid = secret_node.get("uuid")
usage = conn.secretLookupByUUIDString(uuid).usageID()
disk_context["auth"] = {
"type": "ceph",
"username": username,
"usage": usage,
}
return disk_context
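A usage sketch for the helper above (pool and volume names are hypothetical; real callers pass the pool looked up from the connection, as _gen_xml does below):

# Assuming `conn` is an open libvirt connection and "default" is a
# dir-type pool containing a volume named "hello_system":
pool = conn.storagePoolLookupByName("default")
pool_xml = ElementTree.fromstring(pool.XMLDesc())
disk_context = _disk_from_pool(conn, pool, pool_xml, "hello_system")
# For a dir pool this yields something like:
#   {"type": "file", "source_file": "/path/to/images/hello_system"}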
def _gen_xml(
conn,
name,
@@ -872,41 +928,16 @@ def _gen_xml(
elif disk.get("pool"):
disk_context["volume"] = disk["filename"]
# If we had no source_file, then we want a volume
pool_xml = ElementTree.fromstring(
conn.storagePoolLookupByName(disk["pool"]).XMLDesc()
)
pool = conn.storagePoolLookupByName(disk["pool"])
pool_xml = ElementTree.fromstring(pool.XMLDesc())
pool_type = pool_xml.get("type")
if pool_type in ["rbd", "gluster", "sheepdog"]:
# libvirt can't handle rbd, gluster and sheepdog as volumes
disk_context["type"] = "network"
disk_context["protocol"] = pool_type
# Copy the hosts from the pool definition
disk_context["hosts"] = [
{"name": host.get("name"), "port": host.get("port")}
for host in pool_xml.findall(".//host")
]
dir_node = pool_xml.find("./source/dir")
# Gluster and RBD need pool/volume name
name_node = pool_xml.find("./source/name")
if name_node is not None:
disk_context["volume"] = "{}/{}".format(
name_node.text, disk_context["volume"]
)
# Copy the authentication if any for RBD
auth_node = pool_xml.find("./source/auth")
if auth_node is not None:
username = auth_node.get("username")
secret_node = auth_node.find("./secret")
usage = secret_node.get("usage")
if not usage:
# Get the usage from the UUID
uuid = secret_node.get("uuid")
usage = conn.secretLookupByUUIDString(uuid).usageID()
disk_context["auth"] = {
"type": "ceph",
"username": username,
"usage": usage,
}
# TODO For Xen VMs convert all pool types (issue #58333)
if hypervisor == "xen" or pool_type in ["rbd", "gluster", "sheepdog"]:
disk_context.update(
_disk_from_pool(conn, pool, pool_xml, disk_context["volume"])
)
else:
if pool_type in ["disk", "logical"]:
# The volume format for these types doesn't match the driver format in the VM
@@ -4261,7 +4292,10 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"])
matcher = re.match(
"^(?P<pool>[^/]+)/(?P<volume>.*)$",
disks[disk]["file"],
)
if matcher:
pool_name = matcher.group("pool")
pool = None
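For clarity, the pool/volume matcher above splits on the first slash only; a quick sketch with a hypothetical disk file value:

import re

matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", "default/hello_system")
assert matcher.group("pool") == "default"
assert matcher.group("volume") == "hello_system"
# An absolute path like "/srv/images/vm.qcow2" starts with "/", so the
# pool group ([^/]+) cannot match and re.match returns None.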


@@ -0,0 +1,12 @@
{% macro network_source(disk) -%}
<source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
{%- for host in disk.get('hosts') %}
<host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
{%- endfor %}
{%- if disk.get("auth") %}
<auth username='{{ disk.auth.username }}'>
<secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage }}'/>
</auth>
{%- endif %}
</source>
{%- endmacro %}
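For reference, the macro consumes a disk context shaped like the one _disk_from_pool builds for rbd/gluster/sheepdog pools. All values below are hypothetical:

disk = {
    "type": "network",
    "protocol": "rbd",
    "volume": "mypool/myvolume",
    "hosts": [{"name": "ses1.example.com", "port": "6789"}],
    "auth": {"type": "ceph", "username": "libvirt", "usage": "client.libvirt"},
}
# network_source(disk) then renders roughly:
#   <source protocol='rbd' name='mypool/myvolume'>
#     <host name='ses1.example.com' port='6789'/>
#     <auth username='libvirt'>
#       <secret type='ceph' usage='client.libvirt'/>
#     </auth>
#   </source>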


@@ -1,3 +1,4 @@
{%- import 'libvirt_disks.jinja' as libvirt_disks -%}
<domain type='{{ hypervisor }}'>
<name>{{ name }}</name>
<vcpu>{{ cpu }}</vcpu>
@@ -32,21 +33,13 @@
{% if disk.type == 'file' and 'source_file' in disk -%}
<source file='{{ disk.source_file }}' />
{% endif %}
{% if disk.type == 'block' -%}
<source dev='{{ disk.source_file }}' />
{% endif %}
{% if disk.type == 'volume' and 'pool' in disk -%}
<source pool='{{ disk.pool }}' volume='{{ disk.volume }}' />
{% endif %}
{%- if disk.type == 'network' %}
<source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
{%- for host in disk.get('hosts') %}
<host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
{%- endfor %}
{%- if disk.get("auth") %}
<auth username='{{ disk.auth.username }}'>
<secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
</auth>
{%- endif %}
</source>
{%- endif %}
{%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
<target dev='{{ disk.target_dev }}' bus='{{ disk.disk_bus }}' />
{% if disk.address -%}
<address type='drive' controller='0' bus='0' target='0' unit='{{ disk.index }}' />


@@ -0,0 +1,163 @@
import pytest
import salt.modules.config as config
import salt.modules.virt as virt
from salt._compat import ElementTree as ET
from tests.support.mock import MagicMock
class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
"""
Libvirt library mock
"""
class virDomain(MagicMock):
"""
virDomain mock
"""
class libvirtError(Exception):
"""
libvirtError mock
"""
def __init__(self, msg):
super().__init__(msg)
self.msg = msg
def get_error_message(self):
return self.msg
class MappedResultMock(MagicMock):
"""
Mock class that consistently returns the same mock object for a given first argument.
"""
_instances = {}
def __init__(self):
def mapped_results(*args, **kwargs):
if args[0] not in self._instances.keys():
raise virt.libvirt.libvirtError("Not found: {}".format(args[0]))
return self._instances[args[0]]
super().__init__(side_effect=mapped_results)
def add(self, name):
self._instances[name] = MagicMock()
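A short demo of the helper's contract (hedged sketch, mirroring how the fixtures below rely on it):

# The same first argument always yields the same child mock:
lookup = MappedResultMock()
lookup.add("default")
assert lookup("default") is lookup("default")
# Unregistered names raise the mocked libvirt error:
#   lookup("missing")  ->  virt.libvirt.libvirtError("Not found: missing")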
@pytest.fixture(autouse=True)
def setup_loader(request):
# Create libvirt mock and connection mock
mock_libvirt = LibvirtMock()
mock_conn = MagicMock()
mock_conn.getStoragePoolCapabilities.return_value = "<storagepoolCapabilities/>"
mock_libvirt.openAuth.return_value = mock_conn
setup_loader_modules = {
virt: {
"libvirt": mock_libvirt,
"__salt__": {"config.get": config.get, "config.option": config.option},
},
config: {},
}
with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
yield loader_mock
@pytest.fixture
def make_mock_vm():
def _make_mock_vm(xml_def):
mocked_conn = virt.libvirt.openAuth.return_value
doc = ET.fromstring(xml_def)
name = doc.find("name").text
os_type = "hvm"
os_type_node = doc.find("os/type")
if os_type_node is not None:
os_type = os_type_node.text
mocked_conn.listDefinedDomains.return_value = [name]
# Configure the mocked domain
domain_mock = virt.libvirt.virDomain()
if not isinstance(mocked_conn.lookupByName, MappedResultMock):
mocked_conn.lookupByName = MappedResultMock()
mocked_conn.lookupByName.add(name)
domain_mock = mocked_conn.lookupByName(name)
domain_mock.XMLDesc.return_value = xml_def
domain_mock.OSType.return_value = os_type
# Return state as shutdown
domain_mock.info.return_value = [
4,
2048 * 1024,
1024 * 1024,
2,
1234,
]
domain_mock.ID.return_value = 1
domain_mock.name.return_value = name
domain_mock.attachDevice.return_value = 0
domain_mock.detachDevice.return_value = 0
return domain_mock
return _make_mock_vm
@pytest.fixture
def make_mock_storage_pool():
def _make_mock_storage_pool(name, type, volumes):
mocked_conn = virt.libvirt.openAuth.return_value
# Append the pool name to the list of known mocked pools
all_pools = mocked_conn.listStoragePools.return_value
if not isinstance(all_pools, list):
all_pools = []
all_pools.append(name)
mocked_conn.listStoragePools.return_value = all_pools
# Ensure we have mapped results for the pools
if not isinstance(mocked_conn.storagePoolLookupByName, MappedResultMock):
mocked_conn.storagePoolLookupByName = MappedResultMock()
# Configure the pool
mocked_conn.storagePoolLookupByName.add(name)
mocked_pool = mocked_conn.storagePoolLookupByName(name)
source = ""
if type == "disk":
source = "<device path='/dev/{}'/>".format(name)
mocked_pool.XMLDesc.return_value = """
<pool type='{}'>
<source>
{}
</source>
</pool>
""".format(
type, source
)
# Configure the volumes
if not isinstance(mocked_pool.storageVolLookupByName, MappedResultMock):
mocked_pool.storageVolLookupByName = MappedResultMock()
mocked_pool.listVolumes.return_value = volumes
for volume in volumes:
mocked_pool.storageVolLookupByName.add(volume)
mocked_vol = mocked_pool.storageVolLookupByName(volume)
mocked_vol.XMLDesc.return_value = """
<volume>
<target>
<path>/path/to/{}/{}</path>
</target>
</volume>
""".format(
name, volume
)
return mocked_pool
return _make_mock_storage_pool


@@ -0,0 +1,48 @@
import salt.modules.virt as virt
from salt._compat import ElementTree as ET
def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool):
xml_def = """
<domain type='xen'>
<name>my_vm</name>
<memory unit='KiB'>524288</memory>
<currentMemory unit='KiB'>524288</currentMemory>
<vcpu placement='static'>1</vcpu>
<os>
<type arch='x86_64'>linux</type>
<kernel>/usr/lib/grub2/x86_64-xen/grub.xen</kernel>
</os>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none' io='native'/>
<source file='/path/to/default/vm03_system'/>
<target dev='xvda' bus='xen'/>
</disk>
<disk type='block' device='disk'>
<driver name='qemu' type='raw' cache='none' io='native'/>
<source dev='/path/to/my-iscsi/unit:0:0:1'/>
<target dev='xvdb' bus='xen'/>
</disk>
<controller type='xenbus' index='0'/>
</devices>
</domain>"""
domain_mock = make_mock_vm(xml_def)
make_mock_storage_pool("default", "dir", ["my_vm_system"])
make_mock_storage_pool("my-iscsi", "iscsi", ["unit:0:0:1"])
make_mock_storage_pool("vdb", "disk", ["vdb1"])
ret = virt.update(
"my_vm",
disks=[
{"name": "system", "pool": "default"},
{"name": "iscsi-data", "pool": "my-iscsi", "source_file": "unit:0:0:1"},
{"name": "vdb-data", "pool": "vdb", "source_file": "vdb1"},
],
)
assert ret["definition"]
define_mock = virt.libvirt.openAuth().defineXML
setxml = ET.fromstring(define_mock.call_args[0][0])
assert "block" == setxml.find(".//disk[3]").get("type")
assert "/path/to/vdb/vdb1" == setxml.find(".//disk[3]/source").get("dev")


@@ -629,7 +629,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
nicp = virt._nic_profile("default", "kvm")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib["type"], "kvm")
@@ -662,7 +670,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
diskp = virt._disk_profile(self.mock_conn, "default", "vmware", [], "hello")
nicp = virt._nic_profile("default", "vmware")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"vmware",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib["type"], "vmware")
@@ -759,7 +775,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
)
nicp = virt._nic_profile("noeffect", "vmware")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"vmware",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib["type"], "vmware")
@@ -792,7 +816,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
diskp = virt._disk_profile(self.mock_conn, "noeffect", "kvm", [], "hello")
nicp = virt._nic_profile("noeffect", "kvm")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
self.assertEqual(root.attrib["type"], "kvm")
@@ -975,7 +1007,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
)
nicp = virt._nic_profile(None, "kvm")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[0]
@@ -1024,7 +1064,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"test-vm",
)
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[0]
@@ -1084,7 +1132,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"test-vm",
)
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
self.assertDictEqual(
@ -1132,7 +1188,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"test-vm",
)
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[0]
@@ -1142,6 +1206,79 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual("vdb2", source.attrib["volume"])
self.assertEqual("raw", disk.find("driver").get("type"))
def test_get_xml_volume_xen_dir(self):
"""
Test virt._gen_xml generating a file-backed disk for a Xen hypervisor from a dir pool
"""
self.mock_conn.listStoragePools.return_value = ["default"]
pool_mock = MagicMock()
pool_mock.XMLDesc.return_value = "<pool type='dir'/>"
volume_xml = "<volume><target><path>/path/to/images/hello_system</path></target></volume>"
pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
self.mock_conn.storagePoolLookupByName.return_value = pool_mock
diskp = virt._disk_profile(
self.mock_conn,
None,
"xen",
[{"name": "system", "pool": "default"}],
"hello",
)
xml_data = virt._gen_xml(
self.mock_conn,
"hello",
1,
512,
diskp,
[],
"xen",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[0]
self.assertEqual(disk.attrib["type"], "file")
self.assertEqual(
"/path/to/images/hello_system", disk.find("source").attrib["file"]
)
def test_get_xml_volume_xen_block(self):
"""
Test virt._gen_xml generating a block disk for a Xen hypervisor from block-based pools
"""
self.mock_conn.listStoragePools.return_value = ["default"]
pool_mock = MagicMock()
pool_mock.listVolumes.return_value = ["vol01"]
volume_xml = "<volume><target><path>/dev/to/vol01</path></target></volume>"
pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
self.mock_conn.storagePoolLookupByName.return_value = pool_mock
for pool_type in ["logical", "disk", "iscsi", "scsi"]:
pool_mock.XMLDesc.return_value = "<pool type='{}'><source><device path='/dev/sda'/></source></pool>".format(
pool_type
)
diskp = virt._disk_profile(
self.mock_conn,
None,
"xen",
[{"name": "system", "pool": "default", "source_file": "vol01"}],
"hello",
)
xml_data = virt._gen_xml(
self.mock_conn,
"hello",
1,
512,
diskp,
[],
"xen",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[0]
self.assertEqual(disk.attrib["type"], "block")
self.assertEqual("/dev/to/vol01", disk.find("source").attrib["dev"])
def test_gen_xml_cdrom(self):
"""
Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
@@ -1172,7 +1309,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
)
nicp = virt._nic_profile(None, "kvm")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
disk = root.findall(".//disk")[1]
@@ -1201,7 +1346,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
diskp = virt._disk_profile(self.mock_conn, "default", "vmware", [], "hello")
nicp = virt._nic_profile("default", "vmware")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"vmware",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
controllers = root.findall(".//devices/controller")
@@ -1216,7 +1369,15 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
nicp = virt._nic_profile("default", "kvm")
xml_data = virt._gen_xml(
self.mock_conn, "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
self.mock_conn,
"hello",
1,
512,
diskp,
nicp,
"kvm",
"hvm",
"x86_64",
)
root = ET.fromstring(xml_data)
controllers = root.findall(".//devices/controller")
@@ -1705,7 +1866,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
for call in create_calls
]
self.assertEqual(
["test vm_system", "test vm_test"], vol_names,
["test vm_system", "test vm_test"],
vol_names,
)
stream_mock.sendAll.assert_called_once()
@@ -3705,7 +3867,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"44454c4c-3400-105a-8033-b3c04f4b344a", caps["host"]["host"]["uuid"]
)
self.assertEqual(
{"qemu", "kvm"}, {domainCaps["domain"] for domainCaps in caps["domains"]},
{"qemu", "kvm"},
{domainCaps["domain"] for domainCaps in caps["domains"]},
)
def test_network_tag(self):