Add disk volumes support in virt.init

To support creating VMs with disks on more types of storage pools (iSCSI, disk, LVM, RBD, etc.), virt.init needs to handle disks not only as files, but also as libvirt volumes.
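For context, once this change is in place a VM with a volume-backed disk can be created by pointing a disk's `pool` property at a libvirt pool name. A minimal sketch (the pool name "rbd-pool" and all values are made up; the call mirrors the unit tests below):

```python
# Hedged example: virt.init() with a disk placed in a libvirt storage pool.
# "rbd-pool" is a hypothetical, pre-defined libvirt pool; on a real minion
# this runs via `salt '*' virt.init ...` rather than a direct import.
import salt.modules.virt as virt

virt.init(
    "myvm",
    2,      # vCPUs
    2048,   # memory in MiB
    nic=None,
    disk=None,
    disks=[{"name": "system", "size": 8192, "pool": "rbd-pool"}],
    seed=False,
    start=False,
)
```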
Authored by Cédric Bosdonnat on 2020-04-02 15:34:11 +02:00; committed by Daniel Wozniak.
Commit faf46571c0, parent 7bb3572390.
4 changed files with 368 additions and 131 deletions.

changelog/57005.added (new file)

@@ -0,0 +1 @@
+Add support for disk volumes in virt.running state

salt/modules/virt.py

@@ -637,11 +637,21 @@ def _gen_xml(
             "device": disk.get("device", "disk"),
             "target_dev": "{0}{1}".format(prefix, string.ascii_lowercase[i]),
             "disk_bus": disk["model"],
-            "type": disk["format"],
+            "format": disk.get("format", "raw"),
             "index": six.text_type(i),
         }
-        if "source_file" and disk["source_file"]:
+        if disk.get("source_file"):
             disk_context["source_file"] = disk["source_file"]
+            disk_context["type"] = "file"
+        elif disk.get("pool"):
+            # If we had no source_file, then we want a volume
+            disk_context["type"] = "volume"
+            disk_context["pool"] = disk["pool"]
+            disk_context["volume"] = disk["filename"]
+        else:
+            # No source and no pool is a removable device, use file type
+            disk_context["type"] = "file"

         if hypervisor in ["qemu", "kvm", "bhyve", "xen"]:
             disk_context["address"] = False
@@ -980,7 +990,80 @@ def _qemu_image_create(disk, create_overlay=False, saltenv="base"):
     return img_dest


-def _disk_profile(profile, hypervisor, disks, vm_name, **kwargs):
+def _seed_image(seed_cmd, img_path, name, config, install, pub_key, priv_key):
+    """
+    Helper function to seed an existing image. Note that this doesn't
+    handle volumes.
+    """
+    log.debug("Seeding image")
+    __salt__[seed_cmd](
+        img_path,
+        id_=name,
+        config=config,
+        install=install,
+        pub_key=pub_key,
+        priv_key=priv_key,
+    )
+
+
+def _disk_volume_create(conn, disk, seeder=None, saltenv="base"):
+    """
+    Create a disk volume for use in a VM
+    """
+    if disk.get("overlay_image"):
+        raise SaltInvocationError(
+            "Disk overlay_image property is not supported when creating volumes, "
+            "use backing_store_path and backing_store_format instead."
+        )
+
+    pool = conn.storagePoolLookupByName(disk["pool"])
+
+    # Use existing volume if possible
+    if disk["filename"] in pool.listVolumes():
+        return
+
+    pool_type = ElementTree.fromstring(pool.XMLDesc()).get("type")
+
+    backing_path = disk.get("backing_store_path")
+    backing_format = disk.get("backing_store_format")
+    backing_store = None
+    if (
+        backing_path
+        and backing_format
+        and (disk.get("format") == "qcow2" or pool_type == "logical")
+    ):
+        backing_store = {"path": backing_path, "format": backing_format}
+
+    if backing_store and disk.get("image"):
+        raise SaltInvocationError(
+            "Using a template image with a backing store is not possible, "
+            "choose either of them."
+        )
+
+    vol_xml = _gen_vol_xml(
+        disk["filename"],
+        disk.get("size", 0),
+        format=disk.get("format"),
+        backing_store=backing_store,
+    )
+    _define_vol_xml_str(conn, vol_xml, disk.get("pool"))
+
+    if disk.get("image"):
+        log.debug("Caching disk template image: %s", disk.get("image"))
+        cached_path = __salt__["cp.cache_file"](disk.get("image"), saltenv)
+
+        if seeder:
+            seeder(cached_path)
+        _volume_upload(
+            conn,
+            disk["pool"],
+            disk["filename"],
+            cached_path,
+            sparse=disk.get("format") == "qcow2",
+        )
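For orientation, a sketch of the disk dictionary `_disk_volume_create()` expects; the keys mirror those computed by `_disk_profile()`, and the pool and volume names are made up:

```python
# Hypothetical input for _disk_volume_create(), assuming a pre-existing
# libvirt pool named "rbd-pool".
disk = {
    "name": "system",
    "pool": "rbd-pool",         # libvirt pool name, not a path
    "filename": "myvm_system",  # volume name, computed by _fill_disk_filename()
    "size": 8192,               # MiB, passed to _gen_vol_xml()
    "format": "raw",            # usually derived from the pool capabilities
}
# _disk_volume_create(conn, disk) then defines the "myvm_system" volume in
# "rbd-pool", unless a volume with that name already exists.
```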
+def _disk_profile(conn, profile, hypervisor, disks, vm_name):
"""
Gather the disk profile from the config or apply the default based
on the active hypervisor
@@ -1023,9 +1106,9 @@ def _disk_profile(profile, hypervisor, disks, vm_name, **kwargs):
     if hypervisor == "vmware":
         overlay = {"format": "vmdk", "model": "scsi", "device": "disk"}
     elif hypervisor in ["qemu", "kvm"]:
-        overlay = {"format": "qcow2", "device": "disk", "model": "virtio"}
+        overlay = {"device": "disk", "model": "virtio"}
     elif hypervisor == "xen":
-        overlay = {"format": "qcow2", "device": "disk", "model": "xen"}
+        overlay = {"device": "disk", "model": "xen"}
     elif hypervisor in ["bhyve"]:
         overlay = {"format": "raw", "model": "virtio", "sparse_volume": False}
     else:
@@ -1051,6 +1134,9 @@ def _disk_profile(profile, hypervisor, disks, vm_name, **kwargs):
         else:
             disklist.append(udisk)

+    # Get pool capabilities once to get default format later
+    pool_caps = _pool_capabilities(conn)
+
     for disk in disklist:
         # Add the missing properties that have defaults
         for key, val in six.iteritems(overlay):
@@ -1058,45 +1144,71 @@ def _disk_profile(profile, hypervisor, disks, vm_name, **kwargs):
                 disk[key] = val

         # We may have an already computed source_file (i.e. image not created by our module)
-        if "source_file" in disk and disk["source_file"]:
+        if disk.get("source_file") and os.path.exists(disk["source_file"]):
             disk["filename"] = os.path.basename(disk["source_file"])
-        elif "source_file" not in disk:
-            _fill_disk_filename(vm_name, disk, hypervisor, **kwargs)
+            if not disk.get("format"):
+                disk["format"] = "qcow2"
+        elif disk.get("device", "disk") == "disk":
+            _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps)

     return disklist
-def _fill_disk_filename(vm_name, disk, hypervisor, **kwargs):
+def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):
     """
     Compute the disk file name and update it in the disk value.
     """
-    # Compute the filename
-    disk["filename"] = "{0}_{1}.{2}".format(vm_name, disk["name"], disk["format"])
+    # Compute the filename without extension since it may not make sense for some pool types
+    disk["filename"] = "{0}_{1}".format(vm_name, disk["name"])

     # Compute the source file path
     base_dir = disk.get("pool", None)
     if hypervisor in ["qemu", "kvm", "xen"]:
         # Compute the base directory from the pool property. We may have either a path
         # or a libvirt pool name there.
-        # If the pool is a known libvirt one with a target path, use it as target path
         if not base_dir:
             base_dir = _get_images_dir()
+
+        # If the pool is a known libvirt one, skip the filename since a libvirt volume will be created later
+        if base_dir not in conn.listStoragePools():
+            # For path-based disks, keep the qcow2 default format
+            if not disk.get("format"):
+                disk["format"] = "qcow2"
+            disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"])
+            disk["source_file"] = os.path.join(base_dir, disk["filename"])
         else:
-            if not base_dir.startswith("/"):
-                # The pool seems not to be a path, lookup for pool infos
-                infos = pool_info(base_dir, **kwargs)
-                pool = infos[base_dir] if base_dir in infos else None
-                if (
-                    not pool
-                    or not pool["target_path"]
-                    or pool["target_path"].startswith("/dev")
-                ):
-                    raise CommandExecutionError(
-                        "Unable to create new disk {0}, specified pool {1} does not exist "
-                        "or is unsupported".format(disk["name"], base_dir)
-                    )
-                base_dir = pool["target_path"]
-        disk["source_file"] = os.path.join(base_dir, disk["filename"])
+            if "pool" not in disk:
+                disk["pool"] = base_dir
+            pool_obj = conn.storagePoolLookupByName(base_dir)
+            pool_xml = ElementTree.fromstring(pool_obj.XMLDesc())
+            pool_type = pool_xml.get("type")
+
+            # Is the user wanting to reuse an existing volume?
+            if disk.get("source_file"):
+                if not disk.get("source_file") in pool_obj.listVolumes():
+                    raise SaltInvocationError(
+                        "{} volume doesn't exist in pool {}".format(
+                            disk.get("source_file"), base_dir
+                        )
+                    )
+                disk["filename"] = disk["source_file"]
+                del disk["source_file"]
+
+            # Get the default format from the pool capabilities
+            if not disk.get("format"):
+                volume_options = (
+                    [
+                        type_caps.get("options", {}).get("volume", {})
+                        for type_caps in pool_caps.get("pool_types")
+                        if type_caps["name"] == pool_type
+                    ]
+                    or [{}]
+                )[0]
+                # Still prefer qcow2 if possible
+                if "qcow2" in volume_options.get("targetFormatType", []):
+                    disk["format"] = "qcow2"
+                else:
+                    disk["format"] = volume_options.get("default_format", None)
+
     elif hypervisor == "bhyve" and vm_name:
         disk["filename"] = "{0}.{1}".format(vm_name, disk["name"])
@@ -1104,11 +1216,10 @@ def _fill_disk_filename(vm_name, disk, hypervisor, **kwargs):
             "/dev/zvol", base_dir or "", disk["filename"]
         )
-        disk["source_file"] = os.path.join(base_dir, disk["filename"])
     elif hypervisor in ["esxi", "vmware"]:
         if not base_dir:
             base_dir = __salt__["config.get"]("virt:storagepool", "[0] ")
+        disk["filename"] = "{0}.{1}".format(disk["filename"], disk["format"])
         disk["source_file"] = "{0}{1}".format(base_dir, disk["filename"])
@@ -1433,7 +1544,12 @@ def init(
     pool
         Path to the folder or name of the pool where disks should be created.
-        (Default: depends on hypervisor)
+        (Default: depends on hypervisor and the virt:storagepool configuration)
+
+        .. versionchanged:: sodium
+
+            If the value contains no '/', it is considered a pool name in which to create a volume.
+            Using volumes is mandatory for some pool types, such as rbd or iscsi.

     model
         One of the disk busses allowed by libvirt (Default: depends on hypervisor)
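To illustrate the two accepted forms of the `pool` property (a sketch, with made-up names):

```python
# Path vs. pool name in the "pool" disk property.
disks = [
    # Contains a '/': treated as a directory, the disk becomes a file.
    {"name": "system", "size": 8192, "pool": "/srv/my-images"},
    # No '/': treated as a libvirt pool name, the disk becomes a volume.
    {"name": "data", "size": 16384, "pool": "rbd-pool"},
]
```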
@@ -1444,15 +1560,39 @@ def init(
         Path to the image to use for the disk. If no image is provided, an empty disk will be created
         (Default: ``None``)

+        Note that some pool types do not support uploading an image. This list can evolve with libvirt
+        versions.
+
     overlay_image
         ``True`` to create a QCOW2 disk image with ``image`` as backing file. If ``False``
         the file pointed to by the ``image`` property will simply be copied. (Default: ``False``)

+        .. versionchanged:: sodium
+
+            This property is only valid on path-based disks, not on volumes. To create a volume with a
+            backing store, set the ``backing_store_path`` and ``backing_store_format`` properties.
+
+    backing_store_path
+        Path to the backing store image to use. This can also be the name of a volume to use as
+        backing store within the same pool.
+
+        .. versionadded:: sodium
+
+    backing_store_format
+        Image format of the disk or volume to use as backing store. This property is mandatory when
+        using ``backing_store_path``, to avoid `problems <https://libvirt.org/kbase/backing_chains.html#troubleshooting>`_.
+
+        .. versionadded:: sodium
+
     source_file
         Absolute path to the disk image to use. Not to be confused with the ``image`` parameter. This
         parameter is useful for disk images created outside of this module. Can also
         be ``None`` for devices that have no associated image, like cdroms.

+        .. versionchanged:: sodium
+
+            For volume disks, this can be the name of a volume already existing in the storage pool.
+
     device
         Type of device of the disk. Can be one of 'disk', 'cdrom', 'floppy' or 'lun'.
         (Default: ``'disk'``)
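Putting the new properties together, a hedged sketch of disk definitions using a backing store and reusing an existing volume (pool and volume names are made up):

```python
disks = [
    # New qcow2 volume backed by another volume in the same pool.
    {
        "name": "system",
        "size": 8192,
        "pool": "default",
        "format": "qcow2",
        "backing_store_path": "template_base",  # volume in the same pool
        "backing_store_format": "qcow2",        # mandatory with backing_store_path
    },
    # Reuse a volume that already exists in the pool.
    {"name": "data", "pool": "default", "source_file": "existing_volume"},
]
```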
@@ -1538,7 +1678,7 @@ def init(
     virt_hypervisor = hypervisor
     if not virt_hypervisor:
         # Use the machine types as possible values
-        # Prefer "kvm" over the others if available
+        # Prefer 'kvm' over the others if available
         hypervisors = sorted(
             {
                 x
@@ -1550,7 +1690,7 @@
     )
     virt_hypervisor = "kvm" if "kvm" in hypervisors else hypervisors[0]

-    # esxi used to be a possible value for the hypervisor: map it to vmware since it"s the same
+    # esxi used to be a possible value for the hypervisor: map it to vmware since it's the same
     virt_hypervisor = "vmware" if virt_hypervisor == "esxi" else virt_hypervisor

     log.debug("Using hypervisor %s", virt_hypervisor)
@@ -1560,10 +1700,14 @@
     # the disks are computed as follows:
     # 1 - get the disks defined in the profile
     # 2 - update the disks from the profile with the ones from the user. The matching key is the name.
-    diskp = _disk_profile(disk, virt_hypervisor, disks, name, **kwargs)
+    diskp = _disk_profile(conn, disk, virt_hypervisor, disks, name)

     # Create multiple disks, empty or from specified images.
     for _disk in diskp:
+        # No need to create an image for cdrom devices
+        if _disk.get("device", "disk") == "cdrom":
+            continue
         log.debug("Creating disk for VM [ %s ]: %s", name, _disk)

         if virt_hypervisor == "vmware":
@@ -1581,29 +1725,35 @@ def init(
                 vol_xml = _gen_vol_xml(
                     filename, _disk["size"], format=_disk["format"]
                 )
-                define_vol_xml_str(vol_xml, pool=_disk.get("pool"))
+                _define_vol_xml_str(conn, vol_xml, pool=_disk.get("pool"))

             elif virt_hypervisor in ["qemu", "kvm", "xen"]:
+
+                def seeder(path):
+                    _seed_image(
+                        seed_cmd,
+                        path,
+                        name,
+                        kwargs.get("config"),
+                        install,
+                        pub_key,
+                        priv_key,
+                    )
+
                 create_overlay = _disk.get("overlay_image", False)
-                if _disk["source_file"]:
+                format = _disk.get("format")
+                if _disk.get("source_file"):
                     if os.path.exists(_disk["source_file"]):
                         img_dest = _disk["source_file"]
                     else:
                         img_dest = _qemu_image_create(_disk, create_overlay, saltenv)
+                else:
+                    _disk_volume_create(conn, _disk, seeder if seed else None, saltenv)
+                    img_dest = None

                 # Seed only if there is an image specified
                 if seed and img_dest and _disk.get("image", None):
-                    log.debug("Seed command is %s", seed_cmd)
-                    __salt__[seed_cmd](
-                        img_dest,
-                        id_=name,
-                        config=kwargs.get("config"),
-                        install=install,
-                        pub_key=pub_key,
-                        priv_key=priv_key,
-                    )
+                    seeder(img_dest)

             elif hypervisor in ["bhyve"]:
                 img_dest = _zfs_image_create(
@@ -1647,14 +1797,9 @@ def init(
             **kwargs
         )
         conn.defineXML(vm_xml)
-    except libvirtError as err:
-        # check if failure is due to this domain already existing
-        if "domain '{}' already exists".format(name) in six.text_type(err):
-            # continue on to seeding
-            log.warning(err)
-        else:
-            conn.close()
-            raise err  # a real error we should report upwards
+    except libvirt.libvirtError as err:
+        conn.close()
+        raise CommandExecutionError(err.get_error_message())

     if start:
         log.debug("Starting VM %s", name)
@@ -1958,7 +2103,7 @@ def update(
     # Compute the XML to get the disks, interfaces and graphics
     hypervisor = desc.get("type")
-    all_disks = _disk_profile(disk_profile, hypervisor, disks, name, **kwargs)
+    all_disks = _disk_profile(conn, disk_profile, hypervisor, disks, name)

     if boot is not None:
         boot = _handle_remote_boot_params(boot)

salt/templates/virt/libvirt_domain.jinja

@@ -28,16 +28,19 @@
     </os>
     <devices>
 {% for disk in disks %}
-        <disk type='file' device='{{ disk.device }}'>
-            {% if 'source_file' in disk %}
+        <disk type='{{ disk.type }}' device='{{ disk.device }}'>
+            {% if disk.type == 'file' and 'source_file' in disk -%}
             <source file='{{ disk.source_file }}' />
             {% endif %}
+            {% if disk.type == 'volume' and 'pool' in disk -%}
+            <source pool='{{ disk.pool }}' volume='{{ disk.volume }}' />
+            {% endif %}
             <target dev='{{ disk.target_dev }}' bus='{{ disk.disk_bus }}' />
             {% if disk.address -%}
             <address type='drive' controller='0' bus='0' target='0' unit='{{ disk.index }}' />
             {% endif %}
             {% if disk.driver -%}
-            <driver name='qemu' type='{{ disk.type }}' cache='none' io='native'/>
+            <driver name='qemu' type='{{ disk.format }}' cache='none' io='native'/>
             {% endif %}
         </disk>
 {% endfor %}
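As a sanity check of the volume branch, the template now yields a disk element of this shape; the snippet below hand-writes the expected rendering (an approximation, with made-up pool and volume names) and checks it the same way the unit tests do:

```python
import xml.etree.ElementTree as ET

# Hand-written approximation of the template output for a volume disk.
rendered = """
<disk type='volume' device='disk'>
    <source pool='mypool' volume='myvm_system' />
    <target dev='vda' bus='virtio' />
    <driver name='qemu' type='qcow2' cache='none' io='native'/>
</disk>
"""
disk = ET.fromstring(rendered)
assert disk.get("type") == "volume"
assert disk.find("source").get("pool") == "mypool"
assert disk.find("source").get("volume") == "myvm_system"
```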

tests/unit/modules/test_virt.py

@@ -70,6 +70,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
     def setup_loader_modules(self):
         self.mock_libvirt = LibvirtMock()
         self.mock_conn = MagicMock()
+        self.mock_conn.getStoragePoolCapabilities.return_value = (
+            "<storagepoolCapabilities/>"
+        )
         self.mock_libvirt.openAuth.return_value = self.mock_conn
         self.mock_popen = MagicMock()
         self.addCleanup(delattr, self, "mock_libvirt")
@@ -124,7 +127,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             {"name": "data", "size": 16384, "format": "raw"},
         ]
-        disks = virt._disk_profile("default", "kvm", userdisks, "myvm")
+        disks = virt._disk_profile(self.mock_conn, "default", "kvm", userdisks, "myvm")
         self.assertEqual(
             [
                 {
@@ -154,7 +157,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() default boot device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64")
         root = ET.fromstring(xml_data)
@@ -166,7 +169,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() custom boot device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64", boot_dev="cdrom"
@@ -178,7 +181,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() multiple boot devices
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -199,7 +202,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() serial console
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -221,7 +224,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() serial console
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -243,7 +246,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() telnet console
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -267,7 +270,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() telnet console without any specified port
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -292,7 +295,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with no serial console
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -314,7 +317,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with no telnet console
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -336,7 +339,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with default no graphics device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64")
         root = ET.fromstring(xml_data)
@@ -346,7 +349,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with default no loader
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64")
         root = ET.fromstring(xml_data)
@@ -356,7 +359,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with default vnc graphics device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -389,7 +392,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with default spice graphics device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -415,7 +418,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() with spice graphics device
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml(
             "hello",
@@ -450,7 +453,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(
             virt.__salt__, {"config.get": mock}  # pylint: disable=no-member
         ):
-            ret = virt._disk_profile("nonexistent", "vmware", None, "test-vm")
+            ret = virt._disk_profile(
+                self.mock_conn, "nonexistent", "vmware", None, "test-vm"
+            )
             self.assertTrue(len(ret) == 1)
             found = [disk for disk in ret if disk["name"] == "system"]
             self.assertTrue(bool(found))
@@ -467,7 +472,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(
             virt.__salt__, {"config.get": mock}  # pylint: disable=no-member
         ):
-            ret = virt._disk_profile("nonexistent", "kvm", None, "test-vm")
+            ret = virt._disk_profile(
+                self.mock_conn, "nonexistent", "kvm", None, "test-vm"
+            )
             self.assertTrue(len(ret) == 1)
             found = [disk for disk in ret if disk["name"] == "system"]
             self.assertTrue(bool(found))
@@ -484,7 +491,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         with patch.dict(
             virt.__salt__, {"config.get": mock}  # pylint: disable=no-member
         ):
-            ret = virt._disk_profile("nonexistent", "xen", None, "test-vm")
+            ret = virt._disk_profile(
+                self.mock_conn, "nonexistent", "xen", None, "test-vm"
+            )
             self.assertTrue(len(ret) == 1)
             found = [disk for disk in ret if disk["name"] == "system"]
             self.assertTrue(bool(found))
@@ -598,7 +607,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml(), KVM default profile case
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",)
         root = ET.fromstring(xml_data)
@@ -632,7 +641,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml(), ESXi/vmware default profile case
         """
-        diskp = virt._disk_profile("default", "vmware", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "vmware", [], "hello")
         nicp = virt._nic_profile("default", "vmware")
         xml_data = virt._gen_xml(
             "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
@@ -666,7 +675,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml(), XEN PV default profile case
         """
-        diskp = virt._disk_profile("default", "xen", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "xen", [], "hello")
         nicp = virt._nic_profile("default", "xen")
         with patch.dict(
             virt.__grains__, {"os_family": "Suse"}  # pylint: disable=no-member
@@ -721,7 +730,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             virt.__salt__,  # pylint: disable=no-member
             {"config.get": MagicMock(side_effect=[disks, nics])},
         ):
-            diskp = virt._disk_profile("noeffect", "vmware", [], "hello")
+            diskp = virt._disk_profile(
+                self.mock_conn, "noeffect", "vmware", [], "hello"
+            )
             nicp = virt._nic_profile("noeffect", "vmware")
             xml_data = virt._gen_xml(
                 "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
@@ -754,7 +765,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             virt.__salt__,  # pylint: disable=no-member
             {"config.get": MagicMock(side_effect=[disks, nics])},
         ):
-            diskp = virt._disk_profile("noeffect", "kvm", [], "hello")
+            diskp = virt._disk_profile(self.mock_conn, "noeffect", "kvm", [], "hello")
             nicp = virt._nic_profile("noeffect", "kvm")
             xml_data = virt._gen_xml(
                 "hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",
@@ -767,17 +778,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         self.assertTrue(len(root.findall(".//disk")) == 2)
         self.assertTrue(len(root.findall(".//interface")) == 2)

-    @patch(
-        "salt.modules.virt.pool_info",
-        return_value={
-            "mypool": {
-                "target_path": os.path.join(salt.syspaths.ROOT_DIR, "pools", "mypool")
-            }
-        },
-    )
-    def test_disk_profile_kvm_disk_pool(self, mock_poolinfo):
+    def test_disk_profile_kvm_disk_pool(self):
         """
-        Test virt._gen_xml(), KVM case with pools defined.
+        Test virt._disk_profile(), KVM case with pools defined.
         """
         disks = {
             "noeffect": [
@@ -799,7 +802,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             },
         ):
-            diskp = virt._disk_profile("noeffect", "kvm", [], "hello")
+            diskp = virt._disk_profile(self.mock_conn, "noeffect", "kvm", [], "hello")

             pools_path = (
                 os.path.join(salt.syspaths.ROOT_DIR, "pools", "mypool") + os.sep
@@ -809,7 +812,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             )
             self.assertEqual(len(diskp), 2)
-            self.assertTrue(diskp[0]["source_file"].startswith(pools_path))
             self.assertTrue(diskp[1]["source_file"].startswith(default_path))

     # pylint: enable=no-member
@@ -817,49 +819,56 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml(), KVM case with an external image.
         """
-        diskp = virt._disk_profile(
-            None,
-            "kvm",
-            [{"name": "mydisk", "source_file": "/path/to/my/image.qcow2"}],
-            "hello",
-        )
-        self.assertEqual(len(diskp), 1)
-        self.assertEqual(diskp[0]["source_file"], ("/path/to/my/image.qcow2"))
-
-    @patch("salt.modules.virt.pool_info", return_value={})
-    def test_disk_profile_kvm_disk_pool_notfound(self, mock_poolinfo):
-        """
-        Test virt._gen_xml(), KVM case with pools defined.
-        """
-        disks = {"noeffect": [{"first": {"size": 8192, "pool": "default"}}]}
-        with patch.dict(
-            virt.__salt__,  # pylint: disable=no-member
-            {"config.get": MagicMock(side_effect=[disks, "/default/path/"])},
-        ):
-            with self.assertRaises(CommandExecutionError):
-                virt._disk_profile("noeffect", "kvm", [], "hello")
-
-    @patch(
-        "salt.modules.virt.pool_info", return_value={"target_path": "/dev/disk/by-path"}
-    )
-    def test_disk_profile_kvm_disk_pool_invalid(self, mock_poolinfo):
-        """
-        Test virt._gen_xml(), KVM case with pools defined.
-        """
-        disks = {"noeffect": [{"first": {"size": 8192, "pool": "default"}}]}
-        with patch.dict(
-            virt.__salt__,  # pylint: disable=no-member
-            {"config.get": MagicMock(side_effect=[disks, "/default/path/"])},
-        ):
-            with self.assertRaises(CommandExecutionError):
-                virt._disk_profile("noeffect", "kvm", [], "hello")
+        with patch.dict(os.path.__dict__, {"exists": MagicMock(return_value=True)}):
+            diskp = virt._disk_profile(
+                self.mock_conn,
+                None,
+                "kvm",
+                [{"name": "mydisk", "source_file": "/path/to/my/image.qcow2"}],
+                "hello",
+            )
+
+            self.assertEqual(len(diskp), 1)
+            self.assertEqual(diskp[0]["source_file"], ("/path/to/my/image.qcow2"))
+
+    def test_gen_xml_volume(self):
+        """
+        Test virt._gen_xml(), generating a disk of volume type
+        """
+        self.mock_conn.listStoragePools.return_value = ["default"]
+        self.mock_conn.storagePoolLookupByName.return_value.XMLDesc.return_value = (
+            "<pool type='dir'/>"
+        )
+        self.mock_conn.storagePoolLookupByName.return_value.listVolumes.return_value = [
+            "myvolume"
+        ]
+        diskp = virt._disk_profile(
+            self.mock_conn,
+            None,
+            "kvm",
+            [
+                {"name": "system", "pool": "default"},
+                {"name": "data", "pool": "default", "source_file": "myvolume"},
+            ],
+            "hello",
+        )
+        nicp = virt._nic_profile(None, "kvm")
+        xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",)
+        root = ET.fromstring(xml_data)
+        disk = root.findall(".//disk")[0]
+        self.assertEqual(disk.attrib["device"], "disk")
+        self.assertEqual(disk.attrib["type"], "volume")
+        source = disk.find("source")
+        self.assertEqual("default", source.attrib["pool"])
+        self.assertEqual("hello_system", source.attrib["volume"])
+        self.assertEqual("myvolume", root.find(".//disk[2]/source").get("volume"))

     def test_gen_xml_cdrom(self):
         """
         Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
         """
         diskp = virt._disk_profile(
+            self.mock_conn,
             None,
             "kvm",
             [
@@ -876,6 +885,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",)
         root = ET.fromstring(xml_data)
         disk = root.findall(".//disk")[0]
+        self.assertEqual(disk.get("type"), "file")
         self.assertEqual(disk.attrib["device"], "cdrom")
         self.assertIsNone(disk.find("source"))
@@ -883,7 +893,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() generated device controller for ESXi/vmware
         """
-        diskp = virt._disk_profile("default", "vmware", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "vmware", [], "hello")
         nicp = virt._nic_profile("default", "vmware")
         xml_data = virt._gen_xml(
             "hello", 1, 512, diskp, nicp, "vmware", "hvm", "x86_64",
@@ -898,7 +908,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         """
         Test virt._gen_xml() generated device controller for KVM
         """
-        diskp = virt._disk_profile("default", "kvm", [], "hello")
+        diskp = virt._disk_profile(self.mock_conn, "default", "kvm", [], "hello")
         nicp = virt._nic_profile("default", "kvm")
         xml_data = virt._gen_xml("hello", 1, 512, diskp, nicp, "kvm", "hvm", "x86_64",)
         root = ET.fromstring(xml_data)
@@ -1286,6 +1296,9 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
         # Test case creating disks
         defineMock.reset_mock()
         mock_run.reset_mock()
+        pool_mock = MagicMock()
+        pool_mock.XMLDesc.return_value = '<pool type="dir"/>'
+        self.mock_conn.storagePoolLookupByName.return_value = pool_mock
         virt.init(
             "test vm",
             2,
@@ -1305,20 +1318,95 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
             start=False,
         )
         definition = ET.fromstring(defineMock.call_args_list[0][0][0])
-        disk_sources = [
-            disk.find("source").get("file")
-            if disk.find("source") is not None
-            else None
-            for disk in definition.findall("./devices/disk")
-        ]
         expected_disk_path = os.path.join(root_dir, "test vm_system.qcow2")
-        self.assertEqual(disk_sources, [expected_disk_path, None])
+        self.assertEqual(
+            expected_disk_path,
+            definition.find("./devices/disk[1]/source").get("file"),
+        )
+        self.assertIsNone(definition.find("./devices/disk[2]/source"))
         self.assertEqual(
             mock_run.call_args[0][0],
             'qemu-img create -f qcow2 "{0}" 10240M'.format(expected_disk_path),
         )
         self.assertEqual(mock_chmod.call_args[0][0], expected_disk_path)

+        # Test case creating disk volumes
+        defineMock.reset_mock()
+        mock_run.reset_mock()
+        vol_mock = MagicMock()
+        pool_mock.storageVolLookupByName.return_value = vol_mock
+        pool_mock.listVolumes.return_value = ["test vm_data"]
+        stream_mock = MagicMock()
+        self.mock_conn.newStream.return_value = stream_mock
+        self.mock_conn.listStoragePools.return_value = ["default", "test"]
+        with patch.dict(
+            os.__dict__, {"open": MagicMock(), "close": MagicMock()}
+        ):
+            cache_mock = MagicMock()
+            with patch.dict(virt.__salt__, {"cp.cache_file": cache_mock}):
+                virt.init(
+                    "test vm",
+                    2,
+                    1234,
+                    nic=None,
+                    disk=None,
+                    disks=[
+                        {
+                            "name": "system",
+                            "size": 10240,
+                            "image": "/path/to/image",
+                            "pool": "test",
+                        },
+                        {"name": "data", "size": 10240, "pool": "default"},
+                        {
+                            "name": "test",
+                            "size": 1024,
+                            "pool": "default",
+                            "format": "qcow2",
+                            "backing_store_path": "/backing/path",
+                            "backing_store_format": "raw",
+                        },
+                    ],
+                    seed=False,
+                    start=False,
+                )
+                definition = ET.fromstring(defineMock.call_args_list[0][0][0])
+                self.assertTrue(
+                    all(
+                        [
+                            disk.get("type") == "volume"
+                            for disk in definition.findall("./devices/disk")
+                        ]
+                    )
+                )
+                self.assertEqual(
+                    ["test", "default", "default"],
+                    [
+                        src.get("pool")
+                        for src in definition.findall("./devices/disk/source")
+                    ],
+                )
+                self.assertEqual(
+                    ["test vm_system", "test vm_data", "test vm_test"],
+                    [
+                        src.get("volume")
+                        for src in definition.findall("./devices/disk/source")
+                    ],
+                )
+
+                create_calls = pool_mock.createXML.call_args_list
+                vol_names = [
+                    ET.fromstring(call[0][0]).find("name").text
+                    for call in create_calls
+                ]
+                self.assertEqual(
+                    ["test vm_system", "test vm_test"], vol_names,
+                )
+
+                stream_mock.sendAll.assert_called_once()
+                stream_mock.finish.assert_called_once()
+                vol_mock.upload.assert_called_once_with(stream_mock, 0, 0, 0)

def test_update(self):
"""
Test virt.update()