mirror of
https://github.com/saltstack/salt.git
synced 2025-04-17 10:10:20 +00:00
ZFS backport of 46933 to 2018.3.1
This commit is contained in:
parent
2ee8006da3
commit
3f30ab2ed6
11 changed files with 6977 additions and 2319 deletions
|
@ -19,22 +19,22 @@ import logging
|
|||
import salt.utils.dictupdate
|
||||
import salt.utils.path
|
||||
import salt.utils.platform
|
||||
try:
|
||||
# The zfs_support grain will only be set to True if this module is supported
|
||||
# This allows the grain to be set to False on systems that don't support zfs
|
||||
# _conform_value is only called if zfs_support is set to True
|
||||
from salt.modules.zfs import _conform_value
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Solve the Chicken and egg problem where grains need to run before any
|
||||
# of the modules are loaded and are generally available for any usage.
|
||||
import salt.modules.cmdmod
|
||||
import salt.utils.zfs
|
||||
|
||||
__virtualname__ = 'zfs'
|
||||
__salt__ = {
|
||||
'cmd.run': salt.modules.cmdmod.run,
|
||||
}
|
||||
__utils__ = {
|
||||
'zfs.is_supported': salt.utils.zfs.is_supported,
|
||||
'zfs.has_feature_flags': salt.utils.zfs.has_feature_flags,
|
||||
'zfs.zpool_command': salt.utils.zfs.zpool_command,
|
||||
'zfs.to_size': salt.utils.zfs.to_size,
|
||||
}
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -48,45 +48,6 @@ def __virtual__():
|
|||
return __virtualname__
|
||||
|
||||
|
||||
def _check_retcode(cmd):
|
||||
'''
|
||||
Simple internal wrapper for cmdmod.retcode
|
||||
'''
|
||||
return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0
|
||||
|
||||
|
||||
def _zfs_support():
|
||||
'''
|
||||
Provide information about zfs kernel module
|
||||
'''
|
||||
grains = {'zfs_support': False}
|
||||
|
||||
# Check for zfs support
|
||||
# NOTE: ZFS on Windows is in development
|
||||
# NOTE: ZFS on NetBSD is in development
|
||||
on_supported_platform = False
|
||||
if salt.utils.platform.is_sunos() and salt.utils.path.which('zfs'):
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_linux():
|
||||
modinfo = salt.utils.path.which('modinfo')
|
||||
if modinfo:
|
||||
on_supported_platform = _check_retcode('{0} zfs'.format(modinfo))
|
||||
else:
|
||||
on_supported_platform = _check_retcode('ls /sys/module/zfs')
|
||||
|
||||
# NOTE: fallback to zfs-fuse if needed
|
||||
if not on_supported_platform and salt.utils.path.which('zfs-fuse'):
|
||||
on_supported_platform = True
|
||||
|
||||
# Additional check for the zpool command
|
||||
if on_supported_platform and salt.utils.path.which('zpool'):
|
||||
grains['zfs_support'] = True
|
||||
|
||||
return grains
|
||||
|
||||
|
||||
def _zfs_pool_data():
|
||||
'''
|
||||
Provide grains about zpools
|
||||
|
@ -94,12 +55,16 @@ def _zfs_pool_data():
|
|||
grains = {}
|
||||
|
||||
# collect zpool data
|
||||
zpool_cmd = salt.utils.path.which('zpool')
|
||||
for zpool in __salt__['cmd.run']('{zpool} list -H -p -o name,size'.format(zpool=zpool_cmd)).splitlines():
|
||||
zpool_list_cmd = __utils__['zfs.zpool_command'](
|
||||
'list',
|
||||
flags=['-H'],
|
||||
opts={'-o': 'name,size'},
|
||||
)
|
||||
for zpool in __salt__['cmd.run'](zpool_list_cmd, ignore_retcode=True).splitlines():
|
||||
if 'zpool' not in grains:
|
||||
grains['zpool'] = {}
|
||||
zpool = zpool.split()
|
||||
grains['zpool'][zpool[0]] = _conform_value(zpool[1], True)
|
||||
grains['zpool'][zpool[0]] = __utils__['zfs.to_size'](zpool[1], False)
|
||||
|
||||
# return grain data
|
||||
return grains
|
||||
|
@ -110,8 +75,8 @@ def zfs():
|
|||
Provide grains for zfs/zpool
|
||||
'''
|
||||
grains = {}
|
||||
|
||||
grains = salt.utils.dictupdate.update(grains, _zfs_support(), merge_lists=True)
|
||||
grains['zfs_support'] = __utils__['zfs.is_supported']()
|
||||
grains['zfs_feature_flags'] = __utils__['zfs.has_feature_flags']()
|
||||
if grains['zfs_support']:
|
||||
grains = salt.utils.dictupdate.update(grains, _zfs_pool_data(), merge_lists=True)
|
||||
|
||||
|
|
1302
salt/modules/zfs.py
1302
salt/modules/zfs.py
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
1149
salt/states/zfs.py
1149
salt/states/zfs.py
File diff suppressed because it is too large
Load diff
|
@ -1,13 +1,16 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Management zpool
|
||||
States for managing zpools
|
||||
|
||||
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maturity: new
|
||||
:depends: zpool
|
||||
:depends: salt.utils.zfs, salt.modules.zpool
|
||||
:platform: smartos, illumos, solaris, freebsd, linux
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
.. versionchanged:: 2018.3.1
|
||||
Big refactor to remove duplicate code, better type converions and improved
|
||||
consistancy in output.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -23,12 +26,12 @@ Management zpool
|
|||
- properties:
|
||||
comment: salty storage pool
|
||||
- layout:
|
||||
mirror-0:
|
||||
/dev/disk0
|
||||
/dev/disk1
|
||||
mirror-1:
|
||||
/dev/disk2
|
||||
/dev/disk3
|
||||
- mirror:
|
||||
- /dev/disk0
|
||||
- /dev/disk1
|
||||
- mirror:
|
||||
- /dev/disk2
|
||||
- /dev/disk3
|
||||
|
||||
partitionpool:
|
||||
zpool.present:
|
||||
|
@ -73,7 +76,6 @@ import logging
|
|||
|
||||
# Import Salt libs
|
||||
from salt.utils.odict import OrderedDict
|
||||
from salt.modules.zpool import _conform_value
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -85,15 +87,83 @@ def __virtual__():
|
|||
'''
|
||||
Provides zpool state
|
||||
'''
|
||||
if 'zpool.create' in __salt__:
|
||||
return True
|
||||
if __grains__['zfs_support']:
|
||||
return __virtualname__
|
||||
else:
|
||||
return (
|
||||
False,
|
||||
'{0} state module can only be loaded on illumos, Solaris, SmartOS, FreeBSD, Linux, ...'.format(
|
||||
__virtualname__
|
||||
)
|
||||
)
|
||||
return (False, "The zpool state cannot be loaded: zfs not supported")
|
||||
|
||||
|
||||
def _layout_to_vdev(layout, device_dir=None):
|
||||
'''
|
||||
Turn the layout data into usable vdevs spedcification
|
||||
|
||||
We need to support 2 ways of passing the layout:
|
||||
|
||||
.. code::
|
||||
layout_new:
|
||||
- mirror:
|
||||
- disk0
|
||||
- disk1
|
||||
- mirror:
|
||||
- disk2
|
||||
- disk3
|
||||
|
||||
.. code:
|
||||
layout_legacy:
|
||||
mirror-0:
|
||||
disk0
|
||||
disk1
|
||||
mirror-1:
|
||||
disk2
|
||||
disk3
|
||||
|
||||
'''
|
||||
vdevs = []
|
||||
|
||||
# NOTE: check device_dir exists
|
||||
if device_dir and not os.path.exists(device_dir):
|
||||
device_dir = None
|
||||
|
||||
# NOTE: handle list of OrderedDicts (new layout)
|
||||
if isinstance(layout, list):
|
||||
# NOTE: parse each vdev as a tiny layout and just append
|
||||
for vdev in layout:
|
||||
if isinstance(vdev, OrderedDict):
|
||||
vdevs.extend(_layout_to_vdev(vdev, device_dir))
|
||||
else:
|
||||
if device_dir and vdev[0] != '/':
|
||||
vdev = os.path.join(device_dir, vdev)
|
||||
vdevs.append(vdev)
|
||||
|
||||
# NOTE: handle nested OrderedDict (legacy layout)
|
||||
# this is also used to parse the nested OrderedDicts
|
||||
# from the new layout
|
||||
elif isinstance(layout, OrderedDict):
|
||||
for vdev in layout:
|
||||
# NOTE: extract the vdev type and disks in the vdev
|
||||
vdev_type = vdev.split('-')[0]
|
||||
vdev_disk = layout[vdev]
|
||||
|
||||
# NOTE: skip appending the dummy type 'disk'
|
||||
if vdev_type != 'disk':
|
||||
vdevs.append(vdev_type)
|
||||
|
||||
# NOTE: ensure the disks are a list (legacy layout are not)
|
||||
if not isinstance(vdev_disk, list):
|
||||
vdev_disk = vdev_disk.split(' ')
|
||||
|
||||
# NOTE: also append the actualy disks behind the type
|
||||
# also prepend device_dir to disks if required
|
||||
for disk in vdev_disk:
|
||||
if device_dir and disk[0] != '/':
|
||||
disk = os.path.join(device_dir, disk)
|
||||
vdevs.append(disk)
|
||||
|
||||
# NOTE: we got invalid data for layout
|
||||
else:
|
||||
vdevs = None
|
||||
|
||||
return vdevs
|
||||
|
||||
|
||||
def present(name, properties=None, filesystem_properties=None, layout=None, config=None):
|
||||
|
@ -115,13 +185,34 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
|
|||
|
||||
The following configuration properties can be toggled in the config parameter.
|
||||
- import (true) - try to import the pool before creating it if absent
|
||||
- import_dirs (None) - specify additional locations to scan for devices on import
|
||||
- device_dir (None, SunOS=/dev/rdsk) - specify device directory to use if not absolute path
|
||||
- import_dirs (None) - specify additional locations to scan for devices on import (comma-seperated)
|
||||
- device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for none absolute device paths
|
||||
- force (false) - try to force the import or creation
|
||||
|
||||
.. note::
|
||||
|
||||
Because ID's inside the layout dict must be unique they need to have a suffix.
|
||||
It is no longer needed to give a unique name to each top-level vdev, the old
|
||||
layout format is still supported but no longer recommended.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- mirror:
|
||||
- /tmp/vdisk3
|
||||
- /tmp/vdisk2
|
||||
- mirror:
|
||||
- /tmp/vdisk0
|
||||
- /tmp/vdisk1
|
||||
|
||||
The above yaml will always result in the following zpool create:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
|
||||
|
||||
.. warning::
|
||||
|
||||
The legacy format is also still supported but not recommended,
|
||||
because ID's inside the layout dict must be unique they need to have a suffix.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
|
@ -132,22 +223,16 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
|
|||
/tmp/vdisk0
|
||||
/tmp/vdisk1
|
||||
|
||||
The above yaml will always result in the following zpool create:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
|
||||
|
||||
.. warning::
|
||||
|
||||
Pay attention to the order of your dict!
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
mirror-0:
|
||||
/tmp/vdisk0
|
||||
/tmp/vdisk1
|
||||
/tmp/vdisk2:
|
||||
- mirror:
|
||||
- /tmp/vdisk0
|
||||
- /tmp/vdisk1
|
||||
- /tmp/vdisk2
|
||||
|
||||
The above will result in the following zpool create:
|
||||
|
||||
|
@ -163,60 +248,87 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
|
|||
'result': None,
|
||||
'comment': ''}
|
||||
|
||||
# config defaults
|
||||
state_config = config if config else {}
|
||||
config = {
|
||||
## config defaults
|
||||
default_config = {
|
||||
'import': True,
|
||||
'import_dirs': None,
|
||||
'device_dir': None,
|
||||
'force': False
|
||||
}
|
||||
if __grains__['kernel'] == 'SunOS':
|
||||
config['device_dir'] = '/dev/rdsk'
|
||||
default_config['device_dir'] = '/dev/dsk'
|
||||
elif __grains__['kernel'] == 'Linux':
|
||||
config['device_dir'] = '/dev'
|
||||
config.update(state_config)
|
||||
log.debug('zpool.present::%s::config - %s', name, config)
|
||||
default_config['device_dir'] = '/dev'
|
||||
|
||||
# parse layout
|
||||
if layout:
|
||||
for root_dev in layout:
|
||||
if root_dev.count('-') != 1:
|
||||
continue
|
||||
layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ')
|
||||
## merge state config
|
||||
if config:
|
||||
default_config.update(config)
|
||||
config = default_config
|
||||
|
||||
log.debug('zpool.present::%s::layout - %s', name, layout)
|
||||
## ensure properties are zfs values
|
||||
if properties:
|
||||
properties = __utils__['zfs.from_auto_dict'](properties)
|
||||
elif properties is None:
|
||||
properties = {}
|
||||
if filesystem_properties:
|
||||
filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties)
|
||||
elif filesystem_properties is None:
|
||||
filesystem_properties = {}
|
||||
|
||||
# ensure properties conform to the zfs parsable format
|
||||
for prop in properties:
|
||||
properties[prop] = _conform_value(properties[prop], True)
|
||||
## parse layout
|
||||
vdevs = _layout_to_vdev(layout, config['device_dir'])
|
||||
if vdevs:
|
||||
vdevs.insert(0, name)
|
||||
|
||||
# ensure the pool is present
|
||||
## log configuration
|
||||
log.debug('zpool.present::%s::config - %s',
|
||||
name, config)
|
||||
log.debug('zpool.present::%s::vdevs - %s',
|
||||
name, vdevs)
|
||||
log.debug('zpool.present::%s::properties - %s',
|
||||
name, properties)
|
||||
log.debug('zpool.present::%s::filesystem_properties - %s',
|
||||
name, filesystem_properties)
|
||||
|
||||
## ensure the pool is present
|
||||
ret['result'] = False
|
||||
if __salt__['zpool.exists'](name): # update
|
||||
|
||||
## NOTE: don't do anything because this is a test
|
||||
if __opts__['test']:
|
||||
ret['result'] = True
|
||||
if __salt__['zpool.exists'](name):
|
||||
ret['changes'][name] = 'uptodate'
|
||||
else:
|
||||
ret['changes'][name] = 'imported' if config['import'] else 'created'
|
||||
ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name])
|
||||
|
||||
## NOTE: update pool
|
||||
elif __salt__['zpool.exists'](name):
|
||||
ret['result'] = True
|
||||
|
||||
# retrieve current properties
|
||||
properties_current = __salt__['zpool.get'](name)[name]
|
||||
## NOTE: fetch current pool properties
|
||||
properties_current = __salt__['zpool.get'](name, parsable=True)
|
||||
|
||||
# figure out if updates needed
|
||||
## NOTE: build list of properties to update
|
||||
properties_update = []
|
||||
for prop in properties:
|
||||
## NOTE: skip unexisting properties
|
||||
if prop not in properties_current:
|
||||
log.warning('zpool.present::%s::update - unknown property: %s', name, prop)
|
||||
continue
|
||||
|
||||
## NOTE: compare current and wanted value
|
||||
if properties_current[prop] != properties[prop]:
|
||||
properties_update.append(prop)
|
||||
|
||||
# update properties
|
||||
## NOTE: update pool properties
|
||||
for prop in properties_update:
|
||||
res = __salt__['zpool.set'](name, prop, properties[prop])
|
||||
|
||||
# check return
|
||||
if name in res and prop in res[name] and res[name][prop] == properties[prop]:
|
||||
if res['set']:
|
||||
if name not in ret['changes']:
|
||||
ret['changes'][name] = {}
|
||||
ret['changes'][name].update(res[name])
|
||||
ret['changes'][name][prop] = properties[prop]
|
||||
else:
|
||||
ret['result'] = False
|
||||
if ret['comment'] == '':
|
||||
|
@ -224,58 +336,47 @@ def present(name, properties=None, filesystem_properties=None, layout=None, conf
|
|||
ret['comment'] = '{0} {1}'.format(ret['comment'], prop)
|
||||
|
||||
if ret['result']:
|
||||
ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed'
|
||||
ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed'
|
||||
|
||||
else: # import or create
|
||||
if config['import']: # try import
|
||||
log.debug('zpool.present::%s::importing', name)
|
||||
ret['result'] = __salt__['zpool.import'](
|
||||
## NOTE: import or create the pool (at least try to anyway)
|
||||
else:
|
||||
## NOTE: import pool
|
||||
if config['import']:
|
||||
mod_res = __salt__['zpool.import'](
|
||||
name,
|
||||
force=config['force'],
|
||||
dir=config['import_dirs']
|
||||
dir=config['import_dirs'],
|
||||
)
|
||||
ret['result'] = ret['result'].get(name) == 'imported'
|
||||
|
||||
ret['result'] = mod_res['imported']
|
||||
if ret['result']:
|
||||
ret['changes'][name] = 'imported'
|
||||
ret['comment'] = 'storage pool {0} was imported'.format(name)
|
||||
|
||||
if not ret['result']: # create
|
||||
if not layout:
|
||||
ret['comment'] = 'storage pool {0} was not imported, no layout specified for creation'.format(name)
|
||||
## NOTE: create pool
|
||||
if not ret['result'] and vdevs:
|
||||
log.debug('zpool.present::%s::creating', name)
|
||||
|
||||
## NOTE: execute zpool.create
|
||||
mod_res = __salt__['zpool.create'](
|
||||
*vdevs,
|
||||
force=config['force'],
|
||||
properties=properties,
|
||||
filesystem_properties=filesystem_properties
|
||||
)
|
||||
|
||||
ret['result'] = mod_res['created']
|
||||
if ret['result']:
|
||||
ret['changes'][name] = 'created'
|
||||
ret['comment'] = 'storage pool {0} was created'.format(name)
|
||||
elif 'error' in mod_res:
|
||||
ret['comment'] = mod_res['error']
|
||||
else:
|
||||
log.debug('zpool.present::%s::creating', name)
|
||||
if __opts__['test']:
|
||||
ret['result'] = True
|
||||
else:
|
||||
# construct *vdev parameter for zpool.create
|
||||
params = []
|
||||
params.append(name)
|
||||
for root_dev in layout:
|
||||
if root_dev.count('-') == 1: # special device
|
||||
# NOTE: accomidate non existing 'disk' vdev
|
||||
if root_dev.split('-')[0] != 'disk':
|
||||
params.append(root_dev.split('-')[0]) # add the type by stripping the ID
|
||||
for sub_dev in layout[root_dev]: # add all sub devices
|
||||
if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']):
|
||||
sub_dev = os.path.join(config['device_dir'], sub_dev)
|
||||
params.append(sub_dev)
|
||||
else: # normal device
|
||||
if '/' not in root_dev and config['device_dir'] and os.path.exists(config['device_dir']):
|
||||
root_dev = os.path.join(config['device_dir'], root_dev)
|
||||
params.append(root_dev)
|
||||
ret['comment'] = 'could not create storage pool {0}'.format(name)
|
||||
|
||||
# execute zpool.create
|
||||
ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties)
|
||||
if ret['result'].get(name).startswith('created'):
|
||||
ret['result'] = True
|
||||
else:
|
||||
if ret['result'].get(name):
|
||||
ret['comment'] = ret['result'].get(name)
|
||||
ret['result'] = False
|
||||
|
||||
if ret['result']:
|
||||
ret['changes'][name] = 'created'
|
||||
ret['comment'] = 'storage pool {0} was created'.format(name)
|
||||
## NOTE: give up, we cannot import the pool and we do not have a layout to create it
|
||||
if not ret['result'] and not vdevs:
|
||||
ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name)
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -297,31 +398,36 @@ def absent(name, export=False, force=False):
|
|||
'result': None,
|
||||
'comment': ''}
|
||||
|
||||
# config defaults
|
||||
log.debug('zpool.absent::%s::config::force = %s', name, force)
|
||||
log.debug('zpool.absent::%s::config::export = %s', name, export)
|
||||
## log configuration
|
||||
log.debug('zpool.absent::%s::config::force = %s',
|
||||
name, force)
|
||||
log.debug('zpool.absent::%s::config::export = %s',
|
||||
name, export)
|
||||
|
||||
# ensure the pool is absent
|
||||
## ensure the pool is absent
|
||||
if __salt__['zpool.exists'](name): # looks like we need to do some work
|
||||
mod_res = {}
|
||||
ret['result'] = False
|
||||
|
||||
if export: # try to export the zpool
|
||||
if __opts__['test']:
|
||||
ret['result'] = True
|
||||
else:
|
||||
ret['result'] = __salt__['zpool.export'](name, force=force)
|
||||
ret['result'] = ret['result'].get(name) == 'exported'
|
||||
# NOTE: handle test
|
||||
if __opts__['test']:
|
||||
ret['result'] = True
|
||||
|
||||
else: # try to destroy the zpool
|
||||
if __opts__['test']:
|
||||
ret['result'] = True
|
||||
else:
|
||||
ret['result'] = __salt__['zpool.destroy'](name, force=force)
|
||||
ret['result'] = ret['result'].get(name) == 'destroyed'
|
||||
# NOTE: try to export the pool
|
||||
elif export:
|
||||
mod_res = __salt__['zpool.export'](name, force=force)
|
||||
ret['result'] = mod_res['exported']
|
||||
|
||||
# NOTE: try to destroy the pool
|
||||
else:
|
||||
mod_res = __salt__['zpool.destroy'](name, force=force)
|
||||
ret['result'] = mod_res['destroyed']
|
||||
|
||||
if ret['result']: # update the changes and comment
|
||||
ret['changes'][name] = 'exported' if export else 'destroyed'
|
||||
ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name])
|
||||
elif 'error' in mod_res:
|
||||
ret['comment'] = mod_res['error']
|
||||
|
||||
else: # we are looking good
|
||||
ret['result'] = True
|
||||
|
|
712
salt/utils/zfs.py
Normal file
712
salt/utils/zfs.py
Normal file
|
@ -0,0 +1,712 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Utility functions for zfs
|
||||
|
||||
These functions are for dealing with type conversion and basic execution
|
||||
|
||||
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maturity: new
|
||||
:depends: salt.utils.stringutils, salt.ext, salt.module.cmdmod
|
||||
:platform: illumos,freebsd,linux
|
||||
|
||||
.. versionadded:: 2018.3.1
|
||||
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import, unicode_literals, print_function
|
||||
import os
|
||||
import re
|
||||
import math
|
||||
import logging
|
||||
from numbers import Number
|
||||
|
||||
# Import salt libs
|
||||
from salt.utils.decorators import memoize as real_memoize
|
||||
from salt.utils.odict import OrderedDict
|
||||
from salt.utils.stringutils import to_num as str_to_num
|
||||
import salt.modules.cmdmod
|
||||
|
||||
# Import 3rd-party libs
|
||||
from salt.ext.six.moves import zip
|
||||
|
||||
# Size conversion data
|
||||
re_zfs_size = re.compile(r'^(\d+|\d+(?=\d*)\.\d+)([KkMmGgTtPpEe][Bb]?)$')
|
||||
zfs_size = ['K', 'M', 'G', 'T', 'P', 'E']
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _check_retcode(cmd):
|
||||
'''
|
||||
Simple internal wrapper for cmdmod.retcode
|
||||
'''
|
||||
return salt.modules.cmdmod.retcode(cmd, output_loglevel='quiet', ignore_retcode=True) == 0
|
||||
|
||||
|
||||
def _exec(**kwargs):
|
||||
'''
|
||||
Simple internal wrapper for cmdmod.run
|
||||
'''
|
||||
if 'ignore_retcode' not in kwargs:
|
||||
kwargs['ignore_retcode'] = True
|
||||
if 'output_loglevel' not in kwargs:
|
||||
kwargs['output_loglevel'] = 'quiet'
|
||||
return salt.modules.cmdmod.run_all(**kwargs)
|
||||
|
||||
|
||||
def _merge_last(values, merge_after, merge_with=' '):
|
||||
'''
|
||||
Merge values all values after X into the last value
|
||||
'''
|
||||
if len(values) > merge_after:
|
||||
values = values[0:(merge_after-1)] + [merge_with.join(values[(merge_after-1):])]
|
||||
|
||||
return values
|
||||
|
||||
|
||||
def _property_normalize_name(name):
|
||||
'''
|
||||
Normalizes property names
|
||||
'''
|
||||
if '@' in name:
|
||||
name = name[:name.index('@')+1]
|
||||
return name
|
||||
|
||||
|
||||
def _property_detect_type(name, values):
|
||||
'''
|
||||
Detect the datatype of a property
|
||||
'''
|
||||
value_type = 'str'
|
||||
if values.startswith('on | off'):
|
||||
value_type = 'bool'
|
||||
elif values.startswith('yes | no'):
|
||||
value_type = 'bool_alt'
|
||||
elif values in ['<size>', '<size> | none']:
|
||||
value_type = 'size'
|
||||
elif values in ['<count>', '<count> | none', '<guid>']:
|
||||
value_type = 'numeric'
|
||||
elif name in ['sharenfs', 'sharesmb', 'canmount']:
|
||||
value_type = 'bool'
|
||||
elif name in ['version', 'copies']:
|
||||
value_type = 'numeric'
|
||||
return value_type
|
||||
|
||||
|
||||
def _property_create_dict(header, data):
|
||||
'''
|
||||
Create a property dict
|
||||
'''
|
||||
prop = dict(zip(header, _merge_last(data, len(header))))
|
||||
prop['name'] = _property_normalize_name(prop['property'])
|
||||
prop['type'] = _property_detect_type(prop['name'], prop['values'])
|
||||
prop['edit'] = from_bool(prop['edit'])
|
||||
if 'inherit' in prop:
|
||||
prop['inherit'] = from_bool(prop['inherit'])
|
||||
del prop['property']
|
||||
return prop
|
||||
|
||||
|
||||
def _property_parse_cmd(cmd, alias=None):
|
||||
'''
|
||||
Parse output of zpool/zfs get command
|
||||
'''
|
||||
if not alias:
|
||||
alias = {}
|
||||
properties = {}
|
||||
|
||||
# NOTE: append get to command
|
||||
if cmd[-3:] != 'get':
|
||||
cmd += ' get'
|
||||
|
||||
# NOTE: parse output
|
||||
prop_hdr = []
|
||||
for prop_data in _exec(cmd=cmd)['stderr'].split('\n'):
|
||||
# NOTE: make the line data more managable
|
||||
prop_data = prop_data.lower().split()
|
||||
|
||||
# NOTE: skip empty lines
|
||||
if not prop_data:
|
||||
continue
|
||||
# NOTE: parse header
|
||||
elif prop_data[0] == 'property':
|
||||
prop_hdr = prop_data
|
||||
continue
|
||||
# NOTE: skip lines after data
|
||||
elif not prop_hdr or prop_data[1] not in ['no', 'yes']:
|
||||
continue
|
||||
|
||||
# NOTE: create property dict
|
||||
prop = _property_create_dict(prop_hdr, prop_data)
|
||||
|
||||
# NOTE: add property to dict
|
||||
properties[prop['name']] = prop
|
||||
if prop['name'] in alias:
|
||||
properties[alias[prop['name']]] = prop
|
||||
|
||||
# NOTE: cleanup some duplicate data
|
||||
del prop['name']
|
||||
return properties
|
||||
|
||||
|
||||
def _auto(direction, name, value, source='auto', convert_to_human=True):
|
||||
'''
|
||||
Internal magic for from_auto and to_auto
|
||||
'''
|
||||
# NOTE: check direction
|
||||
if direction not in ['to', 'from']:
|
||||
return value
|
||||
|
||||
# NOTE: collect property data
|
||||
props = property_data_zpool()
|
||||
if source == 'zfs':
|
||||
props = property_data_zfs()
|
||||
elif source == 'auto':
|
||||
props.update(property_data_zfs())
|
||||
|
||||
# NOTE: figure out the conversion type
|
||||
value_type = props[name]['type'] if name in props else 'str'
|
||||
|
||||
# NOTE: convert
|
||||
if value_type == 'size' and direction == 'to':
|
||||
return globals()['{}_{}'.format(direction, value_type)](value, convert_to_human)
|
||||
|
||||
return globals()['{}_{}'.format(direction, value_type)](value)
|
||||
|
||||
|
||||
@real_memoize
|
||||
def _zfs_cmd():
|
||||
'''
|
||||
Return the path of the zfs binary if present
|
||||
'''
|
||||
# Get the path to the zfs binary.
|
||||
return salt.utils.path.which('zfs')
|
||||
|
||||
|
||||
@real_memoize
|
||||
def _zpool_cmd():
|
||||
'''
|
||||
Return the path of the zpool binary if present
|
||||
'''
|
||||
# Get the path to the zfs binary.
|
||||
return salt.utils.path.which('zpool')
|
||||
|
||||
|
||||
def _command(source, command, flags=None, opts=None,
|
||||
property_name=None, property_value=None,
|
||||
filesystem_properties=None, pool_properties=None,
|
||||
target=None):
|
||||
'''
|
||||
Build and properly escape a zfs command
|
||||
|
||||
.. note::
|
||||
|
||||
Input is not considered safe and will be passed through
|
||||
to_auto(from_auto('input_here')), you do not need to do so
|
||||
your self first.
|
||||
|
||||
'''
|
||||
# NOTE: start with the zfs binary and command
|
||||
cmd = [_zpool_cmd() if source == 'zpool' else _zfs_cmd(), command]
|
||||
|
||||
# NOTE: append flags if we have any
|
||||
if flags is None:
|
||||
flags = []
|
||||
for flag in flags:
|
||||
cmd.append(flag)
|
||||
|
||||
# NOTE: append options
|
||||
# we pass through 'sorted' to garentee the same order
|
||||
if opts is None:
|
||||
opts = {}
|
||||
for opt in sorted(opts):
|
||||
if not isinstance(opts[opt], list):
|
||||
opts[opt] = [opts[opt]]
|
||||
for val in opts[opt]:
|
||||
cmd.append(opt)
|
||||
cmd.append(to_str(val))
|
||||
|
||||
# NOTE: append filesystem properties (really just options with a key/value)
|
||||
# we pass through 'sorted' to garentee the same order
|
||||
if filesystem_properties is None:
|
||||
filesystem_properties = {}
|
||||
for fsopt in sorted(filesystem_properties):
|
||||
cmd.append('-O' if source == 'zpool' else '-o')
|
||||
cmd.append('{key}={val}'.format(
|
||||
key=fsopt,
|
||||
val=to_auto(fsopt, filesystem_properties[fsopt], source='zfs', convert_to_human=False),
|
||||
))
|
||||
|
||||
# NOTE: append pool properties (really just options with a key/value)
|
||||
# we pass through 'sorted' to garentee the same order
|
||||
if pool_properties is None:
|
||||
pool_properties = {}
|
||||
for fsopt in sorted(pool_properties):
|
||||
cmd.append('-o')
|
||||
cmd.append('{key}={val}'.format(
|
||||
key=fsopt,
|
||||
val=to_auto(fsopt, pool_properties[fsopt], source='zpool', convert_to_human=False),
|
||||
))
|
||||
|
||||
# NOTE: append property and value
|
||||
# the set command takes a key=value pair, we need to support this
|
||||
if property_name is not None:
|
||||
if property_value is not None:
|
||||
if not isinstance(property_name, list):
|
||||
property_name = [property_name]
|
||||
if not isinstance(property_value, list):
|
||||
property_value = [property_value]
|
||||
for key, val in zip(property_name, property_value):
|
||||
cmd.append('{key}={val}'.format(
|
||||
key=key,
|
||||
val=to_auto(key, val, source=source, convert_to_human=False),
|
||||
))
|
||||
else:
|
||||
cmd.append(property_name)
|
||||
|
||||
# NOTE: append the target(s)
|
||||
if target is not None:
|
||||
if not isinstance(target, list):
|
||||
target = [target]
|
||||
for tgt in target:
|
||||
# NOTE: skip None list items
|
||||
# we do not want to skip False and 0!
|
||||
if tgt is None:
|
||||
continue
|
||||
cmd.append(to_str(tgt))
|
||||
|
||||
return ' '.join(cmd)
|
||||
|
||||
|
||||
@real_memoize
|
||||
def is_supported():
|
||||
'''
|
||||
Check the system for ZFS support
|
||||
'''
|
||||
# Check for supported platforms
|
||||
# NOTE: ZFS on Windows is in development
|
||||
# NOTE: ZFS on NetBSD is in development
|
||||
on_supported_platform = False
|
||||
if salt.utils.platform.is_sunos():
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
|
||||
on_supported_platform = True
|
||||
elif salt.utils.platform.is_darwin() and \
|
||||
os.path.exists('/Library/Extensions/zfs.kext') and \
|
||||
os.path.exists('/dev/zfs'):
|
||||
on_supported_platform = True
|
||||
|
||||
# Additional check for the zpool command
|
||||
return (_zpool_cmd() and on_supported_platform) is True
|
||||
|
||||
|
||||
@real_memoize
|
||||
def has_feature_flags():
|
||||
'''
|
||||
Check if zpool-features is available
|
||||
'''
|
||||
# get man location
|
||||
man = salt.utils.path.which('man')
|
||||
return _check_retcode('{man} zpool-features'.format(
|
||||
man=man
|
||||
)) if man else False
|
||||
|
||||
|
||||
@real_memoize
|
||||
def property_data_zpool():
|
||||
'''
|
||||
Return a dict of zpool properties
|
||||
|
||||
.. note::
|
||||
|
||||
Each property will have an entry with the following info:
|
||||
- edit : boolean - is this property editable after pool creation
|
||||
- type : str - either bool, bool_alt, size, numeric, or string
|
||||
- values : str - list of possible values
|
||||
|
||||
.. warning::
|
||||
|
||||
This data is probed from the output of 'zpool get' with some suplimental
|
||||
data that is hardcoded. There is no better way to get this informatio aside
|
||||
from reading the code.
|
||||
|
||||
'''
|
||||
# NOTE: man page also mentions a few short forms
|
||||
property_data = _property_parse_cmd(_zpool_cmd(), {
|
||||
'allocated': 'alloc',
|
||||
'autoexpand': 'expand',
|
||||
'autoreplace': 'replace',
|
||||
'listsnapshots': 'listsnaps',
|
||||
'fragmentation': 'frag',
|
||||
})
|
||||
|
||||
# NOTE: zpool status/iostat has a few extra fields
|
||||
zpool_size_extra = [
|
||||
'capacity-alloc', 'capacity-free',
|
||||
'operations-read', 'operations-write',
|
||||
'bandwith-read', 'bandwith-write',
|
||||
'read', 'write',
|
||||
]
|
||||
zpool_numeric_extra = [
|
||||
'cksum', 'cap',
|
||||
]
|
||||
|
||||
for prop in zpool_size_extra:
|
||||
property_data[prop] = {
|
||||
'edit': False,
|
||||
'type': 'size',
|
||||
'values': '<size>',
|
||||
}
|
||||
|
||||
for prop in zpool_numeric_extra:
|
||||
property_data[prop] = {
|
||||
'edit': False,
|
||||
'type': 'numeric',
|
||||
'values': '<count>',
|
||||
}
|
||||
|
||||
return property_data
|
||||
|
||||
|
||||
@real_memoize
|
||||
def property_data_zfs():
|
||||
'''
|
||||
Return a dict of zfs properties
|
||||
|
||||
.. note::
|
||||
|
||||
Each property will have an entry with the following info:
|
||||
- edit : boolean - is this property editable after pool creation
|
||||
- inherit : boolean - is this property inheritable
|
||||
- type : str - either bool, bool_alt, size, numeric, or string
|
||||
- values : str - list of possible values
|
||||
|
||||
.. warning::
|
||||
|
||||
This data is probed from the output of 'zfs get' with some suplimental
|
||||
data that is hardcoded. There is no better way to get this informatio aside
|
||||
from reading the code.
|
||||
|
||||
'''
|
||||
return _property_parse_cmd(_zfs_cmd(), {
|
||||
'available': 'avail',
|
||||
'logicalreferenced': 'lrefer.',
|
||||
'logicalused': 'lused.',
|
||||
'referenced': 'refer',
|
||||
'volblocksize': 'volblock',
|
||||
'compression': 'compress',
|
||||
'readonly': 'rdonly',
|
||||
'recordsize': 'recsize',
|
||||
'refreservation': 'refreserv',
|
||||
'reservation': 'reserv',
|
||||
})
|
||||
|
||||
|
||||
def from_numeric(value):
|
||||
'''
|
||||
Convert zfs numeric to python int
|
||||
'''
|
||||
if value == 'none':
|
||||
value = None
|
||||
elif value:
|
||||
value = str_to_num(value)
|
||||
return value
|
||||
|
||||
|
||||
def to_numeric(value):
|
||||
'''
|
||||
Convert python int to zfs numeric
|
||||
'''
|
||||
value = from_numeric(value)
|
||||
if value is None:
|
||||
value = 'none'
|
||||
return value
|
||||
|
||||
|
||||
def from_bool(value):
|
||||
'''
|
||||
Convert zfs bool to python bool
|
||||
'''
|
||||
if value in ['on', 'yes']:
|
||||
value = True
|
||||
elif value in ['off', 'no']:
|
||||
value = False
|
||||
elif value == 'none':
|
||||
value = None
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def from_bool_alt(value):
|
||||
'''
|
||||
Convert zfs bool_alt to python bool
|
||||
'''
|
||||
return from_bool(value)
|
||||
|
||||
|
||||
def to_bool(value):
|
||||
'''
|
||||
Convert python bool to zfs on/off bool
|
||||
'''
|
||||
value = from_bool(value)
|
||||
if isinstance(value, bool):
|
||||
value = 'on' if value else 'off'
|
||||
elif value is None:
|
||||
value = 'none'
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def to_bool_alt(value):
|
||||
'''
|
||||
Convert python to zfs yes/no value
|
||||
'''
|
||||
value = from_bool_alt(value)
|
||||
if isinstance(value, bool):
|
||||
value = 'yes' if value else 'no'
|
||||
elif value is None:
|
||||
value = 'none'
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def from_size(value):
|
||||
'''
|
||||
Convert zfs size (human readble) to python int (bytes)
|
||||
'''
|
||||
match_size = re_zfs_size.match(str(value))
|
||||
if match_size:
|
||||
v_unit = match_size.group(2).upper()[0]
|
||||
v_size = float(match_size.group(1))
|
||||
v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
|
||||
value = v_size * v_multiplier
|
||||
if int(value) == value:
|
||||
value = int(value)
|
||||
elif value is not None:
|
||||
value = str(value)
|
||||
|
||||
return from_numeric(value)
|
||||
|
||||
|
||||
def to_size(value, convert_to_human=True):
|
||||
'''
|
||||
Convert python int (bytes) to zfs size
|
||||
|
||||
NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
|
||||
'''
|
||||
value = from_size(value)
|
||||
if value is None:
|
||||
value = 'none'
|
||||
|
||||
if isinstance(value, Number) and value > 1024 and convert_to_human:
|
||||
v_power = int(math.floor(math.log(value, 1024)))
|
||||
v_multiplier = math.pow(1024, v_power)
|
||||
|
||||
# NOTE: zfs is a bit odd on how it does the rounding,
|
||||
# see libzfs implementation linked above
|
||||
v_size_float = float(value) / v_multiplier
|
||||
if v_size_float == int(v_size_float):
|
||||
value = "{:.0f}{}".format(
|
||||
v_size_float,
|
||||
zfs_size[v_power-1],
|
||||
)
|
||||
else:
|
||||
for v_precision in ["{:.2f}{}", "{:.1f}{}", "{:.0f}{}"]:
|
||||
v_size = v_precision.format(
|
||||
v_size_float,
|
||||
zfs_size[v_power-1],
|
||||
)
|
||||
if len(v_size) <= 5:
|
||||
value = v_size
|
||||
break
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def from_str(value):
|
||||
'''
|
||||
Decode zfs safe string (used for name, path, ...)
|
||||
'''
|
||||
if value == 'none':
|
||||
value = None
|
||||
if value:
|
||||
value = str(value)
|
||||
if value.startswith('"') and value.endswith('"'):
|
||||
value = value[1:-1]
|
||||
value = value.replace('\\"', '"')
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def to_str(value):
|
||||
'''
|
||||
Encode zfs safe string (used for name, path, ...)
|
||||
'''
|
||||
value = from_str(value)
|
||||
|
||||
if value:
|
||||
value = value.replace('"', '\\"')
|
||||
if ' ' in value:
|
||||
value = '"' + value + '"'
|
||||
elif value is None:
|
||||
value = 'none'
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def from_auto(name, value, source='auto'):
|
||||
'''
|
||||
Convert zfs value to python value
|
||||
'''
|
||||
return _auto('from', name, value, source)
|
||||
|
||||
|
||||
def to_auto(name, value, source='auto', convert_to_human=True):
|
||||
'''
|
||||
Convert python value to zfs value
|
||||
'''
|
||||
return _auto('to', name, value, source, convert_to_human)
|
||||
|
||||
|
||||
def from_auto_dict(values, source='auto'):
|
||||
'''
|
||||
Pass an entire dictionary to from_auto
|
||||
|
||||
.. note::
|
||||
The key will be passed as the name
|
||||
|
||||
'''
|
||||
for name, value in values.items():
|
||||
values[name] = from_auto(name, value, source)
|
||||
|
||||
return values
|
||||
|
||||
|
||||
def to_auto_dict(values, source='auto', convert_to_human=True):
|
||||
'''
|
||||
Pass an entire dictionary to to_auto
|
||||
|
||||
.. note::
|
||||
The key will be passed as the name
|
||||
'''
|
||||
for name, value in values.items():
|
||||
values[name] = to_auto(name, value, source, convert_to_human)
|
||||
|
||||
return values
|
||||
|
||||
|
||||
def is_snapshot(name):
|
||||
'''
|
||||
Check if name is a valid snapshot name
|
||||
'''
|
||||
return from_str(name).count('@') == 1
|
||||
|
||||
|
||||
def is_bookmark(name):
|
||||
'''
|
||||
Check if name is a valid bookmark name
|
||||
'''
|
||||
return from_str(name).count('#') == 1
|
||||
|
||||
|
||||
def is_dataset(name):
|
||||
'''
|
||||
Check if name is a valid filesystem or volume name
|
||||
'''
|
||||
return not is_snapshot(name) and not is_bookmark(name)
|
||||
|
||||
|
||||
def zfs_command(command, flags=None, opts=None, property_name=None, property_value=None,
|
||||
filesystem_properties=None, target=None):
|
||||
'''
|
||||
Build and properly escape a zfs command
|
||||
|
||||
.. note::
|
||||
|
||||
Input is not considered safe and will be passed through
|
||||
to_auto(from_auto('input_here')), you do not need to do so
|
||||
your self first.
|
||||
|
||||
'''
|
||||
return _command(
|
||||
'zfs',
|
||||
command=command,
|
||||
flags=flags,
|
||||
opts=opts,
|
||||
property_name=property_name,
|
||||
property_value=property_value,
|
||||
filesystem_properties=filesystem_properties,
|
||||
pool_properties=None,
|
||||
target=target,
|
||||
)
|
||||
|
||||
|
||||
def zpool_command(command, flags=None, opts=None, property_name=None, property_value=None,
|
||||
filesystem_properties=None, pool_properties=None, target=None):
|
||||
'''
|
||||
Build and properly escape a zpool command
|
||||
|
||||
.. note::
|
||||
|
||||
Input is not considered safe and will be passed through
|
||||
to_auto(from_auto('input_here')), you do not need to do so
|
||||
your self first.
|
||||
|
||||
'''
|
||||
return _command(
|
||||
'zpool',
|
||||
command=command,
|
||||
flags=flags,
|
||||
opts=opts,
|
||||
property_name=property_name,
|
||||
property_value=property_value,
|
||||
filesystem_properties=filesystem_properties,
|
||||
pool_properties=pool_properties,
|
||||
target=target,
|
||||
)
|
||||
|
||||
|
||||
def parse_command_result(res, label=None):
|
||||
'''
|
||||
Parse the result of a zpool/zfs command
|
||||
|
||||
.. note::
|
||||
|
||||
Output on failure is rather predicatable.
|
||||
- retcode > 0
|
||||
- each 'error' is a line on stderr
|
||||
- optional 'Usage:' block under those with hits
|
||||
|
||||
We simple check those and return a OrderedDict were
|
||||
we set label = True|False and error = error_messages
|
||||
|
||||
'''
|
||||
ret = OrderedDict()
|
||||
|
||||
if label:
|
||||
ret[label] = res['retcode'] == 0
|
||||
|
||||
if res['retcode'] != 0:
|
||||
ret['error'] = []
|
||||
for error in res['stderr'].splitlines():
|
||||
if error.lower().startswith('usage:'):
|
||||
break
|
||||
if error.lower().startswith("use '-f'"):
|
||||
error = error.replace('-f', 'force=True')
|
||||
if error.lower().startswith("use '-r'"):
|
||||
error = error.replace('-r', 'recursive=True')
|
||||
ret['error'].append(error)
|
||||
|
||||
if ret['error']:
|
||||
ret['error'] = "\n".join(ret['error'])
|
||||
else:
|
||||
del ret['error']
|
||||
|
||||
return ret
|
||||
|
||||
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
@ -1,9 +1,12 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>`
|
||||
Tests for salt.modules.zfs
|
||||
|
||||
tests.unit.modules.zfs_test
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>, Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maturity: new
|
||||
:depends: salt.utils.zfs
|
||||
:platform: illumos,freebsd,linux
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
|
@ -19,9 +22,17 @@ from tests.support.mock import (
|
|||
NO_MOCK_REASON,
|
||||
)
|
||||
|
||||
# Import test data from salt.utils.zfs test
|
||||
from tests.unit.utils.test_zfs import utils_patch
|
||||
|
||||
# Import Salt Execution module to test
|
||||
import salt.utils.zfs
|
||||
import salt.modules.zfs as zfs
|
||||
|
||||
# Import Salt Utils
|
||||
import salt.loader
|
||||
from salt.utils.odict import OrderedDict
|
||||
from salt.utils.dateutils import strftime
|
||||
|
||||
|
||||
# Skip this test case if we don't have access to mock!
|
||||
|
@ -31,10 +42,16 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
|||
This class contains a set of functions that test salt.modules.zfs module
|
||||
'''
|
||||
def setup_loader_modules(self):
|
||||
patcher = patch('salt.modules.zfs._check_zfs', MagicMock(return_value='/sbin/zfs'))
|
||||
patcher.start()
|
||||
self.addCleanup(patcher.stop)
|
||||
return {zfs: {}}
|
||||
self.opts = opts = salt.config.DEFAULT_MINION_OPTS
|
||||
utils = salt.loader.utils(opts, whitelist=['zfs'])
|
||||
zfs_obj = {
|
||||
zfs: {
|
||||
'__opts__': opts,
|
||||
'__utils__': utils,
|
||||
}
|
||||
}
|
||||
|
||||
return zfs_obj
|
||||
|
||||
def test_exists_success(self):
|
||||
'''
|
||||
|
@ -45,7 +62,8 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ''
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertTrue(zfs.exists('myzpool/mydataset'))
|
||||
|
||||
def test_exists_failure_not_exists(self):
|
||||
|
@ -57,7 +75,8 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertFalse(zfs.exists('myzpool/mydataset'))
|
||||
|
||||
def test_exists_failure_invalid_name(self):
|
||||
|
@ -69,231 +88,488 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = "cannot open 'myzpool/': invalid dataset name"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertFalse(zfs.exists('myzpool/'))
|
||||
|
||||
def test_create_success(self):
|
||||
'''
|
||||
Tests successful return of create function on ZFS file system creation
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'created'}
|
||||
res = OrderedDict([('created', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool/mydataset'))
|
||||
|
||||
def test_create_success_with_create_parent(self):
|
||||
'''
|
||||
Tests successful return of create function when ``create_parent=True``
|
||||
'''
|
||||
res = {'myzpool/mydataset/mysubdataset': 'created'}
|
||||
res = OrderedDict([('created', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset', create_parent=True), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool/mydataset/mysubdataset', create_parent=True))
|
||||
|
||||
def test_create_success_with_properties(self):
|
||||
'''
|
||||
Tests successful return of create function on ZFS file system creation (with properties)
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'created'}
|
||||
res = OrderedDict([('created', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(
|
||||
res,
|
||||
zfs.create(
|
||||
'myzpool/mydataset',
|
||||
properties={
|
||||
'mountpoint': '/export/zfs',
|
||||
'sharenfs': 'on'
|
||||
}
|
||||
), res
|
||||
),
|
||||
)
|
||||
|
||||
def test_create_error_missing_dataset(self):
|
||||
'''
|
||||
Tests unsuccessful return of create function if dataset name is missing
|
||||
'''
|
||||
res = {'myzpool': 'cannot create \'myzpool\': missing dataset name'}
|
||||
res = OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool': missing dataset name"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot create 'myzpool': missing dataset name"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool'))
|
||||
|
||||
def test_create_error_trailing_slash(self):
|
||||
'''
|
||||
Tests unsuccessful return of create function if trailing slash in name is present
|
||||
'''
|
||||
res = {'myzpool/': 'cannot create \'myzpool/\': trailing slash in name'}
|
||||
res = OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool/': trailing slash in name"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot create 'myzpool/': trailing slash in name"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool/'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool/'))
|
||||
|
||||
def test_create_error_no_such_pool(self):
|
||||
'''
|
||||
Tests unsuccessful return of create function if the pool is not present
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'cannot create \'myzpool/mydataset\': no such pool \'myzpool\''}
|
||||
res = OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool/mydataset': no such pool 'myzpool'"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot create 'myzpool/mydataset': no such pool 'myzpool'"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool/mydataset'))
|
||||
|
||||
def test_create_error_missing_parent(self):
|
||||
'''
|
||||
Tests unsuccessful return of create function if the parent datasets do not exist
|
||||
'''
|
||||
res = {'myzpool/mydataset/mysubdataset': 'cannot create \'myzpool/mydataset/mysubdataset\': parent does not exist'}
|
||||
res = OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool/mydataset/mysubdataset': parent does not exist"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot create 'myzpool/mydataset/mysubdataset': parent does not exist"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.create('myzpool/mydataset/mysubdataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.create('myzpool/mydataset/mysubdataset'))
|
||||
|
||||
def test_destroy_success(self):
|
||||
'''
|
||||
Tests successful return of destroy function on ZFS file system destruction
|
||||
'''
|
||||
res = OrderedDict([('destroyed', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.destroy('myzpool/mydataset'))
|
||||
|
||||
def test_destroy_error_not_exists(self):
|
||||
'''
|
||||
Tests failure return of destroy function on ZFS file system destruction
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('destroyed', False),
|
||||
('error', "cannot open 'myzpool/mydataset': dataset does not exist"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.destroy('myzpool/mydataset'))
|
||||
|
||||
def test_destroy_error_has_children(self):
|
||||
'''
|
||||
Tests failure return of destroy function on ZFS file system destruction
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('destroyed', False),
|
||||
('error', "\n".join([
|
||||
"cannot destroy 'myzpool/mydataset': filesystem has children",
|
||||
"use 'recursive=True' to destroy the following datasets:",
|
||||
"myzpool/mydataset@snapshot",
|
||||
])),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "\n".join([
|
||||
"cannot destroy 'myzpool/mydataset': filesystem has children",
|
||||
"use '-r' to destroy the following datasets:",
|
||||
"myzpool/mydataset@snapshot",
|
||||
])
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.destroy('myzpool/mydataset'))
|
||||
|
||||
def test_rename_success(self):
|
||||
'''
|
||||
Tests successful return of rename function
|
||||
'''
|
||||
res = OrderedDict([('renamed', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.rename('myzpool/mydataset', 'myzpool/newdataset'))
|
||||
|
||||
def test_rename_error_not_exists(self):
|
||||
'''
|
||||
Tests failure return of rename function
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('renamed', False),
|
||||
('error', "cannot open 'myzpool/mydataset': dataset does not exist"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'myzpool/mydataset': dataset does not exist"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.rename('myzpool/mydataset', 'myzpool/newdataset'))
|
||||
|
||||
def test_list_success(self):
|
||||
'''
|
||||
Tests zfs list
|
||||
'''
|
||||
res = OrderedDict([('myzpool', {'avail': '954G', 'mountpoint': '/myzpool', 'used': '844G', 'refer': '96K'})])
|
||||
ret = {'pid': 31817, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\t844G\t954G\t96K\t/myzpool'}
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('used', 849329782784),
|
||||
('avail', 1081258016768),
|
||||
('refer', 98304),
|
||||
('mountpoint', '/myzpool'),
|
||||
])),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = 'myzpool\t791G\t1007G\t96K\t/myzpool'
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.list_('myzpool'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_('myzpool'))
|
||||
|
||||
def test_list_parsable_success(self):
|
||||
'''
|
||||
Tests zfs list with parsable output
|
||||
Tests zfs list with parsable set to False
|
||||
'''
|
||||
res = OrderedDict([('myzpool', {'avail': 1024795238400, 'mountpoint': '/myzpool', 'used': 905792561152, 'refer': 98304})])
|
||||
ret = {'pid': 31817, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\t905792561152\t1024795238400\t98304\t/myzpool'}
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('used', '791G'),
|
||||
('avail', '1007G'),
|
||||
('refer', '96K'),
|
||||
('mountpoint', '/myzpool'),
|
||||
])),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = 'myzpool\t791G\t1007G\t96K\t/myzpool'
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.list_('myzpool', parsable=True), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_('myzpool', parsable=False))
|
||||
|
||||
def test_list_custom_success(self):
|
||||
'''
|
||||
Tests zfs list
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('canmount', True),
|
||||
('used', 849329782784),
|
||||
('avail', 1081258016768),
|
||||
('compression', False),
|
||||
])),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = 'myzpool\ton\t791G\t1007G\toff'
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_('myzpool', properties='canmount,used,avail,compression'))
|
||||
|
||||
def test_list_custom_parsable_success(self):
|
||||
'''
|
||||
Tests zfs list
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('canmount', 'on'),
|
||||
('used', '791G'),
|
||||
('avail', '1007G'),
|
||||
('compression', 'off'),
|
||||
])),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = 'myzpool\ton\t791G\t1007G\toff'
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_('myzpool', properties='canmount,used,avail,compression', parsable=False))
|
||||
|
||||
def test_list_error_no_dataset(self):
|
||||
'''
|
||||
Tests zfs list
|
||||
'''
|
||||
res = OrderedDict()
|
||||
ret = {}
|
||||
ret['retcode'] = 1
|
||||
ret['stdout'] = "cannot open 'myzpool': dataset does not exist"
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_('myzpool'))
|
||||
|
||||
def test_list_mount_success(self):
|
||||
'''
|
||||
Tests zfs list_mount
|
||||
'''
|
||||
res = OrderedDict([
|
||||
('myzpool/data', '/data'),
|
||||
('myzpool/data/ares', '/data/ares'),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = "\n".join([
|
||||
"myzpool/data\t\t\t\t/data",
|
||||
"myzpool/data/ares\t\t\t/data/ares",
|
||||
])
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.list_mount())
|
||||
|
||||
def test_mount_success(self):
|
||||
'''
|
||||
Tests zfs mount of filesystem
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'mounted'}
|
||||
res = OrderedDict([('mounted', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.mount('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.mount('myzpool/mydataset'))
|
||||
|
||||
def test_mount_failure(self):
|
||||
'''
|
||||
Tests zfs mount of already mounted filesystem
|
||||
'''
|
||||
res = {'myzpool/mydataset': "cannot mount 'myzpool/mydataset': filesystem already mounted"}
|
||||
res = OrderedDict([('mounted', False), ('error', "cannot mount 'myzpool/mydataset': filesystem already mounted")])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot mount 'myzpool/mydataset': filesystem already mounted"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.mount('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.mount('myzpool/mydataset'))
|
||||
|
||||
def test_unmount_success(self):
|
||||
'''
|
||||
Tests zfs unmount of filesystem
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'unmounted'}
|
||||
res = OrderedDict([('unmounted', True)])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.unmount('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.unmount('myzpool/mydataset'))
|
||||
|
||||
def test_unmount_failure(self):
|
||||
'''
|
||||
Tests zfs unmount of already mounted filesystem
|
||||
'''
|
||||
res = {'myzpool/mydataset': "cannot mount 'myzpool/mydataset': not currently mounted"}
|
||||
res = OrderedDict([
|
||||
('unmounted', False),
|
||||
('error', "cannot mount 'myzpool/mydataset': not currently mounted"),
|
||||
])
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot mount 'myzpool/mydataset': not currently mounted"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.unmount('myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.unmount('myzpool/mydataset'))
|
||||
|
||||
def test_inherit_success(self):
|
||||
'''
|
||||
Tests zfs inherit of compression property
|
||||
'''
|
||||
res = {'myzpool/mydataset': {'compression': 'cleared'}}
|
||||
res = OrderedDict([('inherited', True)])
|
||||
ret = {'pid': 45193, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.inherit('compression', 'myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.inherit('compression', 'myzpool/mydataset'))
|
||||
|
||||
def test_inherit_failure(self):
|
||||
'''
|
||||
Tests zfs inherit of canmount
|
||||
'''
|
||||
res = {
|
||||
'myzpool/mydataset': {
|
||||
'canmount': "'canmount' property cannot be inherited, use revert=True to try and reset it to it's default value."
|
||||
}
|
||||
}
|
||||
res = OrderedDict([
|
||||
('inherited', False),
|
||||
('error', "'canmount' property cannot be inherited"),
|
||||
])
|
||||
ret = {'pid': 43898, 'retcode': 1, 'stderr': "'canmount' property cannot be inherited", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.inherit('canmount', 'myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.inherit('canmount', 'myzpool/mydataset'))
|
||||
|
||||
def test_diff(self):
|
||||
'''
|
||||
Tests zfs diff
|
||||
'''
|
||||
res = ['M\t/\t/myzpool/mydataset/', '+\tF\t/myzpool/mydataset/hello']
|
||||
ret = {'pid': 51495, 'retcode': 0, 'stderr': '', 'stdout': 'M\t/\t/myzpool/mydataset/\n+\tF\t/myzpool/mydataset/hello'}
|
||||
res = [
|
||||
"1517063879.144517494\tM\t\t/data/test/",
|
||||
"1517063875.296592355\t+\t\t/data/test/world",
|
||||
"1517063879.274438467\t+\t\t/data/test/hello",
|
||||
]
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = "\n".join([
|
||||
"1517063879.144517494\tM\t\t/data/test/",
|
||||
"1517063875.296592355\t+\t\t/data/test/world",
|
||||
"1517063879.274438467\t+\t\t/data/test/hello",
|
||||
])
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.diff('myzpool/mydataset@yesterday', 'myzpool/mydataset'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.diff('myzpool/mydataset@yesterday', 'myzpool/mydataset'))
|
||||
|
||||
def test_diff_parsed_time(self):
|
||||
'''
|
||||
Tests zfs diff
|
||||
'''
|
||||
## NOTE: do not hardcode parsed timestamps, timezone play a role here.
|
||||
## zfs diff output seems to be timezone aware
|
||||
res = OrderedDict([
|
||||
(strftime(1517063879.144517494, '%Y-%m-%d.%H:%M:%S.%f'), 'M\t\t/data/test/'),
|
||||
(strftime(1517063875.296592355, '%Y-%m-%d.%H:%M:%S.%f'), '+\t\t/data/test/world'),
|
||||
(strftime(1517063879.274438467, '%Y-%m-%d.%H:%M:%S.%f'), '+\t\t/data/test/hello'),
|
||||
])
|
||||
ret = {}
|
||||
ret['retcode'] = 0
|
||||
ret['stdout'] = "\n".join([
|
||||
"1517063879.144517494\tM\t\t/data/test/",
|
||||
"1517063875.296592355\t+\t\t/data/test/world",
|
||||
"1517063879.274438467\t+\t\t/data/test/hello",
|
||||
])
|
||||
ret['stderr'] = ''
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.diff('myzpool/data@yesterday', 'myzpool/data', parsable=False))
|
||||
|
||||
def test_rollback_success(self):
|
||||
'''
|
||||
Tests zfs rollback success
|
||||
'''
|
||||
res = {'myzpool/mydataset': 'rolledback to snapshot: yesterday'}
|
||||
res = OrderedDict([('rolledback', True)])
|
||||
ret = {'pid': 56502, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.rollback('myzpool/mydataset@yesterday'))
|
||||
|
||||
def test_rollback_failure(self):
|
||||
'''
|
||||
Tests zfs rollback failure
|
||||
'''
|
||||
res = {'myzpool/mydataset': "cannot rollback to 'myzpool/mydataset@yesterday': more recent snapshots "
|
||||
"or bookmarks exist\nuse '-r' to force deletion of the following snapshots "
|
||||
"and bookmarks:\nmyzpool/mydataset@today"}
|
||||
res = OrderedDict([
|
||||
('rolledback', False),
|
||||
('error', "\n".join([
|
||||
"cannot rollback to 'myzpool/mydataset@yesterday': more recent snapshots or bookmarks exist",
|
||||
"use 'recursive=True' to force deletion of the following snapshots and bookmarks:",
|
||||
"myzpool/mydataset@today"
|
||||
]),
|
||||
),
|
||||
])
|
||||
ret = {
|
||||
'pid': 57471,
|
||||
'retcode': 1,
|
||||
|
@ -303,186 +579,243 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
|||
'stdout': ''
|
||||
}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.rollback('myzpool/mydataset@yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.rollback('myzpool/mydataset@yesterday'))
|
||||
|
||||
def test_clone_success(self):
|
||||
'''
|
||||
Tests zfs clone success
|
||||
'''
|
||||
res = {'myzpool/yesterday': 'cloned from myzpool/mydataset@yesterday'}
|
||||
res = OrderedDict([('cloned', True)])
|
||||
ret = {'pid': 64532, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.clone('myzpool/mydataset@yesterday', 'myzpool/yesterday'))
|
||||
|
||||
def test_clone_failure(self):
|
||||
'''
|
||||
Tests zfs clone failure
|
||||
'''
|
||||
res = {'myzpool/archive/yesterday': "cannot create 'myzpool/archive/yesterday': parent does not exist"}
|
||||
res = OrderedDict([
|
||||
('cloned', False),
|
||||
('error', "cannot create 'myzpool/archive/yesterday': parent does not exist"),
|
||||
])
|
||||
ret = {'pid': 64864, 'retcode': 1, 'stderr': "cannot create 'myzpool/archive/yesterday': parent does not exist", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.clone('myzpool/mydataset@yesterday', 'myzpool/archive/yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.clone('myzpool/mydataset@yesterday', 'myzpool/archive/yesterday'))
|
||||
|
||||
def test_promote_success(self):
|
||||
'''
|
||||
Tests zfs promote success
|
||||
'''
|
||||
res = {'myzpool/yesterday': 'promoted'}
|
||||
res = OrderedDict([('promoted', True)])
|
||||
ret = {'pid': 69075, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.promote('myzpool/yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.promote('myzpool/yesterday'))
|
||||
|
||||
def test_promote_failure(self):
|
||||
'''
|
||||
Tests zfs promote failure
|
||||
'''
|
||||
res = {'myzpool/yesterday': "cannot promote 'myzpool/yesterday': not a cloned filesystem"}
|
||||
res = OrderedDict([
|
||||
('promoted', False),
|
||||
('error', "cannot promote 'myzpool/yesterday': not a cloned filesystem"),
|
||||
])
|
||||
ret = {'pid': 69209, 'retcode': 1, 'stderr': "cannot promote 'myzpool/yesterday': not a cloned filesystem", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.promote('myzpool/yesterday'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.promote('myzpool/yesterday'))
|
||||
|
||||
def test_bookmark_success(self):
|
||||
'''
|
||||
Tests zfs bookmark success
|
||||
'''
|
||||
with patch('salt.utils.path.which', MagicMock(return_value='/usr/bin/man')):
|
||||
res = {'myzpool/mydataset@yesterday': 'bookmarked as myzpool/mydataset#important'}
|
||||
res = OrderedDict([('bookmarked', True)])
|
||||
ret = {'pid': 20990, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.bookmark('myzpool/mydataset@yesterday', 'myzpool/mydataset#important'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.bookmark('myzpool/mydataset@yesterday', 'myzpool/mydataset#important'))
|
||||
|
||||
def test_holds_success(self):
|
||||
'''
|
||||
Tests zfs holds success
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': {'important ': 'Wed Dec 23 21:06 2015', 'release-1.0': 'Wed Dec 23 21:08 2015'}}
|
||||
res = OrderedDict([
|
||||
('important', 'Wed Dec 23 21:06 2015'),
|
||||
('release-1.0', 'Wed Dec 23 21:08 2015'),
|
||||
])
|
||||
ret = {'pid': 40216, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool/mydataset@baseline\timportant \tWed Dec 23 21:06 2015\nmyzpool/mydataset@baseline\trelease-1.0\tWed Dec 23 21:08 2015'}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.holds('myzpool/mydataset@baseline'))
|
||||
|
||||
def test_holds_failure(self):
|
||||
'''
|
||||
Tests zfs holds failure
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': "cannot open 'myzpool/mydataset@baseline': dataset does not exist"}
|
||||
res = OrderedDict([
|
||||
('error', "cannot open 'myzpool/mydataset@baseline': dataset does not exist"),
|
||||
])
|
||||
ret = {'pid': 40993, 'retcode': 1, 'stderr': "cannot open 'myzpool/mydataset@baseline': dataset does not exist", 'stdout': 'no datasets available'}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.holds('myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.holds('myzpool/mydataset@baseline'))
|
||||
|
||||
def test_hold_success(self):
|
||||
'''
|
||||
Tests zfs hold success
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': {'important': 'held'}, 'myzpool/mydataset@release-1.0': {'important': 'held'}}
|
||||
res = OrderedDict([('held', True)])
|
||||
ret = {'pid': 50876, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.hold('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'))
|
||||
|
||||
def test_hold_failure(self):
|
||||
'''
|
||||
Tests zfs hold failure
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': {'important': 'tag already exists on this dataset'}}
|
||||
res = OrderedDict([
|
||||
('held', False),
|
||||
('error', "cannot hold snapshot 'myzpool/mydataset@baseline': tag already exists on this dataset"),
|
||||
])
|
||||
ret = {'pid': 51006, 'retcode': 1, 'stderr': "cannot hold snapshot 'myzpool/mydataset@baseline': tag already exists on this dataset", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.hold('important', 'myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.hold('important', 'myzpool/mydataset@baseline'))
|
||||
|
||||
def test_release_success(self):
|
||||
'''
|
||||
Tests zfs release success
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': {'important': 'released'}, 'myzpool/mydataset@release-1.0': {'important': 'released'}}
|
||||
res = OrderedDict([('released', True)])
|
||||
ret = {'pid': 50876, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.release('important', 'myzpool/mydataset@baseline', 'myzpool/mydataset@release-1.0'))
|
||||
|
||||
def test_release_failure(self):
|
||||
'''
|
||||
Tests zfs release failure
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': {'important': 'no such tag on this dataset'}}
|
||||
res = OrderedDict([
|
||||
('released', False),
|
||||
('error', "cannot release hold from snapshot 'myzpool/mydataset@baseline': no such tag on this dataset"),
|
||||
])
|
||||
ret = {'pid': 51006, 'retcode': 1, 'stderr': "cannot release hold from snapshot 'myzpool/mydataset@baseline': no such tag on this dataset", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.release('important', 'myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.release('important', 'myzpool/mydataset@baseline'))
|
||||
|
||||
def test_snapshot_success(self):
|
||||
'''
|
||||
Tests zfs snapshot success
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': 'snapshotted'}
|
||||
res = OrderedDict([('snapshotted', True)])
|
||||
ret = {'pid': 69125, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline'))
|
||||
|
||||
def test_snapshot_failure(self):
|
||||
'''
|
||||
Tests zfs snapshot failure
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': 'dataset already exists'}
|
||||
res = OrderedDict([
|
||||
('snapshotted', False),
|
||||
('error', "cannot create snapshot 'myzpool/mydataset@baseline': dataset already exists"),
|
||||
])
|
||||
ret = {'pid': 68526, 'retcode': 1, 'stderr': "cannot create snapshot 'myzpool/mydataset@baseline': dataset already exists", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline'))
|
||||
|
||||
def test_snapshot_failure2(self):
|
||||
'''
|
||||
Tests zfs snapshot failure
|
||||
'''
|
||||
res = {'myzpool/mydataset@baseline': 'dataset does not exist'}
|
||||
res = OrderedDict([
|
||||
('snapshotted', False),
|
||||
('error', "cannot open 'myzpool/mydataset': dataset does not exist"),
|
||||
])
|
||||
ret = {'pid': 69256, 'retcode': 2, 'stderr': "cannot open 'myzpool/mydataset': dataset does not exist\nusage:\n\tsnapshot [-r] [-o property=value] ... <filesystem|volume>@<snap> ...\n\nFor the property list, run: zfs set|get\n\nFor the delegated permission list, run: zfs allow|unallow", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.snapshot('myzpool/mydataset@baseline'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.snapshot('myzpool/mydataset@baseline'))
|
||||
|
||||
def test_set_success(self):
|
||||
'''
|
||||
Tests zfs set success
|
||||
'''
|
||||
res = {'myzpool/mydataset': {'compression': 'set'}}
|
||||
res = OrderedDict([('set', True)])
|
||||
ret = {'pid': 79736, 'retcode': 0, 'stderr': '', 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.set('myzpool/mydataset', compression='lz4'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.set('myzpool/mydataset', compression='lz4'))
|
||||
|
||||
def test_set_failure(self):
|
||||
'''
|
||||
Tests zfs set failure
|
||||
'''
|
||||
res = {'myzpool/mydataset': {'canmount': "'canmount' must be one of 'on | off | noauto'"}}
|
||||
res = OrderedDict([
|
||||
('set', False),
|
||||
('error', "cannot set property for 'myzpool/mydataset': 'canmount' must be one of 'on | off | noauto'"),
|
||||
])
|
||||
ret = {'pid': 79887, 'retcode': 1, 'stderr': "cannot set property for 'myzpool/mydataset': 'canmount' must be one of 'on | off | noauto'", 'stdout': ''}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.set('myzpool/mydataset', canmount='lz4'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.set('myzpool/mydataset', canmount='lz4'))
|
||||
|
||||
def test_get_success(self):
|
||||
'''
|
||||
Tests zfs get success
|
||||
'''
|
||||
res = OrderedDict([('myzpool', {'used': {'value': '844G'}})])
|
||||
ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t844G'}
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('used', OrderedDict([
|
||||
('value', 906238099456),
|
||||
])),
|
||||
])),
|
||||
])
|
||||
ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t906238099456'}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.get('myzpool', properties='used', fields='value'), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.get('myzpool', properties='used', fields='value'))
|
||||
|
||||
def test_get_parsable_success(self):
|
||||
'''
|
||||
Tests zfs get with parsable output
|
||||
'''
|
||||
res = OrderedDict([('myzpool', {'used': {'value': 905792561152}})])
|
||||
ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t905792561152'}
|
||||
res = OrderedDict([
|
||||
('myzpool', OrderedDict([
|
||||
('used', OrderedDict([
|
||||
('value', '844G'),
|
||||
])),
|
||||
])),
|
||||
])
|
||||
ret = {'pid': 562, 'retcode': 0, 'stderr': '', 'stdout': 'myzpool\tused\t906238099456'}
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
self.assertEqual(zfs.get('myzpool', properties='used', fields='value', parsable=True), res)
|
||||
with patch.dict(zfs.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(res, zfs.get('myzpool', properties='used', fields='value', parsable=False))
|
||||
|
|
|
@ -1,9 +1,12 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>`
|
||||
Tests for salt.modules.zpool
|
||||
|
||||
tests.unit.modules.zpool_test
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>, Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maturity: new
|
||||
:depends: salt.utils.zfs
|
||||
:platform: illumos,freebsd,linux
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
|
@ -13,18 +16,24 @@ from __future__ import absolute_import, print_function, unicode_literals
|
|||
from tests.support.mixins import LoaderModuleMockMixin
|
||||
from tests.support.unit import skipIf, TestCase
|
||||
from tests.support.mock import (
|
||||
Mock,
|
||||
MagicMock,
|
||||
patch,
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON,
|
||||
)
|
||||
|
||||
# Import test data from salt.utils.zfs test
|
||||
from tests.unit.utils.test_zfs import utils_patch
|
||||
|
||||
# Import Salt Execution module to test
|
||||
import salt.utils.zfs
|
||||
import salt.modules.zpool as zpool
|
||||
|
||||
# Import Salt Utils
|
||||
import salt.loader
|
||||
from salt.utils.odict import OrderedDict
|
||||
import salt.utils.decorators
|
||||
import salt.utils.decorators.path
|
||||
|
||||
|
||||
# Skip this test case if we don't have access to mock!
|
||||
|
@ -34,11 +43,16 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
This class contains a set of functions that test salt.modules.zpool module
|
||||
'''
|
||||
def setup_loader_modules(self):
|
||||
patcher = patch('salt.modules.zpool._check_zpool',
|
||||
MagicMock(return_value='/sbin/zpool'))
|
||||
patcher.start()
|
||||
self.addCleanup(patcher.stop)
|
||||
return {zpool: {}}
|
||||
self.opts = opts = salt.config.DEFAULT_MINION_OPTS
|
||||
utils = salt.loader.utils(opts, whitelist=['zfs'])
|
||||
zpool_obj = {
|
||||
zpool: {
|
||||
'__opts__': opts,
|
||||
'__utils__': utils,
|
||||
}
|
||||
}
|
||||
|
||||
return zpool_obj
|
||||
|
||||
def test_exists_success(self):
|
||||
'''
|
||||
|
@ -50,7 +64,8 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
self.assertTrue(zpool.exists('myzpool'))
|
||||
|
||||
def test_exists_failure(self):
|
||||
|
@ -62,7 +77,9 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = "cannot open 'myzpool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
self.assertFalse(zpool.exists('myzpool'))
|
||||
|
||||
def test_healthy(self):
|
||||
|
@ -74,7 +91,9 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
self.assertTrue(zpool.healthy())
|
||||
|
||||
def test_status(self):
|
||||
|
@ -88,18 +107,19 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
" scan: scrub repaired 0 in 0h6m with 0 errors on Mon Dec 21 02:06:17 2015",
|
||||
"config:",
|
||||
"",
|
||||
" NAME STATE READ WRITE CKSUM",
|
||||
" mypool ONLINE 0 0 0",
|
||||
" mirror-0 ONLINE 0 0 0",
|
||||
" c2t0d0 ONLINE 0 0 0",
|
||||
" c2t1d0 ONLINE 0 0 0",
|
||||
"\tNAME STATE READ WRITE CKSUM",
|
||||
"\tmypool ONLINE 0 0 0",
|
||||
"\t mirror-0 ONLINE 0 0 0",
|
||||
"\t c2t0d0 ONLINE 0 0 0",
|
||||
"\t c2t1d0 ONLINE 0 0 0",
|
||||
"",
|
||||
"errors: No known data errors",
|
||||
])
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.status()
|
||||
self.assertEqual('ONLINE', ret['mypool']['state'])
|
||||
|
||||
|
@ -121,44 +141,82 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.iostat('mypool')
|
||||
self.assertEqual('46.7G', ret['mypool']['mypool']['capacity-alloc'])
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.iostat('mypool', parsable=False)
|
||||
self.assertEqual('46.7G', ret['mypool']['capacity-alloc'])
|
||||
|
||||
def test_iostat_parsable(self):
|
||||
'''
|
||||
Tests successful return of iostat function
|
||||
|
||||
.. note:
|
||||
The command output is the same as the non parsable!
|
||||
There is no -p flag for zpool iostat, but our type
|
||||
conversions can handle this!
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = "\n".join([
|
||||
" capacity operations bandwidth",
|
||||
"pool alloc free read write read write",
|
||||
"---------- ----- ----- ----- ----- ----- -----",
|
||||
"mypool 46.7G 64.3G 4 19 113K 331K",
|
||||
" mirror 46.7G 64.3G 4 19 113K 331K",
|
||||
" c2t0d0 - - 1 10 114K 334K",
|
||||
" c2t1d0 - - 1 10 114K 334K",
|
||||
"---------- ----- ----- ----- ----- ----- -----",
|
||||
])
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.iostat('mypool', parsable=True)
|
||||
self.assertEqual(50143743180, ret['mypool']['capacity-alloc'])
|
||||
|
||||
def test_list(self):
|
||||
'''
|
||||
Tests successful return of list function
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = "mypool\t1.81T\t714G\t1.11T\t38%\tONLINE"
|
||||
ret['stdout'] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch('salt.modules.zpool._check_features',
|
||||
MagicMock(return_value=False)):
|
||||
ret = zpool.list_()
|
||||
res = OrderedDict([('mypool', {'alloc': '714G', 'cap': '38%', 'free': '1.11T',
|
||||
'health': 'ONLINE', 'size': '1.81T'})])
|
||||
self.assertEqual(res, ret)
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.list_(parsable=False)
|
||||
res = OrderedDict([('mypool', OrderedDict([
|
||||
('size', '1.81T'),
|
||||
('alloc', '661G'),
|
||||
('free', '1.17T'),
|
||||
('cap', '35%'),
|
||||
('frag', '11%'),
|
||||
('health', 'ONLINE'),
|
||||
]))])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_list_parsable(self):
|
||||
'''
|
||||
Tests successful return of list function with parsable output
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = "mypool\t1992864825344\t767076794368\t1225788030976\t38\tONLINE"
|
||||
ret['stdout'] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch('salt.modules.zpool._check_features',
|
||||
MagicMock(return_value=False)):
|
||||
ret = zpool.list_()
|
||||
res = OrderedDict([('mypool', {'alloc': 767076794368, 'cap': 38,
|
||||
'free': 1225788030976, 'health': 'ONLINE',
|
||||
'size': 1992864825344})])
|
||||
self.assertEqual(res, ret)
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.list_(parsable=True)
|
||||
res = OrderedDict([('mypool', OrderedDict([
|
||||
('size', 1990116046274),
|
||||
('alloc', 709743345664),
|
||||
('free', 1286428604497),
|
||||
('cap', '35%'),
|
||||
('frag', '11%'),
|
||||
('health', 'ONLINE'),
|
||||
]))])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_get(self):
|
||||
'''
|
||||
|
@ -169,24 +227,26 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.get('mypool', 'size')
|
||||
res = OrderedDict([('mypool', OrderedDict([('size', '1.81T')]))])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.get('mypool', 'size', parsable=False)
|
||||
res = OrderedDict(OrderedDict([('size', '1.81T')]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_get_parsable(self):
|
||||
'''
|
||||
Tests successful return of get function with parsable output
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = "size\t1992864825344\t-\n"
|
||||
ret['stdout'] = "size\t1.81T\t-\n"
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.get('mypool', 'size')
|
||||
res = OrderedDict([('mypool', OrderedDict([('size', 1992864825344)]))])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.get('mypool', 'size', parsable=True)
|
||||
res = OrderedDict(OrderedDict([('size', 1990116046274)]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_get_whitespace(self):
|
||||
'''
|
||||
|
@ -197,10 +257,11 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.get('mypool', 'comment')
|
||||
res = OrderedDict([('mypool', OrderedDict([('comment', "'my testing pool'")]))])
|
||||
self.assertEqual(res, ret)
|
||||
res = OrderedDict(OrderedDict([('comment', "my testing pool")]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_scrub_start(self):
|
||||
'''
|
||||
|
@ -213,11 +274,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.scrub('mypool')
|
||||
res = OrderedDict([('mypool', OrderedDict([('scrubbing', True)]))])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
|
||||
patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.scrub('mypool')
|
||||
res = OrderedDict(OrderedDict([('scrubbing', True)]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_scrub_pause(self):
|
||||
'''
|
||||
|
@ -230,11 +292,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.scrub('mypool', pause=True)
|
||||
res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
|
||||
patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.scrub('mypool', pause=True)
|
||||
res = OrderedDict(OrderedDict([('scrubbing', False)]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_scrub_stop(self):
|
||||
'''
|
||||
|
@ -247,11 +310,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.scrub('mypool', stop=True)
|
||||
res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
|
||||
patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.scrub('mypool', stop=True)
|
||||
res = OrderedDict(OrderedDict([('scrubbing', False)]))
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_split_success(self):
|
||||
'''
|
||||
|
@ -262,14 +326,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = Mock()
|
||||
mock_exists.side_effect = [False, True]
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('backuppool', 'split off from datapool')])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('split', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_split_exist_new(self):
|
||||
'''
|
||||
|
@ -277,17 +339,15 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
ret['stderr'] = "Unable to split datapool: pool already exists"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = Mock()
|
||||
mock_exists.side_effect = [True, True]
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('backuppool', 'storage pool already exists')])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('split', False), ('error', 'Unable to split datapool: pool already exists')])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_split_missing_pool(self):
|
||||
'''
|
||||
|
@ -295,17 +355,15 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
ret['stderr'] = "cannot open 'datapool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = Mock()
|
||||
mock_exists.side_effect = [False, False]
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('datapool', 'storage pool does not exists')])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('split', False), ('error', "cannot open 'datapool': no such pool")])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_split_not_mirror(self):
|
||||
'''
|
||||
|
@ -316,15 +374,12 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = "Unable to split datapool: Source pool must be composed only of mirrors"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
mock_exists = Mock()
|
||||
mock_exists.side_effect = [False, True]
|
||||
|
||||
with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('backuppool', 'Unable to split datapool: '
|
||||
'Source pool must be composed only of mirrors')])
|
||||
self.assertEqual(res, ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.split('datapool', 'backuppool')
|
||||
res = OrderedDict([('split', False), ('error', 'Unable to split datapool: Source pool must be composed only of mirrors')])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_labelclear_success(self):
|
||||
'''
|
||||
|
@ -335,10 +390,31 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
|
||||
res = OrderedDict([('/dev/rdsk/c0t0d0', 'cleared')])
|
||||
self.assertEqual(res, ret)
|
||||
res = OrderedDict([('labelcleared', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_labelclear_nodevice(self):
|
||||
'''
|
||||
Tests labelclear on non existing device
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "failed to open /dev/rdsk/c0t0d0: No such file or directory"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
|
||||
res = OrderedDict([
|
||||
('labelcleared', False),
|
||||
('error', 'failed to open /dev/rdsk/c0t0d0: No such file or directory'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_labelclear_cleared(self):
|
||||
'''
|
||||
|
@ -349,10 +425,15 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
ret['stderr'] = "failed to read label from /dev/rdsk/c0t0d0"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
|
||||
res = OrderedDict([('/dev/rdsk/c0t0d0', 'failed to read label from /dev/rdsk/c0t0d0')])
|
||||
self.assertEqual(res, ret)
|
||||
res = OrderedDict([
|
||||
('labelcleared', False),
|
||||
('error', 'failed to read label from /dev/rdsk/c0t0d0'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_labelclear_exported(self):
|
||||
'''
|
||||
|
@ -366,7 +447,431 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
|
|||
])
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
|
||||
res = OrderedDict([('/dev/rdsk/c0t0d0', '/dev/rdsk/c0t0d0 is a member of exported pool "mypool"')])
|
||||
self.assertEqual(res, ret)
|
||||
res = OrderedDict([
|
||||
('labelcleared', False),
|
||||
('error', 'use \'force=True\' to override the following error:\n/dev/rdsk/c0t0d0 is a member of exported pool "mypool"'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
@skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable')
|
||||
def test_create_file_vdev_success(self):
|
||||
'''
|
||||
Tests create_file_vdev when out of space
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.create_file_vdev('64M', '/vdisks/disk0')
|
||||
res = OrderedDict([
|
||||
('/vdisks/disk0', 'created'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
@skipIf(not salt.utils.path.which('mkfile'), 'Cannot find mkfile executable')
|
||||
def test_create_file_vdev_nospace(self):
|
||||
'''
|
||||
Tests create_file_vdev when out of space
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "/vdisks/disk0: initialized 10424320 of 67108864 bytes: No space left on device"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.create_file_vdev('64M', '/vdisks/disk0')
|
||||
res = OrderedDict([
|
||||
('/vdisks/disk0', 'failed'),
|
||||
('error', OrderedDict([
|
||||
('/vdisks/disk0', ' initialized 10424320 of 67108864 bytes: No space left on device'),
|
||||
])),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_export_success(self):
|
||||
'''
|
||||
Tests export
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.export('mypool')
|
||||
res = OrderedDict([('exported', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_export_nopool(self):
|
||||
'''
|
||||
Tests export when the pool does not exists
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.export('mypool')
|
||||
res = OrderedDict([('exported', False), ('error', "cannot open 'mypool': no such pool")])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_import_success(self):
|
||||
'''
|
||||
Tests import
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.import_('mypool')
|
||||
res = OrderedDict([('imported', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_import_duplicate(self):
|
||||
'''
|
||||
Tests import with already imported pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "\n".join([
|
||||
"cannot import 'mypool': a pool with that name already exists",
|
||||
"use the form 'zpool import <pool | id> <newpool>' to give it a new name",
|
||||
])
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.import_('mypool')
|
||||
res = OrderedDict([
|
||||
('imported', False),
|
||||
('error', "cannot import 'mypool': a pool with that name already exists\nuse the form 'zpool import <pool | id> <newpool>' to give it a new name"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_import_nopool(self):
|
||||
'''
|
||||
Tests import
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot import 'mypool': no such pool available"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.import_('mypool')
|
||||
res = OrderedDict([
|
||||
('imported', False),
|
||||
('error', "cannot import 'mypool': no such pool available"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_online_success(self):
|
||||
'''
|
||||
Tests online
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.online('mypool', '/dev/rdsk/c0t0d0')
|
||||
res = OrderedDict([('onlined', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_online_nodevice(self):
|
||||
'''
|
||||
Tests online
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot online /dev/rdsk/c0t0d1: no such device in pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.online('mypool', '/dev/rdsk/c0t0d1')
|
||||
res = OrderedDict([
|
||||
('onlined', False),
|
||||
('error', 'cannot online /dev/rdsk/c0t0d1: no such device in pool'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_offline_success(self):
|
||||
'''
|
||||
Tests offline
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.offline('mypool', '/dev/rdsk/c0t0d0')
|
||||
res = OrderedDict([('offlined', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_offline_nodevice(self):
|
||||
'''
|
||||
Tests offline
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot offline /dev/rdsk/c0t0d1: no such device in pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.offline('mypool', '/dev/rdsk/c0t0d1')
|
||||
res = OrderedDict([
|
||||
('offlined', False),
|
||||
('error', 'cannot offline /dev/rdsk/c0t0d1: no such device in pool'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_offline_noreplica(self):
|
||||
'''
|
||||
Tests offline
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot offline /dev/rdsk/c0t0d1: no valid replicas"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.offline('mypool', '/dev/rdsk/c0t0d1')
|
||||
res = OrderedDict([
|
||||
('offlined', False),
|
||||
('error', 'cannot offline /dev/rdsk/c0t0d1: no valid replicas'),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_reguid_success(self):
|
||||
'''
|
||||
Tests reguid
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.reguid('mypool')
|
||||
res = OrderedDict([('reguided', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_reguid_nopool(self):
|
||||
'''
|
||||
Tests reguid with missing pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.reguid('mypool')
|
||||
res = OrderedDict([
|
||||
('reguided', False),
|
||||
('error', "cannot open 'mypool': no such pool"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_reopen_success(self):
|
||||
'''
|
||||
Tests reopen
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.reopen('mypool')
|
||||
res = OrderedDict([('reopened', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_reopen_nopool(self):
|
||||
'''
|
||||
Tests reopen with missing pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.reopen('mypool')
|
||||
res = OrderedDict([
|
||||
('reopened', False),
|
||||
('error', "cannot open 'mypool': no such pool"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_upgrade_success(self):
|
||||
'''
|
||||
Tests upgrade
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.upgrade('mypool')
|
||||
res = OrderedDict([('upgraded', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_upgrade_nopool(self):
|
||||
'''
|
||||
Tests upgrade with missing pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.upgrade('mypool')
|
||||
res = OrderedDict([
|
||||
('upgraded', False),
|
||||
('error', "cannot open 'mypool': no such pool"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_history_success(self):
|
||||
'''
|
||||
Tests history
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = "\n".join([
|
||||
"History for 'mypool':",
|
||||
"2018-01-18.16:56:12 zpool create -f mypool /dev/rdsk/c0t0d0",
|
||||
"2018-01-19.16:01:55 zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1",
|
||||
])
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.history('mypool')
|
||||
res = OrderedDict([
|
||||
('mypool', OrderedDict([
|
||||
('2018-01-18.16:56:12', 'zpool create -f mypool /dev/rdsk/c0t0d0'),
|
||||
('2018-01-19.16:01:55', 'zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1'),
|
||||
])),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_history_nopool(self):
|
||||
'''
|
||||
Tests history with missing pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.history('mypool')
|
||||
res = OrderedDict([
|
||||
('error', "cannot open 'mypool': no such pool"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_clear_success(self):
|
||||
'''
|
||||
Tests clear
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = ""
|
||||
ret['retcode'] = 0
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.clear('mypool')
|
||||
res = OrderedDict([('cleared', True)])
|
||||
self.assertEqual(ret, res)
|
||||
|
||||
def test_clear_nopool(self):
|
||||
'''
|
||||
Tests clear with missing pool
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot open 'mypool': no such pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.clear('mypool')
|
||||
res = OrderedDict([
|
||||
('cleared', False),
|
||||
('error', "cannot open 'mypool': no such pool"),
|
||||
])
|
||||
|
||||
def test_clear_nodevice(self):
|
||||
'''
|
||||
Tests clear with non existign device
|
||||
'''
|
||||
ret = {}
|
||||
ret['stdout'] = ""
|
||||
ret['stderr'] = "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"
|
||||
ret['retcode'] = 1
|
||||
mock_cmd = MagicMock(return_value=ret)
|
||||
|
||||
with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}), \
|
||||
patch.dict(zpool.__utils__, utils_patch):
|
||||
ret = zpool.clear('mypool', '/dev/rdsk/c0t0d0')
|
||||
res = OrderedDict([
|
||||
('cleared', False),
|
||||
('error', "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"),
|
||||
])
|
||||
self.assertEqual(ret, res)
|
||||
|
|
696
tests/unit/states/test_zfs.py
Normal file
696
tests/unit/states/test_zfs.py
Normal file
|
@ -0,0 +1,696 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Tests for salt.states.zfs
|
||||
|
||||
:codeauthor: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
|
||||
:maturity: new
|
||||
:depends: salt.utils.zfs, salt.modules.zfs
|
||||
:platform: illumos,freebsd,linux
|
||||
'''
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import, unicode_literals, print_function
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from tests.support.mixins import LoaderModuleMockMixin
|
||||
from tests.support.unit import skipIf, TestCase
|
||||
from tests.support.mock import (
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON,
|
||||
MagicMock,
|
||||
patch)
|
||||
|
||||
# Import test data from salt.utils.zfs test
|
||||
from tests.unit.utils.test_zfs import utils_patch
|
||||
|
||||
# Import Salt Execution module to test
|
||||
import salt.utils.zfs
|
||||
import salt.states.zfs as zfs
|
||||
|
||||
# Import Salt Utils
|
||||
import salt.loader
|
||||
from salt.utils.odict import OrderedDict
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
class ZfsTestCase(TestCase, LoaderModuleMockMixin):
|
||||
'''
|
||||
Test cases for salt.states.zfs
|
||||
'''
|
||||
def setup_loader_modules(self):
|
||||
self.opts = opts = salt.config.DEFAULT_MINION_OPTS
|
||||
utils = salt.loader.utils(opts, whitelist=['zfs'])
|
||||
zfs_obj = {
|
||||
zfs: {
|
||||
'__opts__': opts,
|
||||
'__grains__': {'kernel': 'SunOS'},
|
||||
'__utils__': utils,
|
||||
}
|
||||
}
|
||||
|
||||
return zfs_obj
|
||||
|
||||
def test_filesystem_absent_nofs(self):
|
||||
'''
|
||||
Test if filesystem is absent (non existing filesystem)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'filesystem myzpool/filesystem is absent',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem'))
|
||||
|
||||
def test_filesystem_absent_removed(self):
|
||||
'''
|
||||
Test if filesystem is absent
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'filesystem myzpool/filesystem was destroyed',
|
||||
'changes': {'myzpool/filesystem': 'destroyed'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem'))
|
||||
|
||||
def test_filesystem_absent_fail(self):
|
||||
'''
|
||||
Test if filesystem is absent (with snapshots)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': False,
|
||||
'comment': "\n".join([
|
||||
"cannot destroy 'myzpool/filesystem': filesystem has children",
|
||||
"use 'recursive=True' to destroy the following datasets:",
|
||||
"myzpool/filesystem@snap",
|
||||
]),
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([
|
||||
('destroyed', False),
|
||||
('error', "\n".join([
|
||||
"cannot destroy 'myzpool/filesystem': filesystem has children",
|
||||
"use 'recursive=True' to destroy the following datasets:",
|
||||
"myzpool/filesystem@snap",
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_absent('myzpool/filesystem'))
|
||||
|
||||
def test_volume_absent_novol(self):
|
||||
'''
|
||||
Test if volume is absent (non existing volume)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': True,
|
||||
'comment': 'volume myzpool/volume is absent',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_absent('myzpool/volume'))
|
||||
|
||||
def test_volume_absent_removed(self):
|
||||
'''
|
||||
Test if volume is absent
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': True,
|
||||
'comment': 'volume myzpool/volume was destroyed',
|
||||
'changes': {'myzpool/volume': 'destroyed'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_absent('myzpool/volume'))
|
||||
|
||||
def test_volume_absent_fail(self):
|
||||
'''
|
||||
Test if volume is absent (with snapshots)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': False,
|
||||
'comment': "\n".join([
|
||||
"cannot destroy 'myzpool/volume': volume has children",
|
||||
"use 'recursive=True' to destroy the following datasets:",
|
||||
"myzpool/volume@snap",
|
||||
]),
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([
|
||||
('destroyed', False),
|
||||
('error', "\n".join([
|
||||
"cannot destroy 'myzpool/volume': volume has children",
|
||||
"use 'recursive=True' to destroy the following datasets:",
|
||||
"myzpool/volume@snap",
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_absent('myzpool/volume'))
|
||||
|
||||
def test_snapshot_absent_nosnap(self):
|
||||
'''
|
||||
Test if snapshot is absent (non existing snapshot)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': True,
|
||||
'comment': 'snapshot myzpool/filesystem@snap is absent',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_absent('myzpool/filesystem@snap'))
|
||||
|
||||
def test_snapshot_absent_removed(self):
|
||||
'''
|
||||
Test if snapshot is absent
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': True,
|
||||
'comment': 'snapshot myzpool/filesystem@snap was destroyed',
|
||||
'changes': {'myzpool/filesystem@snap': 'destroyed'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_absent('myzpool/filesystem@snap'))
|
||||
|
||||
def test_snapshot_absent_fail(self):
|
||||
'''
|
||||
Test if snapshot is absent (with snapshots)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': False,
|
||||
'comment': 'cannot destroy snapshot myzpool/filesystem@snap: dataset is busy',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([
|
||||
('destroyed', False),
|
||||
('error', 'cannot destroy snapshot myzpool/filesystem@snap: dataset is busy'),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_absent('myzpool/filesystem@snap'))
|
||||
|
||||
def test_bookmark_absent_nobook(self):
|
||||
'''
|
||||
Test if bookmark is absent (non existing bookmark)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem#book',
|
||||
'result': True,
|
||||
'comment': 'bookmark myzpool/filesystem#book is absent',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.bookmark_absent('myzpool/filesystem#book'))
|
||||
|
||||
def test_bookmark_absent_removed(self):
|
||||
'''
|
||||
Test if bookmark is absent
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem#book',
|
||||
'result': True,
|
||||
'comment': 'bookmark myzpool/filesystem#book was destroyed',
|
||||
'changes': {'myzpool/filesystem#book': 'destroyed'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_destroy = MagicMock(return_value=OrderedDict([('destroyed', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.destroy': mock_destroy}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.bookmark_absent('myzpool/filesystem#book'))
|
||||
|
||||
def test_hold_absent_nohold(self):
|
||||
'''
|
||||
Test if hold is absent (non existing hold)
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': True,
|
||||
'comment': 'hold myhold is absent',
|
||||
'changes': {}}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_hold_absent_removed(self):
|
||||
'''
|
||||
Test if hold is absent
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': True,
|
||||
'comment': 'hold myhold released',
|
||||
'changes': OrderedDict([
|
||||
('myzpool/filesystem@snap', OrderedDict([
|
||||
('myhold', 'released'),
|
||||
])),
|
||||
])}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([('myhold', 'Thu Feb 15 16:24 2018')]))
|
||||
mock_release = MagicMock(return_value=OrderedDict([('released', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.release': mock_release}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_hold_absent_fail(self):
|
||||
'''
|
||||
Test if hold is absent (non existing snapshot)
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': False,
|
||||
'comment': "cannot open 'myzpool/filesystem@snap': dataset does not exist",
|
||||
'changes': {}}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([
|
||||
('error', "cannot open 'myzpool/filesystem@snap': dataset does not exist"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_absent('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_hold_present(self):
|
||||
'''
|
||||
Test if hold is present (hold already present)
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': True,
|
||||
'comment': 'hold myhold is present for myzpool/filesystem@snap',
|
||||
'changes': {}}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([('myhold', 'Thu Feb 15 16:24 2018')]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_hold_present_new(self):
|
||||
'''
|
||||
Test if hold is present (new)
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': True,
|
||||
'comment': 'hold myhold added to myzpool/filesystem@snap',
|
||||
'changes': {'myzpool/filesystem@snap': {'myhold': 'held'}}}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([]))
|
||||
mock_hold = MagicMock(return_value=OrderedDict([('held', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.hold': mock_hold}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_hold_present_fail(self):
|
||||
'''
|
||||
Test if hold is present (using non existing snapshot)
|
||||
'''
|
||||
ret = {'name': 'myhold',
|
||||
'result': False,
|
||||
'comment': "cannot hold snapshot 'zsalt/filesystem@snap': dataset does not exist",
|
||||
'changes': {}}
|
||||
|
||||
mock_holds = MagicMock(return_value=OrderedDict([]))
|
||||
mock_hold = MagicMock(return_value=OrderedDict([
|
||||
('held', False),
|
||||
('error', "cannot hold snapshot 'zsalt/filesystem@snap': dataset does not exist"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.holds': mock_holds}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.hold': mock_hold}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.hold_present('myhold', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_filesystem_present(self):
|
||||
'''
|
||||
Test if filesystem is present (existing filesystem)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'filesystem myzpool/filesystem is uptodate',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/filesystem', OrderedDict([
|
||||
('type', OrderedDict([
|
||||
('value', 'filesystem'),
|
||||
])),
|
||||
('compression', OrderedDict([
|
||||
('value', False),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem'))
|
||||
|
||||
def test_filesystem_present_new(self):
|
||||
'''
|
||||
Test if filesystem is present (non existing filesystem)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'filesystem myzpool/filesystem was created',
|
||||
'changes': {'myzpool/filesystem': u'created'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_create = MagicMock(return_value=OrderedDict([('created', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem'))
|
||||
|
||||
def test_filesystem_present_update(self):
|
||||
'''
|
||||
Test if filesystem is present (non existing filesystem)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'filesystem myzpool/filesystem was updated',
|
||||
'changes': {'myzpool/filesystem': {'compression': 'lz4'}}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_set = MagicMock(return_value=OrderedDict([('set', True)]))
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/filesystem', OrderedDict([
|
||||
('type', OrderedDict([
|
||||
('value', 'filesystem'),
|
||||
])),
|
||||
('compression', OrderedDict([
|
||||
('value', False),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.set': mock_set}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_present(
|
||||
name='myzpool/filesystem',
|
||||
properties={'compression': 'lz4'},
|
||||
))
|
||||
|
||||
def test_filesystem_present_fail(self):
|
||||
'''
|
||||
Test if filesystem is present (non existing pool)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': False,
|
||||
'comment': "cannot create 'myzpool/filesystem': no such pool 'myzpool'",
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_create = MagicMock(return_value=OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool/filesystem': no such pool 'myzpool'"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.filesystem_present('myzpool/filesystem'))
|
||||
|
||||
def test_volume_present(self):
|
||||
'''
|
||||
Test if volume is present (existing volume)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': True,
|
||||
'comment': 'volume myzpool/volume is uptodate',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/volume', OrderedDict([
|
||||
('type', OrderedDict([
|
||||
('value', 'volume'),
|
||||
])),
|
||||
('compression', OrderedDict([
|
||||
('value', False),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G'))
|
||||
|
||||
def test_volume_present_new(self):
|
||||
'''
|
||||
Test if volume is present (non existing volume)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': True,
|
||||
'comment': 'volume myzpool/volume was created',
|
||||
'changes': {'myzpool/volume': u'created'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_create = MagicMock(return_value=OrderedDict([('created', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G'))
|
||||
|
||||
def test_volume_present_update(self):
|
||||
'''
|
||||
Test if volume is present (non existing volume)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': True,
|
||||
'comment': 'volume myzpool/volume was updated',
|
||||
'changes': {'myzpool/volume': {'compression': 'lz4'}}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_set = MagicMock(return_value=OrderedDict([('set', True)]))
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/volume', OrderedDict([
|
||||
('type', OrderedDict([
|
||||
('value', 'volume'),
|
||||
])),
|
||||
('compression', OrderedDict([
|
||||
('value', False),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.set': mock_set}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_present(
|
||||
name='myzpool/volume',
|
||||
volume_size='1G',
|
||||
properties={'compression': 'lz4'},
|
||||
))
|
||||
|
||||
def test_volume_present_fail(self):
|
||||
'''
|
||||
Test if volume is present (non existing pool)
|
||||
'''
|
||||
ret = {'name': 'myzpool/volume',
|
||||
'result': False,
|
||||
'comment': "cannot create 'myzpool/volume': no such pool 'myzpool'",
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_create = MagicMock(return_value=OrderedDict([
|
||||
('created', False),
|
||||
('error', "cannot create 'myzpool/volume': no such pool 'myzpool'"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.create': mock_create}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.volume_present('myzpool/volume', volume_size='1G'))
|
||||
|
||||
def test_bookmark_present(self):
|
||||
'''
|
||||
Test if bookmark is present (bookmark already present)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem#mybookmark',
|
||||
'result': True,
|
||||
'comment': 'bookmark is present',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_bookmark_present_new(self):
|
||||
'''
|
||||
Test if bookmark is present (new)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem#mybookmark',
|
||||
'result': True,
|
||||
'comment': 'myzpool/filesystem@snap bookmarked as myzpool/filesystem#mybookmark',
|
||||
'changes': {'myzpool/filesystem#mybookmark': 'myzpool/filesystem@snap'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_bookmark = MagicMock(return_value=OrderedDict([('bookmarked', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.bookmark': mock_bookmark}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_bookmark_present_fail(self):
|
||||
'''
|
||||
Test if bookmark is present (using non existing snapshot)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem#mybookmark',
|
||||
'result': False,
|
||||
'comment': "cannot bookmark snapshot 'zsalt/filesystem@snap': dataset does not exist",
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_bookmark = MagicMock(return_value=OrderedDict([
|
||||
('bookmarked', False),
|
||||
('error', "cannot bookmark snapshot 'zsalt/filesystem@snap': dataset does not exist"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.bookmark': mock_bookmark}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.bookmark_present('mybookmark', 'myzpool/filesystem@snap'))
|
||||
|
||||
def test_snapshot_present(self):
|
||||
'''
|
||||
Test if snapshot is present (snapshot already present)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': True,
|
||||
'comment': 'snapshot is present',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap'))
|
||||
|
||||
def test_snapshot_present_new(self):
|
||||
'''
|
||||
Test if snapshot is present (new)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': True,
|
||||
'comment': 'snapshot myzpool/filesystem@snap was created',
|
||||
'changes': {u'myzpool/filesystem@snap': u'snapshotted'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_snapshot = MagicMock(return_value=OrderedDict([('snapshotted', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.snapshot': mock_snapshot}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap'))
|
||||
|
||||
def test_snapshot_present_fail(self):
|
||||
'''
|
||||
Test if snapshot is present (using non existing snapshot)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem@snap',
|
||||
'result': False,
|
||||
'comment': "cannot open 'myzpool/filesystem': dataset does not exist",
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
mock_snapshot = MagicMock(return_value=OrderedDict([
|
||||
('snapshotted', False),
|
||||
('error', "cannot open 'myzpool/filesystem': dataset does not exist"),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.snapshot': mock_snapshot}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.snapshot_present('myzpool/filesystem@snap'))
|
||||
|
||||
def test_propmoted(self):
|
||||
'''
|
||||
Test promotion of clone (already promoted)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'myzpool/filesystem already promoted',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/filesystem', OrderedDict([
|
||||
('origin', OrderedDict([
|
||||
('value', '-'),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.promoted('myzpool/filesystem'))
|
||||
|
||||
def test_propmoted_clone(self):
|
||||
'''
|
||||
Test promotion of clone
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': True,
|
||||
'comment': 'myzpool/filesystem promoted',
|
||||
'changes': {'myzpool/filesystem': 'promoted'}}
|
||||
|
||||
mock_exists = MagicMock(return_value=True)
|
||||
mock_get = MagicMock(return_value=OrderedDict([
|
||||
('myzpool/filesystem', OrderedDict([
|
||||
('origin', OrderedDict([
|
||||
('value', 'myzool/filesystem_source@clean'),
|
||||
])),
|
||||
])),
|
||||
]))
|
||||
mock_promote = MagicMock(return_value=OrderedDict([('promoted', True)]))
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.get': mock_get}), \
|
||||
patch.dict(zfs.__salt__, {'zfs.promote': mock_promote}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.promoted('myzpool/filesystem'))
|
||||
|
||||
def test_propmoted_fail(self):
|
||||
'''
|
||||
Test promotion of clone (unknown dataset)
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': False,
|
||||
'comment': 'dataset myzpool/filesystem does not exist',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.promoted('myzpool/filesystem'))
|
||||
|
||||
def test_scheduled_snapshot_fail(self):
|
||||
'''
|
||||
Test scheduled_snapshot of unknown dataset
|
||||
'''
|
||||
ret = {'name': 'myzpool/filesystem',
|
||||
'result': False,
|
||||
'comment': 'dataset myzpool/filesystem does not exist',
|
||||
'changes': {}}
|
||||
|
||||
mock_exists = MagicMock(return_value=False)
|
||||
with patch.dict(zfs.__salt__, {'zfs.exists': mock_exists}), \
|
||||
patch.dict(zfs.__utils__, utils_patch):
|
||||
self.assertEqual(ret, zfs.scheduled_snapshot('myzpool/filesystem', 'shadow', schedule={'hour': 6}))
|
450
tests/unit/states/test_zpool.py
Normal file
450
tests/unit/states/test_zpool.py
Normal file
|
@ -0,0 +1,450 @@
|
|||
# -*- coding: utf-8 -*-
'''
Tests for salt.states.zpool

:codeauthor: Jorge Schrauwen <sjorge@blackdot.be>
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils.zfs, salt.modules.zpool
:platform: illumos,freebsd,linux
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function

# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
    NO_MOCK,
    NO_MOCK_REASON,
    MagicMock,
    patch)

# Import test data from salt.utils.zfs test
from tests.unit.utils.test_zfs import utils_patch

# Import Salt Execution module to test
import salt.utils.zfs
import salt.states.zpool as zpool

# Import Salt Utils
# NOTE: salt.config is imported explicitly because setup_loader_modules
# reads salt.config.DEFAULT_MINION_OPTS; previously this only worked
# because salt.loader happens to import salt.config transitively.
import salt.config
import salt.loader
from salt.utils.odict import OrderedDict
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.zpool

    Each test patches ``zpool.__salt__`` with MagicMocks for the zpool
    execution-module functions the state calls, plus ``zpool.__utils__``
    with the shared ``utils_patch`` fixture, then asserts on the exact
    state-return dict.
    '''
    def setup_loader_modules(self):
        # NOTE(review): salt.config is not imported in this file's visible
        # import block; this relies on it being pulled in transitively
        # (e.g. via salt.loader) -- confirm / add an explicit import.
        self.opts = opts = salt.config.DEFAULT_MINION_OPTS
        # Only the 'zfs' util module is needed; whitelist keeps setup cheap.
        utils = salt.loader.utils(opts, whitelist=['zfs'])
        zpool_obj = {
            zpool: {
                '__opts__': opts,
                '__grains__': {'kernel': 'SunOS'},
                '__utils__': utils,
            }
        }

        return zpool_obj

    def test_absent_without_pool(self):
        '''
        Test zpool absent without a pool
        '''
        ret = {'name': 'myzpool',
               'result': True,
               'comment': 'storage pool myzpool is absent',
               'changes': {}}

        mock_exists = MagicMock(return_value=False)
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.absent('myzpool'), ret)

    def test_absent_destroy_pool(self):
        '''
        Test zpool absent destroying pool
        '''
        ret = {
            'name': 'myzpool',
            'result': True,
            'comment': 'storage pool myzpool was destroyed',
            'changes': {'myzpool': 'destroyed'},
        }

        mock_exists = MagicMock(return_value=True)
        mock_destroy = MagicMock(return_value=OrderedDict([
            ('destroyed', True),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.destroy': mock_destroy}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.absent('myzpool'), ret)

    def test_absent_exporty_pool(self):
        '''
        Test zpool absent exporting pool
        '''
        ret = {
            'name': 'myzpool',
            'result': True,
            'comment': 'storage pool myzpool was exported',
            'changes': {'myzpool': 'exported'},
        }

        mock_exists = MagicMock(return_value=True)
        # NOTE: local is named mock_destroy but it backs zpool.export here.
        mock_destroy = MagicMock(return_value=OrderedDict([
            ('exported', True),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.export': mock_destroy}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.absent('myzpool', export=True), ret)

    def test_absent_busy(self):
        '''
        Test zpool absent on a busy pool
        '''
        ret = {
            'name': 'myzpool',
            'result': False,
            'comment': "\n".join([
                "cannot unmount '/myzpool': Device busy",
                "cannot export 'myzpool': pool is busy",
            ]),
            'changes': {},
        }

        mock_exists = MagicMock(return_value=True)
        # Export fails; the multi-line error must surface in the comment.
        mock_destroy = MagicMock(return_value=OrderedDict([
            ('exported', False),
            ('error', "\n".join([
                "cannot unmount '/myzpool': Device busy",
                "cannot export 'myzpool': pool is busy",
            ])),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.export': mock_destroy}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.absent('myzpool', export=True), ret)

    def test_present_import_success(self):
        '''
        Test zpool present with import allowed and unimported pool
        '''
        ret = {'name': 'myzpool',
               'result': True,
               'comment': 'storage pool myzpool was imported',
               'changes': {'myzpool': 'imported'}}

        config = {
            'import': True,
        }

        mock_exists = MagicMock(return_value=False)
        mock_import = MagicMock(return_value=OrderedDict([
            ('imported', True),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.import': mock_import}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.present('myzpool', config=config), ret)

    def test_present_import_fail(self):
        '''
        Test zpool present with import allowed and no unimported pool or layout
        '''
        ret = {'name': 'myzpool',
               'result': False,
               'comment': 'storage pool myzpool was not imported, no (valid) layout specified for creation',
               'changes': {}}

        config = {
            'import': True,
        }

        mock_exists = MagicMock(return_value=False)
        mock_import = MagicMock(return_value=OrderedDict([
            ('imported', False),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.import': mock_import}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.present('myzpool', config=config), ret)

    def test_present_create_success(self):
        '''
        Test zpool present with non existing pool
        '''
        ret = {'name': 'myzpool',
               'result': True,
               'comment': 'storage pool myzpool was created',
               'changes': {'myzpool': 'created'}}

        config = {
            'import': False,
        }
        # Two mirrored vdev pairs.
        layout = [
            OrderedDict([('mirror', ['disk0', 'disk1'])]),
            OrderedDict([('mirror', ['disk2', 'disk3'])]),
        ]
        properties = {
            'autoexpand': True,
        }
        filesystem_properties = {
            'quota': '5G',
        }

        mock_exists = MagicMock(return_value=False)
        mock_create = MagicMock(return_value=OrderedDict([
            ('created', True),
            ('vdevs', OrderedDict([
                ('mirror-0', ['/dev/dsk/disk0', '/dev/dsk/disk1']),
                ('mirror-1', ['/dev/dsk/disk2', '/dev/dsk/disk3']),
            ])),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.create': mock_create}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(
                zpool.present(
                    'myzpool',
                    config=config,
                    layout=layout,
                    properties=properties,
                    filesystem_properties=filesystem_properties,
                ),
                ret,
            )

    def test_present_create_fail(self):
        '''
        Test zpool present with non existing pool (without a layout)
        '''
        ret = {'name': 'myzpool',
               'result': False,
               'comment': 'storage pool myzpool was not imported, no (valid) layout specified for creation',
               'changes': {}}

        config = {
            'import': False,
        }

        mock_exists = MagicMock(return_value=False)
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(zpool.present('myzpool', config=config), ret)

    def test_present_create_passthrough_fail(self):
        '''
        Test zpool present where zpool.create fails and the error passes through
        '''
        ret = {'name': 'myzpool',
               'result': False,
               'comment': "\n".join([
                   "invalid vdev specification",
                   "use 'force=True' to override the following errors:",
                   "/data/salt/vdisk0 is part of exported pool 'zsalt'",
                   "/data/salt/vdisk1 is part of exported pool 'zsalt'",
               ]),
               'changes': {}}

        config = {
            'force': False,
            'import': False,
        }
        layout = [
            OrderedDict([('mirror', ['disk0', 'disk1'])]),
            OrderedDict([('mirror', ['disk2', 'disk3'])]),
        ]
        properties = {
            'autoexpand': True,
        }
        filesystem_properties = {
            'quota': '5G',
        }

        mock_exists = MagicMock(return_value=False)
        # Creation fails; the multi-line error must surface in the comment.
        mock_create = MagicMock(return_value=OrderedDict([
            ('created', False),
            ('error', "\n".join([
                "invalid vdev specification",
                "use 'force=True' to override the following errors:",
                "/data/salt/vdisk0 is part of exported pool 'zsalt'",
                "/data/salt/vdisk1 is part of exported pool 'zsalt'",
            ])),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.create': mock_create}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(
                zpool.present(
                    'myzpool',
                    config=config,
                    layout=layout,
                    properties=properties,
                    filesystem_properties=filesystem_properties,
                ),
                ret,
            )

    def test_present_update_success(self):
        '''
        Test zpool present with an existing pool that needs an update
        '''
        # autoexpand is currently True in the fixture; requesting False
        # must trigger a zpool.set and be reported in changes.
        ret = {'name': 'myzpool',
               'result': True,
               'comment': 'properties updated',
               'changes': {'myzpool': {'autoexpand': False}}}

        config = {
            'import': False,
        }
        layout = [
            OrderedDict([('mirror', ['disk0', 'disk1'])]),
            OrderedDict([('mirror', ['disk2', 'disk3'])]),
        ]
        properties = {
            'autoexpand': False,
        }

        mock_exists = MagicMock(return_value=True)
        # Full zpool.get fixture of an existing healthy pool.
        mock_get = MagicMock(return_value=OrderedDict([
            ('comment', 'salt managed pool'),
            ('freeing', 0),
            ('listsnapshots', False),
            ('leaked', 0),
            ('feature@obsolete_counts', 'enabled'),
            ('feature@sha512', 'enabled'),
            ('delegation', True),
            ('dedupditto', '0'),
            ('dedupratio', '1.00x'),
            ('autoexpand', True),
            ('feature@bookmarks', 'enabled'),
            ('allocated', 115712),
            ('guid', 1591906802560842214),
            ('feature@large_blocks', 'enabled'),
            ('size', 2113929216),
            ('feature@enabled_txg', 'active'),
            ('feature@hole_birth', 'active'),
            ('capacity', 0),
            ('feature@multi_vdev_crash_dump', 'enabled'),
            ('feature@extensible_dataset', 'enabled'),
            ('cachefile', '-'),
            ('bootfs', '-'),
            ('autoreplace', True),
            ('readonly', False),
            ('version', '-'),
            ('health', 'ONLINE'),
            ('expandsize', '-'),
            ('feature@embedded_data', 'active'),
            ('feature@lz4_compress', 'active'),
            ('feature@async_destroy', 'enabled'),
            ('feature@skein', 'enabled'),
            ('feature@empty_bpobj', 'enabled'),
            ('feature@spacemap_histogram', 'active'),
            ('bootsize', '-'),
            ('free', 2113813504),
            ('feature@device_removal', 'enabled'),
            ('failmode', 'wait'),
            ('feature@filesystem_limits', 'enabled'),
            ('feature@edonr', 'enabled'),
            ('altroot', '-'),
            ('fragmentation', '0%'),
        ]))
        mock_set = MagicMock(return_value=OrderedDict([
            ('set', True),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.get': mock_get}), \
             patch.dict(zpool.__salt__, {'zpool.set': mock_set}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(
                zpool.present(
                    'myzpool',
                    config=config,
                    layout=layout,
                    properties=properties,
                ),
                ret,
            )

    def test_present_update_nochange_success(self):
        '''
        Test zpool present with an existing pool that needs no update
        '''
        # autoexpand already matches the requested value: no changes.
        ret = {'name': 'myzpool',
               'result': True,
               'comment': 'no update needed',
               'changes': {}}

        config = {
            'import': False,
        }
        layout = [
            OrderedDict([('mirror', ['disk0', 'disk1'])]),
            OrderedDict([('mirror', ['disk2', 'disk3'])]),
        ]
        properties = {
            'autoexpand': True,
        }

        mock_exists = MagicMock(return_value=True)
        # Same zpool.get fixture as test_present_update_success.
        mock_get = MagicMock(return_value=OrderedDict([
            ('comment', 'salt managed pool'),
            ('freeing', 0),
            ('listsnapshots', False),
            ('leaked', 0),
            ('feature@obsolete_counts', 'enabled'),
            ('feature@sha512', 'enabled'),
            ('delegation', True),
            ('dedupditto', '0'),
            ('dedupratio', '1.00x'),
            ('autoexpand', True),
            ('feature@bookmarks', 'enabled'),
            ('allocated', 115712),
            ('guid', 1591906802560842214),
            ('feature@large_blocks', 'enabled'),
            ('size', 2113929216),
            ('feature@enabled_txg', 'active'),
            ('feature@hole_birth', 'active'),
            ('capacity', 0),
            ('feature@multi_vdev_crash_dump', 'enabled'),
            ('feature@extensible_dataset', 'enabled'),
            ('cachefile', '-'),
            ('bootfs', '-'),
            ('autoreplace', True),
            ('readonly', False),
            ('version', '-'),
            ('health', 'ONLINE'),
            ('expandsize', '-'),
            ('feature@embedded_data', 'active'),
            ('feature@lz4_compress', 'active'),
            ('feature@async_destroy', 'enabled'),
            ('feature@skein', 'enabled'),
            ('feature@empty_bpobj', 'enabled'),
            ('feature@spacemap_histogram', 'active'),
            ('bootsize', '-'),
            ('free', 2113813504),
            ('feature@device_removal', 'enabled'),
            ('failmode', 'wait'),
            ('feature@filesystem_limits', 'enabled'),
            ('feature@edonr', 'enabled'),
            ('altroot', '-'),
            ('fragmentation', '0%'),
        ]))
        with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}), \
             patch.dict(zpool.__salt__, {'zpool.get': mock_get}), \
             patch.dict(zpool.__utils__, utils_patch):
            self.assertEqual(
                zpool.present(
                    'myzpool',
                    config=config,
                    layout=layout,
                    properties=properties,
                ),
                ret,
            )
|
1712
tests/unit/utils/test_zfs.py
Normal file
1712
tests/unit/utils/test_zfs.py
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Reference in a new issue