Running plugin_test.py against the ONTAP plugin revealed a number
of issues, which this patch addresses:
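
na.py: use a bare 'raise' where a FilerError is simply being
propagated, so the original traceback is preserved; rename the
exception binding from 'fe' to 'f_error'; and handle the
clone-list-status output being absent, which the spec allows and
which the old code assumed was always present.

ontap.py: let LsmError propagate unchanged through the
handle_ontap_errors decorator, return integer values from the
timeout conversions, pass status_info to the Pool constructor
positionally, and only delete the containing NetApp volume when
its name carries the LSM volume prefix, i.e. when we created it.

plugin_test.py: retrieve full pool information, track every pool
for each system so tests can select a pool by element_type,
accept lower-case hex digits when validating VPD values, pass an
integer size to volume_resize, and size block ranges from
volume_replicate_range_block_size() instead of hard-coded values.
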
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/ontap/na.py | 45 ++++++++++++++++++-------------
plugin/ontap/ontap.py | 16 ++++++-----
test/plugin_test.py | 74 +++++++++++++++++++++++++++++++++------------------
3 files changed, 84 insertions(+), 51 deletions(-)
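
Reviewer note, not applied with the patch: below is a minimal
sketch of the patched clone polling loop, using a hypothetical
helper name, to make the optional-output handling easier to
follow. It assumes 'filer' exposes _invoke() and that FilerError,
ErrorNumber and Filer come from plugin/ontap/na.py:

    import time

    def _wait_for_clone(filer, c_id):
        while True:
            out = filer._invoke('clone-list-status',
                                {'clone-id': c_id})
            if 'status' not in out:
                # Spec: output is optional; absent means finished.
                return
            info = out['status']['ops-info']
            state = info['clone-state']
            if state == 'completed':
                return
            elif state == 'failed':
                filer._invoke('clone-clear', {'clone-id': c_id})
                raise FilerError(info['error'], info['reason'])
            elif state in ('running', 'fail exit'):
                # State must reach 'failed' before it can be cleared.
                time.sleep(0.2)  # don't hog the CPU
            else:
                raise FilerError(ErrorNumber.NOT_IMPLEMENTED,
                                 'Unexpected state=' + state)

The bare-raise changes rely on Python 2 semantics: a bare raise
re-raises the active exception with its original traceback, while
re-raising by name ('raise f_error') resets the traceback to the
raise site. A sketch of the pattern, with the same assumptions:

    try:
        filer._invoke('volume-offline', {'name': vol_name})
    except FilerError as f_error:
        if f_error.errno != Filer.EFSDOESNOTEXIST:
            raise  # preserve the original traceback
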
diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 668ce30..0986a9c 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -379,20 +379,20 @@ class Filer(object):
try:
self._invoke('volume-offline', {'name': vol_name})
online = True
- except FilerError as fe:
- if fe.errno != Filer.EFSDOESNOTEXIST:
- raise fe
+ except FilerError as f_error:
+ if f_error.errno != Filer.EFSDOESNOTEXIST:
+ raise
try:
self._invoke('volume-destroy', {'name': vol_name})
- except FilerError as fe:
+ except FilerError as f_error:
#If the volume was online, we will return it to same status
if online:
try:
self._invoke('volume-online', {'name': vol_name})
except FilerError:
pass
- raise fe
+ raise f_error
def volume_names(self):
"""
@@ -435,20 +435,29 @@ class Filer(object):
while True:
progress = self._invoke('clone-list-status',
- {'clone-id': c_id})['status']['ops-info']
-
- if progress['clone-state'] == 'failed':
- self._invoke('clone-clear', {'clone-id': c_id})
- raise FilerError(progress['error'], progress['reason'])
- elif progress['clone-state'] == 'running' \
- or progress['clone-state'] == 'fail exit':
- # State needs to transition to failed before we can clear it!
- time.sleep(0.2) # Don't hog cpu
- elif progress['clone-state'] == 'completed':
- return progress['destination-file']
+ {'clone-id': c_id})
+
+ # According to the spec the output is optional; when it is
+ # absent, the clone operation has completed
+ if 'status' in progress:
+ progress = progress['status']['ops-info']
+
+ if progress['clone-state'] == 'failed':
+ self._invoke('clone-clear', {'clone-id': c_id})
+ raise FilerError(progress['error'], progress['reason'])
+ elif progress['clone-state'] == 'running' \
+ or progress['clone-state'] == 'fail exit':
+ # State needs to transition to failed before we can
+ # clear it!
+ time.sleep(0.2) # Don't hog cpu
+ elif progress['clone-state'] == 'completed':
+ return
+ else:
+ raise FilerError(ErrorNumber.NOT_IMPLEMENTED,
+ 'Unexpected state=' +
+ progress['clone-state'])
else:
- raise FilerError(ErrorNumber.NOT_IMPLEMENTED,
- 'Unexpected state=' + progress['clone-state'])
+ return
def lun_online(self, lun_path):
self._invoke('lun-online', {'path': lun_path})
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index ca3aed2..cde128f 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -65,6 +65,8 @@ def handle_ontap_errors(method):
def na_wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
+ except LsmError:
+ raise
except na.FilerError as oe:
error, error_msg = error_map(oe)
raise LsmError(error, error_msg)
@@ -178,10 +180,10 @@ class Ontap(IStorageAreaNetwork, INfs):
return self.f.validate()
def time_out_set(self, ms, flags=0):
- self.f.timeout = ms / Ontap.TMO_CONV
+ self.f.timeout = int(ms / Ontap.TMO_CONV)
def time_out_get(self, flags=0):
- return self.f.timeout * Ontap.TMO_CONV
+ return int(self.f.timeout * Ontap.TMO_CONV)
def plugin_unregister(self, flags=0):
pass
@@ -375,7 +377,6 @@ class Ontap(IStorageAreaNetwork, INfs):
opt_data.set('thinp_type', Pool.THINP_TYPE_THICK)
else:
opt_data.set('thinp_type', Pool.THINP_TYPE_UNKNOWN)
- opt_data.set('status_info', self._status_info_of_na_aggr(na_aggr))
element_type = (
Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
@@ -385,6 +386,7 @@ class Ontap(IStorageAreaNetwork, INfs):
opt_data.set('element_type', element_type)
return Pool(pool_id, pool_name, total_space, free_space, status,
+ self._status_info_of_na_aggr(na_aggr),
system_id, opt_data)
@staticmethod
@@ -433,10 +435,10 @@ class Ontap(IStorageAreaNetwork, INfs):
opt_data.set('thinp_type', Pool.THINP_TYPE_UNKNOWN)
opt_data.set('status_info', self._status_info_of_na_vol(na_vol))
- element_type = Pool.ELEMENT_TYPE_VOLUME
- opt_data.set('element_type', element_type)
+ opt_data.set('element_type', Pool.ELEMENT_TYPE_VOLUME)
return Pool(pool_id, pool_name, total_space, free_space, status,
+ self._status_info_of_na_vol(na_vol),
system_id, opt_data)
@handle_ontap_errors
@@ -635,7 +637,7 @@ class Ontap(IStorageAreaNetwork, INfs):
luns = self.f.luns_get_specific(aggr=volume.pool_id,
na_volume_name=vol)
- if len(luns) == 1:
+ if len(luns) == 1 and Ontap.LSM_VOL_PREFIX in vol:
self.f.volume_delete(vol)
else:
self.f.lun_delete(volume.name)
@@ -705,7 +707,7 @@ class Ontap(IStorageAreaNetwork, INfs):
#Put volume back to previous size
self._na_resize_recovery(
Ontap._vol_to_na_volume_name(volume_src), -size)
- raise e
+ raise
return None, self._get_volume(dest, volume_src.pool_id)
else:
#TODO Need to get instructions on how to provide this
diff --git a/test/plugin_test.py b/test/plugin_test.py
index 139b1d4..67bc34d 100755
--- a/test/plugin_test.py
+++ b/test/plugin_test.py
@@ -176,10 +176,11 @@ class TestProxy(object):
if isinstance(e, lsm.LsmError) and \
e.code != lsm.ErrorNumber.NO_SUPPORT:
- TestProxy.log_result(_proxy_method_name,
- dict(rc=False,
- stack_trace=traceback.format_exc(),
- msg=str(e)))
+ TestProxy.log_result(
+ _proxy_method_name,
+ dict(rc=False,
+ stack_trace=traceback.format_exc(),
+ msg=str(e)))
raise e
# If the job can do async, we will block looping on it.
@@ -240,11 +241,23 @@ class TestPlugin(unittest.TestCase):
self.c = TestProxy(lsm.Client(TestPlugin.URI, TestPlugin.PASSWORD))
self.systems = self.c.systems()
- self.pools = self.c.pools()
+ self.pools = self.c.pools(flags=lsm.Pool.FLAG_RETRIEVE_FULL_INFO)
+
+ self.pool_by_sys_id = {}
+
+ for s in self.systems:
+ self.pool_by_sys_id[s.id] = [p for p in self.pools if
+ p.system_id == s.id]
- self.pool_by_sys_id = dict((p.system_id, p) for p in self.pools)
# TODO Store what exists, so that we don't remove it
+ def _get_pool_by_usage(self, system_id, element_type):
+ for p in self.pool_by_sys_id[system_id]:
+ if 'element_type' in p.optional_data.list():
+ if int(p.optional_data.get('element_type')) == element_type:
+ return p
+ return None
+
def tearDown(self):
# TODO Walk the array looking for stuff we have created and remove it
# What should we do if an array supports a create operation, but not
@@ -270,10 +283,9 @@ class TestPlugin(unittest.TestCase):
pools_list = self.c.pools()
self.assertTrue(len(pools_list) > 0, "We need at least 1 pool to test")
-
@staticmethod
def _vpd_correct(vpd):
- p = re.compile('^[A-F0-9]+$')
+ p = re.compile('^[a-fA-F0-9]+$')
if vpd is not None and len(vpd) > 0 and p.match(vpd) is not None:
return True
@@ -298,25 +310,31 @@ class TestPlugin(unittest.TestCase):
def _volume_create(self, system_id):
if system_id in self.pool_by_sys_id:
- p = self.pool_by_sys_id[system_id]
+ p = self._get_pool_by_usage(system_id,
+ lsm.Pool.ELEMENT_TYPE_VOLUME)
- vol_size = min(p.free_space / 10, mb_in_bytes(512))
+ if p:
+ vol_size = min(p.free_space / 10, mb_in_bytes(512))
- vol = self.c.volume_create(p, rs('volume'), vol_size,
- lsm.Volume.PROVISION_DEFAULT)[1]
+ vol = self.c.volume_create(p, rs('volume'), vol_size,
+ lsm.Volume.PROVISION_DEFAULT)[1]
- self.assertTrue(self._volume_exists(vol.id))
- return vol, p
+ self.assertTrue(self._volume_exists(vol.id))
+ return vol, p
def _fs_create(self, system_id):
if system_id in self.pool_by_sys_id:
- p = self.pool_by_sys_id[system_id]
-
- fs_size = min(p.free_space / 10, mb_in_bytes(512))
- fs = self.c.fs_create(p, rs('fs'), fs_size)[1]
-
- self.assertTrue(self._fs_exists(fs.id))
- return fs, p
+ pools = self.pool_by_sys_id[system_id]
+
+ for p in pools:
+ if p.free_space > mb_in_bytes(250) and \
+ int(p.optional_data.get('element_type')) & \
+ lsm.Pool.ELEMENT_TYPE_FS:
+ fs_size = min(p.free_space / 10, mb_in_bytes(512))
+ fs = self.c.fs_create(p, rs('fs'), fs_size)[1]
+ self.assertTrue(self._fs_exists(fs.id))
+ return fs, p
+ return None, None
def _volume_delete(self, volume):
self.c.volume_delete(volume)
@@ -379,8 +397,8 @@ class TestPlugin(unittest.TestCase):
lsm.Capabilities.VOLUME_DELETE,
lsm.Capabilities.VOLUME_RESIZE]):
vol = self._volume_create(s.id)[0]
- vol_resize = self.c.volume_resize(vol,
- vol.size_bytes * 1.10)[1]
+ vol_resize = self.c.volume_resize(
+ vol, int(vol.size_bytes * 1.10))[1]
self.assertTrue(vol.size_bytes < vol_resize.size_bytes)
self.assertTrue(vol.id == vol_resize.id,
"Expecting re-sized volume to refer to "
@@ -443,13 +461,17 @@ class TestPlugin(unittest.TestCase):
for s in self.systems:
cap = self.c.capabilities(s)
- if supported(cap, [lsm.Capabilities.VOLUME_CREATE,
+ if supported(cap,
+ [lsm.Capabilities.VOLUME_COPY_RANGE_BLOCK_SIZE,
+ lsm.Capabilities.VOLUME_CREATE,
lsm.Capabilities.VOLUME_DELETE,
lsm.Capabilities.VOLUME_COPY_RANGE]):
+ size = self.c.volume_replicate_range_block_size(s)
+
vol, pool = self._volume_create(s.id)
- br = lsm.BlockRange(0, 100, 10)
+ br = lsm.BlockRange(0, size, size)
if supported(
cap, [lsm.Capabilities.VOLUME_COPY_RANGE_CLONE]):
@@ -461,7 +483,7 @@ class TestPlugin(unittest.TestCase):
self.c.volume_replicate_range,
lsm.Volume.REPLICATE_CLONE, vol, vol, [br])
- br = lsm.BlockRange(200, 400, 50)
+ br = lsm.BlockRange(size * 2, size, size)
if supported(
cap, [lsm.Capabilities.VOLUME_COPY_RANGE_COPY]):
--
1.8.2.1