Discussion:
[Libstoragemgmt-devel] [PATCH 0/8] ABI/API changes for lsmcli and error numbers.
Gris Ge
2014-09-04 13:59:53 UTC
Permalink
Gris Ge (8):
Rename UNSUPPORTED_VOLUME_EXPAND to UNSUPPORTED_VOLUME_GROW
lsmcli: add some keys back to the column display way
lsmcli: Rename some lsmcli output column keys
Rename VOLUME_ONLINE to VOLUME_ENABLE and _OFFLINE to _DISABLE
C and Python Library: Rename error ACCESS_GROUP_MASKED to
LAST_INIT_IN_ACCESS_GROUP
SMI-S plugin: Handle LAST_INIT_IN_ACCESS_GROUP, NO_STATE_CHANGE error
in access_group_initiator_delete()
ONTAP plugin: Improve volume_mask() and
access_group_initiator_delete()
Remove SIZE_TOO_SMALL error and add POOL_NOT_READY error

.../libstoragemgmt/libstoragemgmt_capabilities.h | 4 +-
.../include/libstoragemgmt/libstoragemgmt_error.h | 4 +-
.../include/libstoragemgmt/libstoragemgmt_types.h | 2 +-
plugin/nstor/nstor.py | 4 +-
plugin/ontap/na.py | 13 +-
plugin/ontap/ontap.py | 126 ++++++++--
plugin/sim/simarray.py | 2 +-
plugin/simc/simc_lsmplugin.c | 4 +-
plugin/smispy/dmtf.py | 6 -
plugin/smispy/smis.py | 58 ++---
plugin/targetd/targetd.py | 2 -
python_binding/lsm/_common.py | 6 +-
python_binding/lsm/_data.py | 6 +-
test/plugin_test.py | 2 +-
test/tester.c | 12 +-
tools/lsmcli/data_display.py | 272 ++++++++++-----------
16 files changed, 279 insertions(+), 244 deletions(-)
--
1.9.3
Gris Ge
2014-09-04 13:59:54 UTC
Permalink
* With Pool.STATUS_GROWING, we should use GROW instead of EXPAND.

Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt_types.h | 2 +-
plugin/sim/simarray.py | 2 +-
plugin/smispy/smis.py | 2 +-
python_binding/lsm/_data.py | 2 +-
test/plugin_test.py | 2 +-
tools/lsmcli/data_display.py | 4 ++--
6 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index cb10163..c020641 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -216,7 +216,7 @@ typedef enum {
#define LSM_POOL_ELEMENT_TYPE_DELTA 0x0000000000000010
#define LSM_POOL_ELEMENT_TYPE_SYS_RESERVED 0x0000000000000400

-#define LSM_POOL_UNSUPPORTED_VOLUME_EXPAND 0x0000000000000001
+#define LSM_POOL_UNSUPPORTED_VOLUME_GROW 0x0000000000000001
#define LSM_POOL_UNSUPPORTED_VOLUME_SHRINK 0x0000000000000002

typedef enum {
diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index 0d38e73..038d2a4 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -619,7 +619,7 @@ class SimData(object):
status_info=SimData.SIM_DATA_POOL_STATUS_INFO,
sys_id=SimData.SIM_DATA_SYS_ID,
element_type=SimData.SIM_DATA_SYS_POOL_ELEMENT_TYPE,
- unsupported_actions=Pool.UNSUPPORTED_VOLUME_EXPAND |
+ unsupported_actions=Pool.UNSUPPORTED_VOLUME_GROW |
Pool.UNSUPPORTED_VOLUME_SHRINK),
'POO2': dict(
pool_id='POO2', name='Pool 2',
diff --git a/plugin/smispy/smis.py b/plugin/smispy/smis.py
index 46c640a..0305d54 100644
--- a/plugin/smispy/smis.py
+++ b/plugin/smispy/smis.py
@@ -3229,7 +3229,7 @@ class Smis(IStorageAreaNetwork):
if Smis.DMTF_SUPPORT_VOL_CREATE in supported_features:
element_type |= Pool.ELEMENT_TYPE_VOLUME
if Smis.DMTF_SUPPORT_ELEMENT_EXPAND not in supported_features:
- unsupported |= Pool.UNSUPPORTED_VOLUME_EXPAND
+ unsupported |= Pool.UNSUPPORTED_VOLUME_GROW
if Smis.DMTF_SUPPORT_ELEMENT_REDUCE not in supported_features:
unsupported |= Pool.UNSUPPORTED_VOLUME_SHRINK

diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index dbe77a5..bfb4d5a 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -329,7 +329,7 @@ class Pool(IData):
ELEMENT_TYPE_SYS_RESERVED = 1 << 10 # Reserved for system use

# Unsupported actions, what pool cannot be used for
- UNSUPPORTED_VOLUME_EXPAND = 1 << 0
+ UNSUPPORTED_VOLUME_GROW = 1 << 0
UNSUPPORTED_VOLUME_SHRINK = 1 << 1

# Pool status could be any combination of these status.
diff --git a/test/plugin_test.py b/test/plugin_test.py
index 75e3650..2e1f295 100755
--- a/test/plugin_test.py
+++ b/test/plugin_test.py
@@ -275,7 +275,7 @@ class TestPlugin(unittest.TestCase):
if p.element_type & element_type and \
p.free_space > mb_in_bytes(MIN_POOL_SIZE) and \
(not p.unsupported_actions &
- lsm.Pool.UNSUPPORTED_VOLUME_EXPAND):
+ lsm.Pool.UNSUPPORTED_VOLUME_GROW):
if p.free_space > largest_free:
largest_free = p.free_space
rc = p
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 2ba7ef3..f6702f6 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -116,8 +116,8 @@ _POOL_ELEMENT_TYPE_CONV = {
}

_POOL_UNSUPPORTED_ACTION_CONV = {
- Pool.UNSUPPORTED_VOLUME_EXPAND: "Volume expand",
- Pool.UNSUPPORTED_VOLUME_SHRINK: "Volume shrink"
+ Pool.UNSUPPORTED_VOLUME_GROW: "Volume Grow",
+ Pool.UNSUPPORTED_VOLUME_SHRINK: "Volume Shrink"
}
--
1.9.3
Gris Ge
2014-09-04 13:59:55 UTC
Permalink
* Simplify the code workflow by:
1. Rename XXX_MAN_HEADER to XXX_HEADER as we don't have any optional
properties anymore.
2. Replace XXX_COLUMN_KEYS with XXX_COLUMN_SKIP_KEYS for easier
convenience.

* Add these keys back for column display way:
1. 'system_id' for 'list --type volumes' and 'list --type fs'
2. 'root' for 'list --type EXPORTS'

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/data_display.py | 262 ++++++++++++++++++++-----------------------
1 file changed, 123 insertions(+), 139 deletions(-)

diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index f6702f6..0dec6c5 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -256,18 +256,15 @@ class DisplayData(object):
VALUE_CONVERT = {}

# lsm.System
- SYSTEM_MAN_HEADER = OrderedDict()
- SYSTEM_MAN_HEADER['id'] = 'ID'
- SYSTEM_MAN_HEADER['name'] = 'Name'
- SYSTEM_MAN_HEADER['status'] = 'Status'
- SYSTEM_MAN_HEADER['status_info'] = 'Status Info'
-
- SYSTEM_COLUMN_KEYS = SYSTEM_MAN_HEADER.keys()
- # SYSTEM_COLUMN_KEYS should be subset of SYSTEM_MAN_HEADER.keys()
- # XXX_COLUMN_KEYS contain a list of mandatory properties which will be
- # displayed in column way. It was used to limit the output of properties
- # in sure the column display way does not exceeded the column width 78.
- # All mandatory_headers will be displayed in script way.
+ SYSTEM_HEADER = OrderedDict()
+ SYSTEM_HEADER['id'] = 'ID'
+ SYSTEM_HEADER['name'] = 'Name'
+ SYSTEM_HEADER['status'] = 'Status'
+ SYSTEM_HEADER['status_info'] = 'Status Info'
+
+ SYSTEM_COLUMN_SKIP_KEYS = []
+ # XXX_COLUMN_SKIP_KEYS contain a list of property should be skipped when
+ # displaying in column way.

SYSTEM_VALUE_CONV_ENUM = {
'status': system_status_to_str,
@@ -276,41 +273,41 @@ class DisplayData(object):
SYSTEM_VALUE_CONV_HUMAN = []

VALUE_CONVERT[System] = {
- 'mandatory_headers': SYSTEM_MAN_HEADER,
- 'column_keys': SYSTEM_COLUMN_KEYS,
+ 'headers': SYSTEM_HEADER,
+ 'column_skip_keys': SYSTEM_COLUMN_SKIP_KEYS,
'value_conv_enum': SYSTEM_VALUE_CONV_ENUM,
'value_conv_human': SYSTEM_VALUE_CONV_HUMAN,
}

- PLUG_DATA_MAN_HEADER = OrderedDict()
- PLUG_DATA_MAN_HEADER['desc'] = 'Description'
- PLUG_DATA_MAN_HEADER['version'] = 'Version'
+ PLUG_DATA_HEADER = OrderedDict()
+ PLUG_DATA_HEADER['desc'] = 'Description'
+ PLUG_DATA_HEADER['version'] = 'Version'

- PLUG_DATA_COLUMN_KEYS = PLUG_DATA_MAN_HEADER.keys()
+ PLUG_DATA_COLUMN_SKIP_KEYS = []

PLUG_DATA_VALUE_CONV_ENUM = {}
PLUG_DATA_VALUE_CONV_HUMAN = []

VALUE_CONVERT[PlugData] = {
- 'mandatory_headers': PLUG_DATA_MAN_HEADER,
- 'column_keys': PLUG_DATA_COLUMN_KEYS,
+ 'headers': PLUG_DATA_HEADER,
+ 'column_skip_keys': PLUG_DATA_COLUMN_SKIP_KEYS,
'value_conv_enum': PLUG_DATA_VALUE_CONV_ENUM,
'value_conv_human': PLUG_DATA_VALUE_CONV_HUMAN,
}

# lsm.Pool
- POOL_MAN_HEADER = OrderedDict()
- POOL_MAN_HEADER['id'] = 'ID'
- POOL_MAN_HEADER['name'] = 'Name'
- POOL_MAN_HEADER['element_type'] = 'Element type'
- POOL_MAN_HEADER['unsupported_actions'] = 'Does not support'
- POOL_MAN_HEADER['total_space'] = 'Total Space'
- POOL_MAN_HEADER['free_space'] = 'Free Space'
- POOL_MAN_HEADER['status'] = 'Status'
- POOL_MAN_HEADER['status_info'] = 'Status Info'
- POOL_MAN_HEADER['system_id'] = 'System ID'
-
- POOL_COLUMN_KEYS = POOL_MAN_HEADER.keys()
+ POOL_HEADER = OrderedDict()
+ POOL_HEADER['id'] = 'ID'
+ POOL_HEADER['name'] = 'Name'
+ POOL_HEADER['element_type'] = 'Element type'
+ POOL_HEADER['unsupported_actions'] = 'Does not support'
+ POOL_HEADER['total_space'] = 'Total Space'
+ POOL_HEADER['free_space'] = 'Free Space'
+ POOL_HEADER['status'] = 'Status'
+ POOL_HEADER['status_info'] = 'Status Info'
+ POOL_HEADER['system_id'] = 'System ID'
+
+ POOL_COLUMN_SKIP_KEYS = []

POOL_VALUE_CONV_ENUM = {
'status': pool_status_to_str,
@@ -321,29 +318,25 @@ class DisplayData(object):
POOL_VALUE_CONV_HUMAN = ['total_space', 'free_space']

VALUE_CONVERT[Pool] = {
- 'mandatory_headers': POOL_MAN_HEADER,
- 'column_keys': POOL_COLUMN_KEYS,
+ 'headers': POOL_HEADER,
+ 'column_skip_keys': POOL_COLUMN_SKIP_KEYS,
'value_conv_enum': POOL_VALUE_CONV_ENUM,
'value_conv_human': POOL_VALUE_CONV_HUMAN,
}

# lsm.Volume
- VOL_MAN_HEADER = OrderedDict()
- VOL_MAN_HEADER['id'] = 'ID'
- VOL_MAN_HEADER['name'] = 'Name'
- VOL_MAN_HEADER['vpd83'] = 'SCSI VPD 0x83'
- VOL_MAN_HEADER['block_size'] = 'Block Size'
- VOL_MAN_HEADER['num_of_blocks'] = '#blocks'
- VOL_MAN_HEADER['size_bytes'] = 'Size'
- VOL_MAN_HEADER['admin_state'] = 'Disabled'
- VOL_MAN_HEADER['pool_id'] = 'Pool ID'
- VOL_MAN_HEADER['system_id'] = 'System ID'
-
- VOL_COLUMN_KEYS = []
- for key_name in VOL_MAN_HEADER.keys():
- # Skip these keys for column display
- if key_name not in ['block_size', 'num_of_blocks', 'system_id']:
- VOL_COLUMN_KEYS.extend([key_name])
+ VOL_HEADER = OrderedDict()
+ VOL_HEADER['id'] = 'ID'
+ VOL_HEADER['name'] = 'Name'
+ VOL_HEADER['vpd83'] = 'SCSI VPD 0x83'
+ VOL_HEADER['block_size'] = 'Block Size'
+ VOL_HEADER['num_of_blocks'] = '#blocks'
+ VOL_HEADER['size_bytes'] = 'Size'
+ VOL_HEADER['admin_state'] = 'Disabled'
+ VOL_HEADER['pool_id'] = 'Pool ID'
+ VOL_HEADER['system_id'] = 'System ID'
+
+ VOL_COLUMN_SKIP_KEYS = ['block_size', 'num_of_blocks']

VOL_VALUE_CONV_ENUM = {
'admin_state': vol_admin_state_to_str
@@ -352,28 +345,24 @@ class DisplayData(object):
VOL_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']

VALUE_CONVERT[Volume] = {
- 'mandatory_headers': VOL_MAN_HEADER,
- 'column_keys': VOL_COLUMN_KEYS,
+ 'headers': VOL_HEADER,
+ 'column_skip_keys': VOL_COLUMN_SKIP_KEYS,
'value_conv_enum': VOL_VALUE_CONV_ENUM,
'value_conv_human': VOL_VALUE_CONV_HUMAN,
}

# lsm.Disk
- DISK_MAN_HEADER = OrderedDict()
- DISK_MAN_HEADER['id'] = 'ID'
- DISK_MAN_HEADER['name'] = 'Name'
- DISK_MAN_HEADER['disk_type'] = 'Type'
- DISK_MAN_HEADER['block_size'] = 'Block Size'
- DISK_MAN_HEADER['num_of_blocks'] = '#blocks'
- DISK_MAN_HEADER['size_bytes'] = 'Size'
- DISK_MAN_HEADER['status'] = 'Status'
- DISK_MAN_HEADER['system_id'] = 'System ID'
-
- DISK_COLUMN_KEYS = []
- for key_name in DISK_MAN_HEADER.keys():
- # Skip these keys for column display
- if key_name not in ['block_size', 'num_of_blocks']:
- DISK_COLUMN_KEYS.extend([key_name])
+ DISK_HEADER = OrderedDict()
+ DISK_HEADER['id'] = 'ID'
+ DISK_HEADER['name'] = 'Name'
+ DISK_HEADER['disk_type'] = 'Type'
+ DISK_HEADER['block_size'] = 'Block Size'
+ DISK_HEADER['num_of_blocks'] = '#blocks'
+ DISK_HEADER['size_bytes'] = 'Size'
+ DISK_HEADER['status'] = 'Status'
+ DISK_HEADER['system_id'] = 'System ID'
+
+ DISK_COLUMN_SKIP_KEYS = ['block_size', 'num_of_blocks']

DISK_VALUE_CONV_ENUM = {
'status': disk_status_to_str,
@@ -383,21 +372,21 @@ class DisplayData(object):
DISK_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']

VALUE_CONVERT[Disk] = {
- 'mandatory_headers': DISK_MAN_HEADER,
- 'column_keys': DISK_COLUMN_KEYS,
+ 'headers': DISK_HEADER,
+ 'column_skip_keys': DISK_COLUMN_SKIP_KEYS,
'value_conv_enum': DISK_VALUE_CONV_ENUM,
'value_conv_human': DISK_VALUE_CONV_HUMAN,
}

# lsm.AccessGroup
- AG_MAN_HEADER = OrderedDict()
- AG_MAN_HEADER['id'] = 'ID'
- AG_MAN_HEADER['name'] = 'Name'
- AG_MAN_HEADER['init_ids'] = 'Initiator IDs'
- AG_MAN_HEADER['init_type'] = 'Type'
- AG_MAN_HEADER['system_id'] = 'System ID'
+ AG_HEADER = OrderedDict()
+ AG_HEADER['id'] = 'ID'
+ AG_HEADER['name'] = 'Name'
+ AG_HEADER['init_ids'] = 'Initiator IDs'
+ AG_HEADER['init_type'] = 'Type'
+ AG_HEADER['system_id'] = 'System ID'

- AG_COLUMN_KEYS = AG_MAN_HEADER.keys()
+ AG_COLUMN_SKIP_KEYS = []

AG_VALUE_CONV_ENUM = {
'init_type': ag_init_type_to_str,
@@ -406,26 +395,22 @@ class DisplayData(object):
AG_VALUE_CONV_HUMAN = []

VALUE_CONVERT[AccessGroup] = {
- 'mandatory_headers': AG_MAN_HEADER,
- 'column_keys': AG_COLUMN_KEYS,
+ 'headers': AG_HEADER,
+ 'column_skip_keys': AG_COLUMN_SKIP_KEYS,
'value_conv_enum': AG_VALUE_CONV_ENUM,
'value_conv_human': AG_VALUE_CONV_HUMAN,
}

# lsm.FileSystem
- FS_MAN_HEADER = OrderedDict()
- FS_MAN_HEADER['id'] = 'ID'
- FS_MAN_HEADER['name'] = 'Name'
- FS_MAN_HEADER['total_space'] = 'Total Space'
- FS_MAN_HEADER['free_space'] = 'Free Space'
- FS_MAN_HEADER['pool_id'] = 'Pool ID'
- FS_MAN_HEADER['system_id'] = 'System ID'
-
- FS_COLUMN_KEYS = []
- for key_name in FS_MAN_HEADER.keys():
- # Skip these keys for column display
- if key_name not in ['system_id']:
- FS_COLUMN_KEYS.extend([key_name])
+ FS_HEADER = OrderedDict()
+ FS_HEADER['id'] = 'ID'
+ FS_HEADER['name'] = 'Name'
+ FS_HEADER['total_space'] = 'Total Space'
+ FS_HEADER['free_space'] = 'Free Space'
+ FS_HEADER['pool_id'] = 'Pool ID'
+ FS_HEADER['system_id'] = 'System ID'
+
+ FS_COLUMN_SKIP_KEYS = []

FS_VALUE_CONV_ENUM = {
}
@@ -433,19 +418,19 @@ class DisplayData(object):
FS_VALUE_CONV_HUMAN = ['total_space', 'free_space']

VALUE_CONVERT[FileSystem] = {
- 'mandatory_headers': FS_MAN_HEADER,
- 'column_keys': FS_COLUMN_KEYS,
+ 'headers': FS_HEADER,
+ 'column_skip_keys': FS_COLUMN_SKIP_KEYS,
'value_conv_enum': FS_VALUE_CONV_ENUM,
'value_conv_human': FS_VALUE_CONV_HUMAN,
}

# lsm.FsSnapshot
- FS_SNAP_MAN_HEADER = OrderedDict()
- FS_SNAP_MAN_HEADER['id'] = 'ID'
- FS_SNAP_MAN_HEADER['name'] = 'Name'
- FS_SNAP_MAN_HEADER['ts'] = 'Time Stamp'
+ FS_SNAP_HEADER = OrderedDict()
+ FS_SNAP_HEADER['id'] = 'ID'
+ FS_SNAP_HEADER['name'] = 'Name'
+ FS_SNAP_HEADER['ts'] = 'Time Stamp'

- FS_SNAP_COLUMN_KEYS = FS_SNAP_MAN_HEADER.keys()
+ FS_SNAP_COLUMN_SKIP_KEYS = []

FS_SNAP_VALUE_CONV_ENUM = {
'ts': datetime.fromtimestamp
@@ -454,57 +439,49 @@ class DisplayData(object):
FS_SNAP_VALUE_CONV_HUMAN = []

VALUE_CONVERT[FsSnapshot] = {
- 'mandatory_headers': FS_SNAP_MAN_HEADER,
- 'column_keys': FS_SNAP_COLUMN_KEYS,
+ 'headers': FS_SNAP_HEADER,
+ 'column_skip_keys': FS_SNAP_COLUMN_SKIP_KEYS,
'value_conv_enum': FS_SNAP_VALUE_CONV_ENUM,
'value_conv_human': FS_SNAP_VALUE_CONV_HUMAN,
}

# lsm.NfsExport
- NFS_EXPORT_MAN_HEADER = OrderedDict()
- NFS_EXPORT_MAN_HEADER['id'] = 'ID'
- NFS_EXPORT_MAN_HEADER['fs_id'] = 'FileSystem ID'
- NFS_EXPORT_MAN_HEADER['export_path'] = 'Export Path'
- NFS_EXPORT_MAN_HEADER['auth'] = 'Auth Type'
- NFS_EXPORT_MAN_HEADER['root'] = 'Root Hosts'
- NFS_EXPORT_MAN_HEADER['rw'] = 'RW Hosts'
- NFS_EXPORT_MAN_HEADER['ro'] = 'RO Hosts'
- NFS_EXPORT_MAN_HEADER['anonuid'] = 'Anonymous UID'
- NFS_EXPORT_MAN_HEADER['anongid'] = 'Anonymous GID'
- NFS_EXPORT_MAN_HEADER['options'] = 'Options'
-
- NFS_EXPORT_COLUMN_KEYS = []
- for key_name in NFS_EXPORT_MAN_HEADER.keys():
- # Skip these keys for column display
- if key_name not in ['root', 'anonuid', 'anongid', 'auth']:
- NFS_EXPORT_COLUMN_KEYS.extend([key_name])
+ NFS_EXPORT_HEADER = OrderedDict()
+ NFS_EXPORT_HEADER['id'] = 'ID'
+ NFS_EXPORT_HEADER['fs_id'] = 'FileSystem ID'
+ NFS_EXPORT_HEADER['export_path'] = 'Export Path'
+ NFS_EXPORT_HEADER['auth'] = 'Auth Type'
+ NFS_EXPORT_HEADER['root'] = 'Root Hosts'
+ NFS_EXPORT_HEADER['rw'] = 'RW Hosts'
+ NFS_EXPORT_HEADER['ro'] = 'RO Hosts'
+ NFS_EXPORT_HEADER['anonuid'] = 'Anonymous UID'
+ NFS_EXPORT_HEADER['anongid'] = 'Anonymous GID'
+ NFS_EXPORT_HEADER['options'] = 'Options'
+
+ NFS_EXPORT_COLUMN_SKIP_KEYS = ['anonuid', 'anongid', 'auth']

NFS_EXPORT_VALUE_CONV_ENUM = {}

NFS_EXPORT_VALUE_CONV_HUMAN = []

VALUE_CONVERT[NfsExport] = {
- 'mandatory_headers': NFS_EXPORT_MAN_HEADER,
- 'column_keys': NFS_EXPORT_COLUMN_KEYS,
+ 'headers': NFS_EXPORT_HEADER,
+ 'column_skip_keys': NFS_EXPORT_COLUMN_SKIP_KEYS,
'value_conv_enum': NFS_EXPORT_VALUE_CONV_ENUM,
'value_conv_human': NFS_EXPORT_VALUE_CONV_HUMAN,
}

# lsm.TargetPort
- TGT_PORT_MAN_HEADER = OrderedDict()
- TGT_PORT_MAN_HEADER['id'] = 'ID'
- TGT_PORT_MAN_HEADER['port_type'] = 'Type'
- TGT_PORT_MAN_HEADER['physical_name'] = 'Physical Name'
- TGT_PORT_MAN_HEADER['service_address'] = 'Address'
- TGT_PORT_MAN_HEADER['network_address'] = 'Network Address'
- TGT_PORT_MAN_HEADER['physical_address'] = 'Physical Address'
- TGT_PORT_MAN_HEADER['system_id'] = 'System ID'
-
- TGT_PORT_COLUMN_KEYS = []
- for key_name in TGT_PORT_MAN_HEADER.keys():
- # Skip these keys for column display
- if key_name not in ['physical_address', 'physical_name']:
- TGT_PORT_COLUMN_KEYS.extend([key_name])
+ TGT_PORT_HEADER = OrderedDict()
+ TGT_PORT_HEADER['id'] = 'ID'
+ TGT_PORT_HEADER['port_type'] = 'Type'
+ TGT_PORT_HEADER['physical_name'] = 'Physical Name'
+ TGT_PORT_HEADER['service_address'] = 'Address'
+ TGT_PORT_HEADER['network_address'] = 'Network Address'
+ TGT_PORT_HEADER['physical_address'] = 'Physical Address'
+ TGT_PORT_HEADER['system_id'] = 'System ID'
+
+ TGT_PORT_COLUMN_SKIP_KEYS = ['physical_address', 'physical_name']

TGT_PORT_VALUE_CONV_ENUM = {
'port_type': tgt_port_type_to_str,
@@ -513,8 +490,8 @@ class DisplayData(object):
TGT_PORT_VALUE_CONV_HUMAN = []

VALUE_CONVERT[TargetPort] = {
- 'mandatory_headers': TGT_PORT_MAN_HEADER,
- 'column_keys': TGT_PORT_COLUMN_KEYS,
+ 'headers': TGT_PORT_HEADER,
+ 'column_skip_keys': TGT_PORT_COLUMN_SKIP_KEYS,
'value_conv_enum': TGT_PORT_VALUE_CONV_ENUM,
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}
@@ -545,7 +522,7 @@ class DisplayData(object):
extra_properties=None, flag_dsp_all_data=False):
data_dict = OrderedDict()
value_convert = DisplayData.VALUE_CONVERT[type(obj)]
- mandatory_headers = value_convert['mandatory_headers']
+ headers = value_convert['headers']
value_conv_enum = value_convert['value_conv_enum']
value_conv_human = value_convert['value_conv_human']

@@ -555,12 +532,19 @@ class DisplayData(object):
display_keys = []

if display_way == DisplayData.DISPLAY_WAY_COLUMN:
- display_keys = value_convert['column_keys']
+ for key_name in headers.keys():
+ if key_name not in value_convert['column_skip_keys']:
+ display_keys.append(key_name)
elif display_way == DisplayData.DISPLAY_WAY_SCRIPT:
- display_keys = mandatory_headers.keys()
+ display_keys = headers.keys()
+
+ if extra_properties:
+ for extra_key_name in extra_properties:
+ if extra_key_name not in display_keys:
+ display_keys.append(extra_key_name)

for key in display_keys:
- key_str = mandatory_headers[key]
+ key_str = headers[key]
value = DisplayData._get_man_pro_value(
obj, key, value_conv_enum, value_conv_human, flag_human,
flag_enum)
--
1.9.3
Gris Ge
2014-09-04 13:59:56 UTC
Permalink
* Hide the initiator type when listing access groups, as lsmcli users can
easily identify an iSCSI IQN or WWPN access group based on its initiators.
* Rename 'Status Info' to 'Info' to save column text width.
* Rename '#blocks' to 'Block Count'.
* Rename 'Element type' to 'Element Type'.
* Rename element type from 'FILE_SYSTEM' to 'FS' to save column text width.

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/data_display.py | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 0dec6c5..b122b8b 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -110,7 +110,7 @@ def pool_status_to_str(pool_status):
_POOL_ELEMENT_TYPE_CONV = {
Pool.ELEMENT_TYPE_POOL: 'POOL',
Pool.ELEMENT_TYPE_VOLUME: 'VOLUME',
- Pool.ELEMENT_TYPE_FS: 'FILE_SYSTEM',
+ Pool.ELEMENT_TYPE_FS: 'FS',
Pool.ELEMENT_TYPE_SYS_RESERVED: 'SYSTEM_RESERVED',
Pool.ELEMENT_TYPE_DELTA: "DELTA",
}
@@ -125,8 +125,8 @@ def pool_element_type_to_str(element_type):
return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)


-def pool_unsupported_actions_to_str(unsupported_action):
- return _bit_map_to_str(unsupported_action, _POOL_UNSUPPORTED_ACTION_CONV)
+def pool_unsupported_actions_to_str(unsupported_actions):
+ return _bit_map_to_str(unsupported_actions, _POOL_UNSUPPORTED_ACTION_CONV)


_VOL_PROVISION_CONV = {
@@ -260,7 +260,7 @@ class DisplayData(object):
SYSTEM_HEADER['id'] = 'ID'
SYSTEM_HEADER['name'] = 'Name'
SYSTEM_HEADER['status'] = 'Status'
- SYSTEM_HEADER['status_info'] = 'Status Info'
+ SYSTEM_HEADER['status_info'] = 'Info'

SYSTEM_COLUMN_SKIP_KEYS = []
# XXX_COLUMN_SKIP_KEYS contain a list of property should be skipped when
@@ -299,15 +299,15 @@ class DisplayData(object):
POOL_HEADER = OrderedDict()
POOL_HEADER['id'] = 'ID'
POOL_HEADER['name'] = 'Name'
- POOL_HEADER['element_type'] = 'Element type'
+ POOL_HEADER['element_type'] = 'Element Type'
POOL_HEADER['unsupported_actions'] = 'Does not support'
POOL_HEADER['total_space'] = 'Total Space'
POOL_HEADER['free_space'] = 'Free Space'
POOL_HEADER['status'] = 'Status'
- POOL_HEADER['status_info'] = 'Status Info'
+ POOL_HEADER['status_info'] = 'Info'
POOL_HEADER['system_id'] = 'System ID'

- POOL_COLUMN_SKIP_KEYS = []
+ POOL_COLUMN_SKIP_KEYS = ['unsupported_actions']

POOL_VALUE_CONV_ENUM = {
'status': pool_status_to_str,
@@ -330,7 +330,7 @@ class DisplayData(object):
VOL_HEADER['name'] = 'Name'
VOL_HEADER['vpd83'] = 'SCSI VPD 0x83'
VOL_HEADER['block_size'] = 'Block Size'
- VOL_HEADER['num_of_blocks'] = '#blocks'
+ VOL_HEADER['num_of_blocks'] = 'Block Count'
VOL_HEADER['size_bytes'] = 'Size'
VOL_HEADER['admin_state'] = 'Disabled'
VOL_HEADER['pool_id'] = 'Pool ID'
@@ -357,7 +357,7 @@ class DisplayData(object):
DISK_HEADER['name'] = 'Name'
DISK_HEADER['disk_type'] = 'Type'
DISK_HEADER['block_size'] = 'Block Size'
- DISK_HEADER['num_of_blocks'] = '#blocks'
+ DISK_HEADER['num_of_blocks'] = 'Block Count'
DISK_HEADER['size_bytes'] = 'Size'
DISK_HEADER['status'] = 'Status'
DISK_HEADER['system_id'] = 'System ID'
@@ -386,7 +386,7 @@ class DisplayData(object):
AG_HEADER['init_type'] = 'Type'
AG_HEADER['system_id'] = 'System ID'

- AG_COLUMN_SKIP_KEYS = []
+ AG_COLUMN_SKIP_KEYS = ['init_type']

AG_VALUE_CONV_ENUM = {
'init_type': ag_init_type_to_str,
--
1.9.3
Gris Ge
2014-09-04 13:59:57 UTC
Permalink
* Just be consistent with volume_enable() and volume_disable() methods.

Signed-off-by: Gris Ge <***@redhat.com>
---
.../include/libstoragemgmt/libstoragemgmt_capabilities.h | 4 ++--
plugin/nstor/nstor.py | 4 ++--
plugin/ontap/ontap.py | 4 ++--
plugin/simc/simc_lsmplugin.c | 4 ++--
plugin/targetd/targetd.py | 2 --
python_binding/lsm/_data.py | 4 ++--
test/tester.c | 12 ++++++------
7 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 323db98..7d6182c 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -55,8 +55,8 @@ typedef enum {

LSM_CAP_VOLUME_DELETE = 33, /**< Can delete a volume */

- LSM_CAP_VOLUME_ONLINE = 34, /**< Put volume online */
- LSM_CAP_VOLUME_OFFLINE = 35, /**< Take volume offline */
+ LSM_CAP_VOLUME_ENABLE = 34, /**< Enable volume*/
+ LSM_CAP_VOLUME_DISABLE = 35, /**< Disable volume*/

LSM_CAP_VOLUME_MASK = 36, /**< Grant an access group to a volume */
LSM_CAP_VOLUME_UNMASK = 37, /**< Revoke access for an access group */
diff --git a/plugin/nstor/nstor.py b/plugin/nstor/nstor.py
index 98b73be..41633fb 100644
--- a/plugin/nstor/nstor.py
+++ b/plugin/nstor/nstor.py
@@ -273,8 +273,8 @@ class NexentaStor(INfs, IStorageAreaNetwork):
# c.set(Capabilities.VOLUME_COPY_RANGE_CLONE)
# c.set(Capabilities.VOLUME_COPY_RANGE_COPY)
c.set(Capabilities.VOLUME_DELETE)
- # c.set(Capabilities.VOLUME_ONLINE)
- # c.set(Capabilities.VOLUME_OFFLINE)
+ # c.set(Capabilities.VOLUME_ENABLE)
+ # c.set(Capabilities.VOLUME_DISABLE)
c.set(Capabilities.VOLUME_MASK)
c.set(Capabilities.VOLUME_UNMASK)
c.set(Capabilities.ACCESS_GROUPS)
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index c275f63..4198f88 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -466,8 +466,8 @@ class Ontap(IStorageAreaNetwork, INfs):
cap.set(Capabilities.VOLUME_COPY_RANGE)
cap.set(Capabilities.VOLUME_COPY_RANGE_CLONE)
cap.set(Capabilities.VOLUME_DELETE)
- cap.set(Capabilities.VOLUME_ONLINE)
- cap.set(Capabilities.VOLUME_OFFLINE)
+ cap.set(Capabilities.VOLUME_ENABLE)
+ cap.set(Capabilities.VOLUME_DISABLE)
cap.set(Capabilities.VOLUME_ISCSI_CHAP_AUTHENTICATION)
cap.set(Capabilities.VOLUME_MASK)
cap.set(Capabilities.VOLUME_UNMASK)
diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index d0d7170..5ef2afd 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -358,8 +358,8 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_VOLUME_COPY_RANGE_CLONE,
LSM_CAP_VOLUME_COPY_RANGE_COPY,
LSM_CAP_VOLUME_DELETE,
- LSM_CAP_VOLUME_ONLINE,
- LSM_CAP_VOLUME_OFFLINE,
+ LSM_CAP_VOLUME_ENABLE,
+ LSM_CAP_VOLUME_DISABLE,
LSM_CAP_VOLUME_MASK,
LSM_CAP_VOLUME_UNMASK,
LSM_CAP_ACCESS_GROUPS,
diff --git a/plugin/targetd/targetd.py b/plugin/targetd/targetd.py
index 2053754..ddd3a39 100644
--- a/plugin/targetd/targetd.py
+++ b/plugin/targetd/targetd.py
@@ -117,8 +117,6 @@ class TargetdStorage(IStorageAreaNetwork, INfs):
cap.set(Capabilities.VOLUME_REPLICATE)
cap.set(Capabilities.VOLUME_REPLICATE_COPY)
cap.set(Capabilities.VOLUME_DELETE)
- cap.set(Capabilities.VOLUME_OFFLINE)
- cap.set(Capabilities.VOLUME_ONLINE)
cap.set(Capabilities.VOLUME_MASK)
cap.set(Capabilities.VOLUME_UNMASK)
cap.set(Capabilities.FS)
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index bfb4d5a..4031936 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -625,8 +625,8 @@ class Capabilities(IData):

VOLUME_DELETE = 33

- VOLUME_ONLINE = 34
- VOLUME_OFFLINE = 35
+ VOLUME_ENABLE = 34
+ VOLUME_DISABLE = 35

VOLUME_MASK = 36
VOLUME_UNMASK = 37
diff --git a/test/tester.c b/test/tester.c
index 82e52fb..01a8513 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -1841,8 +1841,8 @@ START_TEST(test_capabilities)
cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE_CLONE);
cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE_COPY);
cap_test(cap, LSM_CAP_VOLUME_DELETE);
- cap_test(cap, LSM_CAP_VOLUME_ONLINE);
- cap_test(cap, LSM_CAP_VOLUME_OFFLINE);
+ cap_test(cap, LSM_CAP_VOLUME_ENABLE);
+ cap_test(cap, LSM_CAP_VOLUME_DISABLE);
cap_test(cap, LSM_CAP_VOLUME_MASK);
cap_test(cap, LSM_CAP_VOLUME_UNMASK);
cap_test(cap, LSM_CAP_ACCESS_GROUPS);
@@ -2013,8 +2013,8 @@ START_TEST(test_capability)
LSM_CAP_VOLUME_COPY_RANGE_CLONE,
LSM_CAP_VOLUME_COPY_RANGE_COPY,
LSM_CAP_VOLUME_DELETE,
- LSM_CAP_VOLUME_ONLINE,
- LSM_CAP_VOLUME_OFFLINE,
+ LSM_CAP_VOLUME_ENABLE,
+ LSM_CAP_VOLUME_DISABLE,
LSM_CAP_VOLUME_MASK,
LSM_CAP_VOLUME_UNMASK,
LSM_CAP_ACCESS_GROUPS,
@@ -2067,8 +2067,8 @@ START_TEST(test_capability)
LSM_CAP_VOLUME_COPY_RANGE_CLONE,
LSM_CAP_VOLUME_COPY_RANGE_COPY,
LSM_CAP_VOLUME_DELETE,
- LSM_CAP_VOLUME_ONLINE,
- LSM_CAP_VOLUME_OFFLINE,
+ LSM_CAP_VOLUME_ENABLE,
+ LSM_CAP_VOLUME_DISABLE,
LSM_CAP_VOLUME_MASK,
LSM_CAP_VOLUME_UNMASK,
LSM_CAP_ACCESS_GROUPS,
--
1.9.3
Gris Ge
2014-09-04 13:59:58 UTC
Permalink
* Rename 'ACCESS_GROUP_MASKED' to 'LAST_INIT_IN_ACCESS_GROUP' for better
definition:
LSM refuse to delete the last initiator from an access group.

Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt_error.h | 2 +-
python_binding/lsm/_common.py | 5 ++---
2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
index 2ea1eda..c978e8c 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
@@ -85,7 +85,7 @@ typedef enum {
LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**< Transport serialization error */
LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**< Parameter transported over IPC is invalid */

- LSM_ERR_ACCESS_GROUP_MASKED = 502,
+ LSM_ERR_LAST_INIT_IN_ACCESS_GROUP = 502,


LSM_ERR_UNSUPPORTED_SEARCH_KEY = 510, /**< Unsupport search key */
diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index 383c9e0..b97dadb 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -469,9 +469,8 @@ class ErrorNumber(object):
TRANSPORT_SERIALIZATION = 401
TRANSPORT_INVALID_ARG = 402

- ACCESS_GROUP_MASKED = 502 # refuse to remove the last initiator from
- # access group which have volume masked or
- # allow an access group to be deleted
+ LAST_INIT_IN_ACCESS_GROUP = 502
+ # refuse to remove the last initiator from access group

UNSUPPORTED_SEARCH_KEY = 510
--
1.9.3
Gris Ge
2014-09-04 13:59:59 UTC
Permalink
* As LSM requires us to refuse deletion of the last initiator of an access
group, we remove the capability-checking code for empty access groups.
* Raise NO_STATE_CHANGE when the initiator does not exist in the defined
access group for access_group_initiator_delete().
* Fix capabilities() call as we don't require empty access group support.
* Fix an incorrect error in access_group_delete():
Should raise IS_MASKED if the access group has a volume masked.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/smispy/dmtf.py | 6 ------
plugin/smispy/smis.py | 56 ++++++++++++---------------------------------------
2 files changed, 13 insertions(+), 49 deletions(-)

diff --git a/plugin/smispy/dmtf.py b/plugin/smispy/dmtf.py
index c06811d..5edbfc1 100644
--- a/plugin/smispy/dmtf.py
+++ b/plugin/smispy/dmtf.py
@@ -191,12 +191,6 @@ class DMTF(object):
# Allowing empty DeviceMaskingGroup associated to SPC
GMM_CAP_DEV_MG_ALLOW_EMPTY_W_SPC = Uint16(5)

- # CIM_GroupMaskingMappingCapabilities['SupportedInitiatorGroupFeatures']
- # Allowing empty DeviceMaskingGroup
- GMM_CAP_INIT_MG_ALLOW_EMPTY = Uint16(4)
- # Allowing empty DeviceMaskingGroup associated to SPC
- GMM_CAP_INIT_MG_ALLOW_EMPTY_W_SPC = Uint16(5)
-
# CIM_GroupMaskingMappingCapabilities['SupportedAsynchronousActions']
# and 'SupportedSynchronousActions'. They are using the same value map.
GMM_CAP_DELETE_SPC = Uint16(24)
diff --git a/plugin/smispy/smis.py b/plugin/smispy/smis.py
index 0305d54..36eabf5 100644
--- a/plugin/smispy/smis.py
+++ b/plugin/smispy/smis.py
@@ -916,10 +916,10 @@ class Smis(IStorageAreaNetwork):
cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN)
cap.set(Capabilities.ACCESS_GROUP_CREATE_ISCSI_IQN)

- # RemoveMembers is also mandatory, but we require target provider
- # to support empty InitiatorMaskingGroup.
+ # RemoveMembers is also mandatory
+ cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE)
+
cim_gmm_cap_pros = [
- 'SupportedInitiatorGroupFeatures',
'SupportedAsynchronousActions',
'SupportedSynchronousActions',
'SupportedDeviceGroupFeatures']
@@ -930,10 +930,6 @@ class Smis(IStorageAreaNetwork):
ResultClass='CIM_GroupMaskingMappingCapabilities',
PropertyList=cim_gmm_cap_pros)[0]

- if DMTF.GMM_CAP_INIT_MG_ALLOW_EMPTY in \
- cim_gmm_cap['SupportedInitiatorGroupFeatures']:
- cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE)
-
# if empty dev group in spc is allowed, RemoveMembers() is enough
# to do volume_unamsk(). RemoveMembers() is mandatory.
if DMTF.GMM_CAP_DEV_MG_ALLOW_EMPTY_W_SPC in \
@@ -2888,27 +2884,12 @@ class Smis(IStorageAreaNetwork):

def _ag_init_del_group(self, access_group, init_id):
"""
- LSM Require support of empty access group.
- So GMM_CAP_INIT_MG_ALLOW_EMPTY should be support by target SMI-S
- provider. To make thing simple, even current access_group has 2+
- init_id, we still raise NO_SUPPORT error if empty access group not
- supported.
+ Call CIM_GroupMaskingMappingService.RemoveMembers() against
+ CIM_InitiatorMaskingGroup.
"""
cim_sys = self._get_cim_instance_by_id(
'System', access_group.system_id, raise_error=True)

- cim_gmm_cap = self._c.Associators(
- cim_sys.path,
- AssocClass='CIM_ElementCapabilities',
- ResultClass='CIM_GroupMaskingMappingCapabilities',
- PropertyList=['SupportedInitiatorGroupFeatures'])[0]
- if DMTF.GMM_CAP_INIT_MG_ALLOW_EMPTY not in \
- cim_gmm_cap['SupportedInitiatorGroupFeatures']:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Target SMI-S provider does not support empty "
- "CIM_InitiatorMaskingGroup which is required by "
- "LSM for access_group_initiator_delete()")
-
cim_init_mg_pros = self._cim_init_mg_pros()
cim_init_mg = self._cim_init_mg_of_id(
access_group.id, raise_error=True, property_list=cim_init_mg_pros)
@@ -2924,28 +2905,17 @@ class Smis(IStorageAreaNetwork):
break

if cim_init is None:
- return self._cim_init_mg_to_lsm(
- cim_init_mg, access_group.system_id)
+ raise LsmError(ErrorNumber.NO_STATE_CHANGE,
+ "Initiator %s does not exist in defined access group %s" %
+ (init_id, access_group.id))
+
+ if len(cur_cim_inits) == 1:
+ raise LsmError(ErrorNumber.LAST_INIT_IN_ACCESS_GROUP,
+ "Refuse to remove last initiator from access group")

cim_gmm_path = self._get_cim_service_path(
cim_sys.path, 'CIM_GroupMaskingMappingService')

- if len(cur_cim_inits) == 1:
- # Check whether we have any volume masked.
- cim_spcs_path = self._c.AssociatorNames(
- cim_init_mg.path,
- AssocClass='CIM_AssociatedInitiatorMaskingGroup',
- ResultClass='CIM_SCSIProtocolController')
- for cim_spc_path in cim_spcs_path:
- if len(self._c.AssociatorNames(
- cim_spc_path,
- AssocClass='CIM_ProtocolControllerForUnit',
- ResultClass='CIM_StorageVolume')) >= 1:
- raise LsmError(ErrorNumber.ACCESS_GROUP_MASKED,
- "Refuse to remove last initiator member "
- "from access group which have volume "
- "masked to")
-
# RemoveMembers from InitiatorMaskingGroup
in_params = {
'MaskingGroup': cim_init_mg.path,
@@ -4118,7 +4088,7 @@ class Smis(IStorageAreaNetwork):
cim_spc_path,
AssocClass='CIM_ProtocolControllerForUnit',
ResultClass='CIM_StorageVolume')) >= 1:
- raise LsmError(ErrorNumber.ACCESS_GROUP_MASKED,
+ raise LsmError(ErrorNumber.IS_MASKED,
"Access Group %s has volume masked" %
access_group.id)
--
1.9.3
Gris Ge
2014-09-04 14:00:00 UTC
Permalink
* Handle these errors in volume_mask():
NOT_FOUND_ACCESS_GROUP
EMPTY_ACCESS_GROUP
NO_STATE_CHANGE # already masked.
* Handle these errors in access_group_initiator_delete():
NOT_FOUND_ACCESS_GROUP
NO_STATE_CHANGE # initiator not exist in defined access group
LAST_INIT_IN_ACCESS_GROUP # Refuse to delete the last initiator

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/ontap/na.py | 1 +
plugin/ontap/ontap.py | 51 ++++++++++++++++++++++++++++++++++++++-------------
2 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 4fd1bc2..6c6b32a 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -144,6 +144,7 @@ class FilerError(Exception):
# group exist
EVDISK_ERROR_VDISK_NOT_ENABLED = 9014 # LUN is not online
EVDISK_ERROR_VDISK_NOT_DISABLED = 9015 # LUN is not offline
+ EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked

def __init__(self, errno, reason, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index 4198f88..a74f4a0 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -643,7 +643,25 @@ class Ontap(IStorageAreaNetwork, INfs):

@handle_ontap_errors
def volume_mask(self, access_group, volume, flags=0):
- self.f.lun_map(access_group.name, _lsm_vol_to_na_vol_path(volume))
+ igroups = self.f.igroups(group_name=access_group.name)
+ if len(igroups) != 1:
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "AccessGroup %s(%d) not found" %
+ (access_group.name, access_group.id))
+
+ cur_init_ids = Ontap._initiators_in_group(igroups[0])
+ if len(cur_init_ids) == 0:
+ raise LsmError(ErrorNumber.EMPTY_ACCESS_GROUP,
+ "Refuse to do volume masking against empty access group")
+ try:
+ self.f.lun_map(access_group.name, _lsm_vol_to_na_vol_path(volume))
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_INITGROUP_HAS_VDISK:
+ raise LsmError(
+ ErrorNumber.NO_STATE_CHANGE,
+ "Volume already masked to defined access group")
+ else:
+ raise
return None

@handle_ontap_errors
@@ -746,18 +764,25 @@ class Ontap(IStorageAreaNetwork, INfs):
@handle_ontap_errors
def access_group_initiator_delete(self, access_group, init_id, init_type,
flags=0):
- try:
- self.f.igroup_del_initiator(access_group.name, init_id)
- except na.FilerError as oe:
- error_code, error_msg = error_map(oe)
- if oe.errno == na.FilerError.IGROUP_NOT_CONTAIN_GIVEN_INIT:
- return copy.deepcopy(access_group)
- elif oe.errno == na.FilerError.NO_SUCH_IGROUP:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "AccessGroup %s(%d) not found" %
- (access_group.name, access_group.id))
- else:
- raise
+ igroups = self.f.igroups(group_name=access_group.name)
+ if len(igroups) != 1:
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "AccessGroup %s(%d) not found" %
+ (access_group.name, access_group.id))
+
+ cur_init_ids = Ontap._initiators_in_group(igroups[0])
+ if init_id not in cur_init_ids:
+ raise LsmError(
+ ErrorNumber.NO_STATE_CHANGE,
+ "Initiator %s does not exist in access group %s" %
+ (init_id, access_group.name))
+
+ if len(cur_init_ids) == 1:
+ raise LsmError(ErrorNumber.LAST_INIT_IN_ACCESS_GROUP,
+ "Refuse to remove last initiator from access group")
+
+ self.f.igroup_del_initiator(access_group.name, init_id)
+
na_ags = self.f.igroups(access_group.name)
if len(na_ags) != 1:
raise LsmError(ErrorNumber.PLUGIN_BUG,
--
1.9.3
Gris Ge
2014-09-04 14:00:01 UTC
Permalink
* Remove SIZE_TOO_SMALL as volume_create() and volume_resize() should
return a volume with a size larger than or equal to the requested size.
* Add POOL_NOT_READY and LSM_ERR_POOL_NOT_READY error:
Pool is not ready for volume/fs/etc create/resize/delete/etc
* SIZE_TOO_SMALL support status by plugin:
* SMI-S plugin:
Never handled SIZE_TOO_SMALL, so removing it does no harm.
EMC VMAX and VNX support this without code changes.
* ONTAP plugin:
Use lun_min_size() method to get minimum size and create/resize using
that size.
* Extra changes for ONTAP plugin:
1. Handle disabled NetApp volume (LSM Pool) status.
2. Handle POOL_NOT_READY in volume_create() and volume_resize().
3. Handle NO_STATE_CHANGE in volume_resize()

Signed-off-by: Gris Ge <***@redhat.com>
---
.../include/libstoragemgmt/libstoragemgmt_error.h | 5 +-
plugin/ontap/na.py | 14 ++++-
plugin/ontap/ontap.py | 71 ++++++++++++++++++++--
python_binding/lsm/_common.py | 3 +-
4 files changed, 80 insertions(+), 13 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
index c978e8c..0b28ddc 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
@@ -79,8 +79,6 @@ typedef enum {

LSM_ERR_NOT_ENOUGH_SPACE = 350, /**< Insufficient space */

- LSM_ERR_SIZE_TOO_SMALL = 353, /**< Size specified is too small */
-
LSM_ERR_TRANSPORT_COMMUNICATION = 400, /**< Error comunicating with plug-in */
LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**< Transport serialization error */
LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**< Parameter transported over IPC is invalid */
@@ -90,7 +88,8 @@ typedef enum {

LSM_ERR_UNSUPPORTED_SEARCH_KEY = 510, /**< Unsupport search key */

- LSM_ERR_EMPTY_ACCESS_GROUP = 511
+ LSM_ERR_EMPTY_ACCESS_GROUP = 511,
+ LSM_ERR_POOL_NOT_READY = 512,

} lsm_error_number;

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 6c6b32a..2e00f92 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -139,12 +139,17 @@ class FilerError(Exception):
NO_SUCH_IGROUP = 9003

# Using the name from NetApp SDK netapp_errno.h
+ EVDISK_ERROR_VDISK_EXISTS = 9012 # LUN name already in use
EVDISK_ERROR_VDISK_EXPORTED = 9013 # LUN is currently mapped
- EVDISK_ERROR_INITGROUP_MAPS_EXIST = 9029 # LUN maps for this initiator
- # group exist
EVDISK_ERROR_VDISK_NOT_ENABLED = 9014 # LUN is not online
EVDISK_ERROR_VDISK_NOT_DISABLED = 9015 # LUN is not offline
- EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked
+ EVDISK_ERROR_INITGROUP_MAPS_EXIST = 9029 # LUN maps for this initiator
+ # group exist
+ EVDISK_ERROR_SIZE_TOO_LARGE = 9034 # LUN size too large.
+ EVDISK_ERROR_NO_SUCH_VOLUME = 9036 # NetApp Volume not exists.
+ EVDISK_ERROR_SIZE_TOO_SMALL = 9041 # Specified too small a size
+ EVDISK_ERROR_SIZE_UNCHANGED = 9042 # requested size is the same.
+ EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked

def __init__(self, errno, reason, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
@@ -285,6 +290,9 @@ class Filer(object):
# No LUN found.
return []

+ def lun_min_size(self):
+ return self._invoke('lun-get-minsize', {'type': 'image'})['min-size']
+
def lun_create(self, full_path_name, size_bytes):
"""
Creates a lun
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index a74f4a0..476b4d0 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -35,7 +35,6 @@ e_map = {
na.Filer.ESIZE_TOO_LARGE: ErrorNumber.NOT_ENOUGH_SPACE,
na.Filer.ENOSPACE: ErrorNumber.NOT_ENOUGH_SPACE,
na.Filer.ENO_SUCH_FS: ErrorNumber.NOT_FOUND_FS,
- na.Filer.EVOLUME_TOO_SMALL: ErrorNumber.SIZE_TOO_SMALL,
na.Filer.EAPILICENSE: ErrorNumber.NOT_LICENSED,
na.Filer.EFSDOESNOTEXIST: ErrorNumber.NOT_FOUND_FS,
na.Filer.EFSOFFLINE: ErrorNumber.NO_SUPPORT_ONLINE_CHANGE,
@@ -446,6 +445,10 @@ class Ontap(IStorageAreaNetwork, INfs):
% na_aggr['name']
break

+ if status & Pool.STATUS_OK and na_vol['state'] == 'offline':
+ status = Pool.STATUS_STOPPED
+ status_info = 'Disabled by admin'
+
# This volume should be noted that it is reserved for system
# and thus cannot be removed.
if pool_name == '/vol/vol0':
@@ -545,13 +548,31 @@ class Ontap(IStorageAreaNetwork, INfs):
"Pool not suitable for creating volumes")

na_vol_name = pool.name
- v = self.f.volume_names()
- if na_vol_name not in v:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")

lun_name = self.f.lun_build_name(na_vol_name, volume_name)

- self.f.lun_create(lun_name, size_bytes)
+ try:
+ self.f.lun_create(lun_name, size_bytes)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_LARGE:
+ raise LsmError(
+ ErrorNumber.NOT_ENOUGH_SPACE,
+ "No enough requested free size in pool")
+ elif fe.errno == na.FilerError.EVDISK_ERROR_VDISK_EXISTS:
+ raise LsmError(
+ ErrorNumber.NAME_CONFLICT,
+ "Requested volume name is already used by other volume")
+ elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL:
+ # Size too small should not be raised. By API defination,
+ # we should create a LUN with mimun size.
+ min_size = self.f.lun_min_size()
+ return self.volume_create(
+ pool, volume_name, min_size, provisioning, flags)
+ elif fe.errno == na.FilerError.EVDISK_ERROR_NO_SUCH_VOLUME:
+ # When NetApp volume is offline, we will get this error also.
+ self._check_na_volume(na_vol_name)
+ else:
+ raise

#Get the information about the newly created LUN
return None, self._get_volume(lun_name, pool.id)
@@ -579,10 +600,48 @@ class Ontap(IStorageAreaNetwork, INfs):

@handle_ontap_errors
def volume_resize(self, volume, new_size_bytes, flags=0):
- self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), new_size_bytes)
+ try:
+ self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), new_size_bytes)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL:
+ min_size = self.f.lun_min_size()
+ try:
+ self.f.lun_resize(_lsm_vol_to_na_vol_path(volume),
+ min_size)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED:
+ # As requested size is not the one we are send to
+ # self.f.lun_resize(), we should silently pass.
+ pass
+ else:
+ raise
+ elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED:
+ raise LsmError(ErrorNumber.NO_STATE_CHANGE,
+ "Requested size is the same as current "
+ "volume size")
+ elif fe.errno == na.FilerError.EVDISK_ERROR_NO_SUCH_VOLUME:
+ # When NetApp volume is offline, we will get this error also.
+ self._check_na_volume(na_vol_name)
+ else:
+ raise
return None, self._get_volume(_lsm_vol_to_na_vol_path(volume),
volume.pool_id)

+ def _check_na_volume(self, na_vol_name):
+ na_vols = self.f.volumes(volume_name=na_vol_name)
+ if len(na_vols) == 0:
+ raise LsmError(ErrorNumber.NOT_FOUND_POOL,
+ "Pool not found")
+ elif len(na_vols) == 1:
+ # NetApp Volume is disabled.
+ if na_vols[0]['state'] == 'offline':
+ raise LsmError(ErrorNumber.POOL_NOT_READY,
+ "Pool not ready for volume creation")
+ else:
+ raise LsmError(ErrorNumber.PLUGIN_BUG,
+ "volume_create(): "
+ "Got 2 or more na_vols: %s" % na_vols)
+
def _volume_on_aggr(self, pool, volume):
search = Ontap._vol_to_na_volume_name(volume)
contained_volumes = self.f.aggregate_volume_names(pool.name)
diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index b97dadb..f2fd568 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -463,7 +463,6 @@ class ErrorNumber(object):
PLUGIN_NOT_EXIST = 311

NOT_ENOUGH_SPACE = 350
- SIZE_TOO_SMALL = 353

TRANSPORT_COMMUNICATION = 400
TRANSPORT_SERIALIZATION = 401
@@ -477,6 +476,8 @@ class ErrorNumber(object):
EMPTY_ACCESS_GROUP = 511 # volume_mask() will fail if access group
# has no member/initiator.

+ POOL_NOT_READY = 512 # Pool is not ready for create/resize/etc
+
_LOCALS = locals()

@staticmethod
--
1.9.3
Gris Ge
2014-09-04 14:53:08 UTC
Permalink
* Remove SIZE_TOO_SMALL as volume_create() and volume_resize() should
return a volume with a size larger than or equal to the requested size.
* Add POOL_NOT_READY and LSM_ERR_POOL_NOT_READY error:
Pool is not ready for volume/fs/etc create/resize/delete/etc
* SIZE_TOO_SMALL support status by plugin:
* SMI-S plugin:
Never handled SIZE_TOO_SMALL, so removing it does no harm.
EMC VMAX and VNX support this without code changes.
* ONTAP plugin:
Use lun_min_size() method to get minimum size and create/resize using
that size.
* Extra changes for ONTAP plugin:
1. Handle disabled NetApp volume (LSM Pool) status.
2. Handle POOL_NOT_READY in volume_create().
3. Handle NO_STATE_CHANGE in volume_resize().

Changes in V2:
* Remove POOL_NOT_READY error handling in volume_resize().
Once a NetApp Volume is disabled, all of its LUNs (LSM Volumes) are gone.
The user will always get a NOT_FOUND_VOLUME error in that case.

Signed-off-by: Gris Ge <***@redhat.com>
---
.../include/libstoragemgmt/libstoragemgmt_error.h | 5 +-
plugin/ontap/na.py | 14 ++++-
plugin/ontap/ontap.py | 68 ++++++++++++++++++++--
python_binding/lsm/_common.py | 3 +-
4 files changed, 77 insertions(+), 13 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
index c978e8c..0b28ddc 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
@@ -79,8 +79,6 @@ typedef enum {

LSM_ERR_NOT_ENOUGH_SPACE = 350, /**< Insufficient space */

- LSM_ERR_SIZE_TOO_SMALL = 353, /**< Size specified is too small */
-
LSM_ERR_TRANSPORT_COMMUNICATION = 400, /**< Error comunicating with plug-in */
LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**< Transport serialization error */
LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**< Parameter transported over IPC is invalid */
@@ -90,7 +88,8 @@ typedef enum {

LSM_ERR_UNSUPPORTED_SEARCH_KEY = 510, /**< Unsupport search key */

- LSM_ERR_EMPTY_ACCESS_GROUP = 511
+ LSM_ERR_EMPTY_ACCESS_GROUP = 511,
+ LSM_ERR_POOL_NOT_READY = 512,

} lsm_error_number;

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 6c6b32a..2e00f92 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -139,12 +139,17 @@ class FilerError(Exception):
NO_SUCH_IGROUP = 9003

# Using the name from NetApp SDK netapp_errno.h
+ EVDISK_ERROR_VDISK_EXISTS = 9012 # LUN name already in use
EVDISK_ERROR_VDISK_EXPORTED = 9013 # LUN is currently mapped
- EVDISK_ERROR_INITGROUP_MAPS_EXIST = 9029 # LUN maps for this initiator
- # group exist
EVDISK_ERROR_VDISK_NOT_ENABLED = 9014 # LUN is not online
EVDISK_ERROR_VDISK_NOT_DISABLED = 9015 # LUN is not offline
- EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked
+ EVDISK_ERROR_INITGROUP_MAPS_EXIST = 9029 # LUN maps for this initiator
+ # group exist
+ EVDISK_ERROR_SIZE_TOO_LARGE = 9034 # LUN size too large.
+ EVDISK_ERROR_NO_SUCH_VOLUME = 9036 # NetApp Volume not exists.
+ EVDISK_ERROR_SIZE_TOO_SMALL = 9041 # Specified too small a size
+ EVDISK_ERROR_SIZE_UNCHANGED = 9042 # requested size is the same.
+ EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked

def __init__(self, errno, reason, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
@@ -285,6 +290,9 @@ class Filer(object):
# No LUN found.
return []

+ def lun_min_size(self):
+ return self._invoke('lun-get-minsize', {'type': 'image'})['min-size']
+
def lun_create(self, full_path_name, size_bytes):
"""
Creates a lun
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index a74f4a0..e03f423 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -35,7 +35,6 @@ e_map = {
na.Filer.ESIZE_TOO_LARGE: ErrorNumber.NOT_ENOUGH_SPACE,
na.Filer.ENOSPACE: ErrorNumber.NOT_ENOUGH_SPACE,
na.Filer.ENO_SUCH_FS: ErrorNumber.NOT_FOUND_FS,
- na.Filer.EVOLUME_TOO_SMALL: ErrorNumber.SIZE_TOO_SMALL,
na.Filer.EAPILICENSE: ErrorNumber.NOT_LICENSED,
na.Filer.EFSDOESNOTEXIST: ErrorNumber.NOT_FOUND_FS,
na.Filer.EFSOFFLINE: ErrorNumber.NO_SUPPORT_ONLINE_CHANGE,
@@ -446,6 +445,10 @@ class Ontap(IStorageAreaNetwork, INfs):
% na_aggr['name']
break

+ if status & Pool.STATUS_OK and na_vol['state'] == 'offline':
+ status = Pool.STATUS_STOPPED
+ status_info = 'Disabled by admin'
+
# This volume should be noted that it is reserved for system
# and thus cannot be removed.
if pool_name == '/vol/vol0':
@@ -545,13 +548,31 @@ class Ontap(IStorageAreaNetwork, INfs):
"Pool not suitable for creating volumes")

na_vol_name = pool.name
- v = self.f.volume_names()
- if na_vol_name not in v:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")

lun_name = self.f.lun_build_name(na_vol_name, volume_name)

- self.f.lun_create(lun_name, size_bytes)
+ try:
+ self.f.lun_create(lun_name, size_bytes)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_LARGE:
+ raise LsmError(
+ ErrorNumber.NOT_ENOUGH_SPACE,
+ "No enough requested free size in pool")
+ elif fe.errno == na.FilerError.EVDISK_ERROR_VDISK_EXISTS:
+ raise LsmError(
+ ErrorNumber.NAME_CONFLICT,
+ "Requested volume name is already used by other volume")
+ elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL:
+ # Size too small should not be raised. By API defination,
+ # we should create a LUN with mimun size.
+ min_size = self.f.lun_min_size()
+ return self.volume_create(
+ pool, volume_name, min_size, provisioning, flags)
+ elif fe.errno == na.FilerError.EVDISK_ERROR_NO_SUCH_VOLUME:
+ # When NetApp volume is offline, we will get this error also.
+ self._check_na_volume(na_vol_name)
+ else:
+ raise

#Get the information about the newly created LUN
return None, self._get_volume(lun_name, pool.id)
@@ -579,10 +600,45 @@ class Ontap(IStorageAreaNetwork, INfs):

@handle_ontap_errors
def volume_resize(self, volume, new_size_bytes, flags=0):
- self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), new_size_bytes)
+ try:
+ self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), new_size_bytes)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL:
+ min_size = self.f.lun_min_size()
+ try:
+ self.f.lun_resize(_lsm_vol_to_na_vol_path(volume),
+ min_size)
+ except na.FilerError as fe:
+ if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED:
+ # As requested size is not the one we are send to
+ # self.f.lun_resize(), we should silently pass.
+ pass
+ else:
+ raise
+ elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED:
+ raise LsmError(ErrorNumber.NO_STATE_CHANGE,
+ "Requested size is the same as current "
+ "volume size")
+ else:
+ raise
return None, self._get_volume(_lsm_vol_to_na_vol_path(volume),
volume.pool_id)

+ def _check_na_volume(self, na_vol_name):
+ na_vols = self.f.volumes(volume_name=na_vol_name)
+ if len(na_vols) == 0:
+ raise LsmError(ErrorNumber.NOT_FOUND_POOL,
+ "Pool not found")
+ elif len(na_vols) == 1:
+ # NetApp Volume is disabled.
+ if na_vols[0]['state'] == 'offline':
+ raise LsmError(ErrorNumber.POOL_NOT_READY,
+ "Pool not ready for volume creation")
+ else:
+ raise LsmError(ErrorNumber.PLUGIN_BUG,
+ "volume_create(): "
+ "Got 2 or more na_vols: %s" % na_vols)
+
def _volume_on_aggr(self, pool, volume):
search = Ontap._vol_to_na_volume_name(volume)
contained_volumes = self.f.aggregate_volume_names(pool.name)
diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index b97dadb..f2fd568 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -463,7 +463,6 @@ class ErrorNumber(object):
PLUGIN_NOT_EXIST = 311

NOT_ENOUGH_SPACE = 350
- SIZE_TOO_SMALL = 353

TRANSPORT_COMMUNICATION = 400
TRANSPORT_SERIALIZATION = 401
@@ -477,6 +476,8 @@ class ErrorNumber(object):
EMPTY_ACCESS_GROUP = 511 # volume_mask() will fail if access group
# has no member/initiator.

+ POOL_NOT_READY = 512 # Pool is not ready for create/resize/etc
+
_LOCALS = locals()

@staticmethod
--
1.9.3
Tony Asleson
2014-09-04 15:41:06 UTC
Permalink
Patch series committed, with V2 for patch 8.

Thanks,
Tony
Post by Gris Ge
Rename UNSUPPORTED_VOLUME_EXPAND to UNSUPPORTED_VOLUME_GROW
lsmcli: add some key back to column display way
lsmcli: Rename some lsmcli output cloumn key
Rename VOLUME_ONLINE to VOLUME_ENABLE and _OFFLINE to _DISABLE
C and Python Library: Rename error ACCESS_GROUP_MASKED to
LAST_INIT_IN_ACCESS_GROUP
SMI-S plugin: Handle LAST_INIT_IN_ACCESS_GROUP, NO_STATE_CHANGE error
in access_group_initiator_delete()
ONTAP plugin: Improve volume_mask() and
access_group_initiator_delete()
Remove SIZE_TOO_SMALL error and add POOL_NOT_READY error
.../libstoragemgmt/libstoragemgmt_capabilities.h | 4 +-
.../include/libstoragemgmt/libstoragemgmt_error.h | 4 +-
.../include/libstoragemgmt/libstoragemgmt_types.h | 2 +-
plugin/nstor/nstor.py | 4 +-
plugin/ontap/na.py | 13 +-
plugin/ontap/ontap.py | 126 ++++++++--
plugin/sim/simarray.py | 2 +-
plugin/simc/simc_lsmplugin.c | 4 +-
plugin/smispy/dmtf.py | 6 -
plugin/smispy/smis.py | 58 ++---
plugin/targetd/targetd.py | 2 -
python_binding/lsm/_common.py | 6 +-
python_binding/lsm/_data.py | 6 +-
test/plugin_test.py | 2 +-
test/tester.c | 12 +-
tools/lsmcli/data_display.py | 272 ++++++++++-----------
16 files changed, 279 insertions(+), 244 deletions(-)
Loading...