Discussion:
[Libstoragemgmt-devel] [PATCH V2 00/13] New method: volume_raid_info()
Gris Ge
2015-02-15 07:35:13 UTC
* New method volume_raid_info() to query RAID type, extent/disk count,
minimum I/O size, and optimal I/O size.

* These plugins support this new method:
* sim
# Simply returns UNKNOWN values.
* simc
# Simply sets UNKNOWN on the output parameters.
* MegaRAID

* The C library part might be buggy considering my C skill set.

* Potential support by other plugins:
* Targetd:
We could use the LVM PE size for the minimum I/O size and strip size,
and set the RAID type as JBOD with an extent count of 1.
Once LVM RAID is supported, it could provide the real RAID type and
other information.
* SMI-S:
In the SMI-S spec, each StorageVolume has an associated StorageSetting,
but no definition states that ExtentStripeLength is the optimal I/O
size. Instead of guessing or wrangling with SNIA, simply returning
'no support' works better.
* ONTAP:
The patch for the ONTAP plugin is ready but not included in this patch
set, since it was based on my own testing and guesswork.
Waiting for NetApp's official answer about their optimal I/O size.
* Nstor:
No documentation found about strip settings.

* This is the best design and naming scheme I have.
PLEASE let me know if you have a better one.
Thank you very much in advance.
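
For reference, a minimal usage sketch of the new Python API (the
simulator URI and the choice of volume are illustrative only):

    import lsm

    c = lsm.Client('sim://')
    vol = c.volumes()[0]
    (raid_type, strip_size, extent_count,
     min_io_size, opt_io_size) = c.volume_raid_info(vol)
    if raid_type == lsm.Volume.RAID_TYPE_UNKNOWN:
        print("Plugin could not detect the RAID type")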

Changes in V2:
* Patch 6/13 and 10/13:
Tony introduced a new way for plugins to register newly added APIs with
full backward compatibility. The simulator C plugin implements this change.
* Patch 10/13:
Add missing capability LSM_CAP_VOLUME_RAID_INFO

Gris Ge (13):
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
Python Library: Fix decorator problem with docstrings
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
Constant Test: Fix missing constant with number in it.
lsmcli: Add volume_raid_info() support.
Simulator Plugin: Add initial volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
MegaRAID Plugin: Add volume_raid_info() support.

c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 35 ++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 13 +-
c_binding/lsm_mgmt.cpp | 45 +++++
c_binding/lsm_plugin_ipc.cpp | 175 ++++++++++------
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/sim/simulator.py | 8 +-
plugin/simc/simc_lsmplugin.c | 27 +++
python_binding/lsm/_client.py | 92 +++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
tools/utility/check_const.pl | 6 +-
18 files changed, 789 insertions(+), 68 deletions(-)
--
1.8.3.1
Gris Ge
2015-02-15 07:35:14 UTC
* Treating each MegaRAID DG (disk group) as an LSM pool.
* Based on storcli output of:
storcli /c0/dall show all J
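
As a rough sketch, the helpers added below behave like this (the system
ID is illustrative, not actual storcli output):

    _mega_size_to_lsm('1.818 TB')   # -> byte count of 1.818 TiB
    _pool_id_of('0', 'LSI-XYZ')     # -> 'LSI-XYZ:DG0'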

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 99 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 96 insertions(+), 3 deletions(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index e1e7e8d..5e3802b 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin)
+ Disk, VERSION, search_property, IPlugin, Pool)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -115,6 +115,47 @@ def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict):
disk_show_basic_dict['State'], Disk.STATUS_UNKNOWN)


+def _mega_size_to_lsm(mega_size):
+ """
+ LSI uses 'TB', 'GB', 'MB', 'KB', etc.; for LSM they are 'TiB', etc.
+ Return the size in bytes as an int.
+ """
+ re_regex = re.compile("^([0-9\.]+) ([EPTGMK])B$")
+ re_match = re_regex.match(mega_size)
+ if re_match:
+ return size_human_2_size_bytes(
+ "%s%siB" % (re_match.group(1), re_match.group(2)))
+
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "_mega_size_to_lsm(): Got unexpected LSI size string %s" %
+ mega_size)
+
+
+_POOL_STATUS_MAP = {
+ 'Onln': Pool.STATUS_OK,
+ 'Dgrd': Pool.STATUS_DEGRADED,
+ 'Pdgd': Pool.STATUS_DEGRADED,
+ 'Offln': Pool.STATUS_ERROR,
+ 'Rbld': Pool.STATUS_RECONSTRUCTING,
+ 'Optl': Pool.STATUS_OK,
+ # TODO(Gris Ge): The 'Optl' is undocumented, check with LSI.
+}
+
+
+def _pool_status_of(dg_top):
+ """
+ Return status
+ """
+ if dg_top['State'] in _POOL_STATUS_MAP.keys():
+ return _POOL_STATUS_MAP[dg_top['State']]
+ return Pool.STATUS_UNKNOWN
+
+
+def _pool_id_of(dg_id, sys_id):
+ return "%s:DG%s" % (sys_id, dg_id)
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -217,7 +258,11 @@ class MegaRAID(IPlugin):
ErrorNumber.PLUGIN_BUG,
"MegaRAID storcli failed with error %d: %s" %
(rc_status['Status Code'], rc_status['Description']))
- return ctrl_output[0].get('Response Data')
+ real_data = ctrl_output[0].get('Response Data')
+ if real_data and 'Response Data' in real_data.keys():
+ return real_data['Response Data']
+
+ return real_data
else:
return output

@@ -317,7 +362,55 @@ class MegaRAID(IPlugin):

return search_property(rc_lsm_disks, search_key, search_value)

+ @staticmethod
+ def _dg_free_size(dg_num, free_space_list):
+ """
+ Get information from 'FREE SPACE DETAILS' of /c0/dall show all.
+ """
+ for free_space in free_space_list:
+ if int(free_space['DG']) == int(dg_num):
+ return _mega_size_to_lsm(free_space['Size'])
+
+ return 0
+
+ def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num):
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ pool_id = _pool_id_of(dg_top['DG'], sys_id)
+ name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG'])
+ elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL
+ unsupported_actions = 0
+ # TODO(Gris Ge): contact LSI to get accurate total space and free
+ # space. The size we are using here is not what host
+ # got.
+ total_space = _mega_size_to_lsm(dg_top['Size'])
+ free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list)
+ status = _pool_status_of(dg_top)
+ status_info = ''
+ if status == Pool.STATUS_UNKNOWN:
+ status_info = dg_top['State']
+
+ plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG'])
+
+ return Pool(
+ pool_id, name, elem_type, unsupported_actions,
+ total_space, free_space, status, status_info,
+ sys_id, plugin_data)
+
@_handle_errors
def pools(self, search_key=None, search_value=None,
flags=Client.FLAG_RSVD):
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
+ lsm_pools = []
+ for ctrl_num in range(self._ctrl_count()):
+ dg_show_output = self._storcli_exec(
+ ["/c%d/dall" % ctrl_num, "show", "all"])
+ free_space_list = dg_show_output.get('FREE SPACE DETAILS', [])
+ for dg_top in dg_show_output['TOPOLOGY']:
+ if dg_top['Arr'] != '-':
+ continue
+ if dg_top['DG'] == '-':
+ continue
+ lsm_pools.append(
+ self._dg_top_to_lsm_pool(
+ dg_top, free_space_list, ctrl_num))
+
+ return search_property(lsm_pools, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-02-15 07:35:15 UTC
* Treating each MegaRAID VD as an LSM volume.
* Using 'storcli /c0/vall show all' to query all volumes.
* Add new capability: Capabilities.VOLUMES
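
The VD-to-volume ID mapping added below follows this shape (the system
ID is illustrative):

    sys_id = 'LSI-XYZ'                  # hypothetical system ID
    dg_id, vd_id = '0/1'.split('/')     # storcli 'DG/VD' field
    vol_id = "%s:VD%d" % (sys_id, int(vd_id))   # -> 'LSI-XYZ:VD1'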

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 46 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 5e3802b..ae2e953 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin, Pool)
+ Disk, VERSION, search_property, IPlugin, Pool, Volume)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -226,6 +226,7 @@ class MegaRAID(IPlugin):
"System not found")
cap = Capabilities()
cap.set(Capabilities.DISKS)
+ cap.set(Capabilities.VOLUMES)
return cap

def _storcli_exec(self, storcli_cmds, flag_json=True):
@@ -414,3 +415,46 @@ class MegaRAID(IPlugin):
dg_top, free_space_list, ctrl_num))

return search_property(lsm_pools, search_key, search_value)
+
+ @staticmethod
+ def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list,
+ vd_prop_info, vd_path):
+
+ vol_id = "%s:VD%d" % (sys_id, vd_id)
+ name = "VD %d" % vd_id
+ vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information.
+ block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz'])
+ num_of_blocks = vd_prop_info['Number of Blocks']
+ admin_state = Volume.ADMIN_STATE_ENABLED
+ if vd_prop_info['Exposed to OS'] != 'Yes' or \
+ vd_basic_info['Access'] != 'RW':
+ admin_state = Volume.ADMIN_STATE_DISABLED
+ pool_id = _pool_id_of(dg_id, sys_id)
+ plugin_data = vd_path
+ return Volume(
+ vol_id, name, vpd83, block_size, num_of_blocks, admin_state,
+ sys_id, pool_id, plugin_data)
+
+ @_handle_errors
+ def volumes(self, search_key=None, search_value=None, flags=0):
+ lsm_vols = []
+ for ctrl_num in range(self._ctrl_count()):
+ vol_show_output = self._storcli_exec(
+ ["/c%d/vall" % ctrl_num, "show", "all"])
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ for key_name in vol_show_output.keys():
+ if key_name.startswith('/c'):
+ vd_basic_info = vol_show_output[key_name][0]
+ (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/')
+ dg_id = int(dg_id)
+ vd_id = int(vd_id)
+ vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id]
+
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ lsm_vols.append(
+ MegaRAID._vd_to_lsm_vol(
+ vd_id, dg_id, sys_id, vd_basic_info,
+ vd_pd_info_list, vd_prop_info, key_name))
+
+ return search_property(lsm_vols, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-02-15 07:35:16 UTC
* In MegaRAID's storcli output, the disk status 'Rbld' indicates the
disk is being used to reconstruct pool data.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index ae2e953..83abf63 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -93,6 +93,7 @@ _DISK_STATE_MAP = {
'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK,
'UGood': Disk.STATUS_STOPPED | Disk.STATUS_OK,
'UBad': Disk.STATUS_STOPPED | Disk.STATUS_ERROR,
+ 'Rbld': Disk.STATUS_RECONSTRUCT,
}
--
1.8.3.1
Gris Ge
2015-02-15 07:35:17 UTC
* With a decorator, the docstring of the original method is lost.
* Use functools.wraps() to keep the docstring of the original method.
Check
http://stackoverflow.com/questions/1782843/python-decorator-problem-with-docstrings
for details.

* With this fix, users can check a method's help message in interactive
Python with:
help(lsm.Client.volume_create)
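
A minimal standalone demonstration of the difference (not part of the
patch itself):

    import functools

    def deco(func):
        @functools.wraps(func)   # without this, inner's metadata wins
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner

    @deco
    def foo():
        """original docstring"""

    print(foo.__doc__)   # -> 'original docstring'; None without wraps()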

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_common.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index f2fd568..4c87661 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -533,6 +533,7 @@ def return_requires(*types):
is quite important.
"""
def outer(func):
+ @functools.wraps(func)
def inner(*args, **kwargs):
r = func(*args, **kwargs)
--
1.8.3.1
Gris Ge
2015-02-15 07:35:18 UTC
* The docstring of lsm.Client.volume_raid_info() contains full detail
about this new method. Quick info:
Usage:
volume_raid_info(self, volume, flags=0)
Returns:
[raid_type, strip_size, extent_count, min_io_size, opt_io_size]
# strip_size is the size of strip on each disk/extent
# extent_count is the disk/extent count.
# min_io_size is minimum I/O size. Also the preferred I/O size
# of random I/O.
# opt_io_size is optimal I/O size. Also the preferred I/O size
# of sequential I/O.

* Why not use 'pool_raid_info' instead?
Some RAID systems (EMC VMAX/DMX and LVM RAID) implement RAID not at
the pool level but at the volume level.

* Why use 'extent_count' instead of 'disk_count'?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not use disks
directly to assemble a RAID group.

* Why do we need 'min_io_size' and 'opt_io_size' when we have
'extent_count' and 'strip_size'?
Normally, min_io_size is the strip_size, and opt_io_size could be
calculated from raid_type, strip_size and extent_count. But on NetApp,
an I/O test[1] indicates their optimal I/O size is 64KiB no matter how
many disks are in the RAID group. It might[2] be because NetApp
creates a WAFL filesystem on the RAID group, which changes the
optimal I/O size.

In general, the optimal I/O size or min_io_size of some RAID systems
might not be based on the strip size and RAID disk/extent count.
We'd better expose that information directly instead of forcing users
to guess from the strip size and disk/extent count.
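
As a sketch, here is the conventional calculation that, per the above,
cannot be trusted on every array (common RAID math, not taken from any
plugin in this series):

    from lsm import Volume

    def conventional_opt_io_size(raid_type, strip_size, extent_count):
        # Optimal sequential I/O = strip size * data-bearing extents.
        if raid_type == Volume.RAID_TYPE_RAID0:
            data_extents = extent_count
        elif raid_type == Volume.RAID_TYPE_RAID5:
            data_extents = extent_count - 1   # one parity extent
        elif raid_type == Volume.RAID_TYPE_RAID6:
            data_extents = extent_count - 2   # two parity extents
        else:
            return Volume.OPT_IO_SIZE_UNKNOWN
        return strip_size * data_extents

    # NetApp counterexample from [1]: 24-disk RAID6 with 4KiB strips.
    print(conventional_opt_io_size(Volume.RAID_TYPE_RAID6, 4096, 24))
    # -> 90112 (4096 * 22), yet the measured optimum was 65536.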

* New constants:
Volume.RAID_TYPE_UNKNOWN
# The plugin failed to detect the volume's RAID type.
Volume.RAID_TYPE_RAID0
# Stripe
Volume.RAID_TYPE_RAID1
# Mirror for two disks. For 4 disks or more, they are RAID10.
Volume.RAID_TYPE_RAID3
# Byte-level striping with dedicated parity
Volume.RAID_TYPE_RAID4
# Block-level striping with dedicated parity
Volume.RAID_TYPE_RAID5
# Block-level striping with distributed parity
Volume.RAID_TYPE_RAID6
# Block-level striping with two distributed parities, aka, RAID-DP
Volume.RAID_TYPE_RAID10
# Stripe of mirrors
Volume.RAID_TYPE_RAID15
# Parity of mirrors
Volume.RAID_TYPE_RAID16
# Dual parity of mirrors
Volume.RAID_TYPE_RAID50
# Stripe of parities
Volume.RAID_TYPE_RAID60
# Stripe of dual parities
Volume.RAID_TYPE_RAID51
# Mirror of parities
Volume.RAID_TYPE_RAID61
# Mirror of dual parities
Volume.RAID_TYPE_JBOD
# Just a bunch of disks, no parity, no striping.
Volume.RAID_TYPE_MIXED
# This volume contains multiple RAID settings.
Volume.RAID_TYPE_OTHER
# Vendor specific RAID type

Volume.STRIP_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN

* New Capability:
lsm.Capabilities.VOLUME_RAID_INFO

[1] On a 24-disk RAID6 (RAID-DP) with a 4KiB strip size (not changeable):
* With I/O size 90112 (4096 * 22), write speed is 73.4 MB/s
* With I/O size 65536, write speed is 86.9 MB/s
# the optimal_io_size exposed via sysfs from the SCSI BLOCK LIMITS (0xB0) VPD page

[2] No official NetApp document confirms or denies it. Waiting for NetApp's reply.

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_client.py | 92 +++++++++++++++++++++++++++++++++++++++++++
python_binding/lsm/_data.py | 42 ++++++++++++++++++++
2 files changed, 134 insertions(+)

diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index e637962..b0bc11a 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -971,3 +971,95 @@ class Client(INetworkAttachedStorage):
"""
_check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('target_ports', _del_self(locals()))
+
+ ## Returns the RAID information of the given volume
+ # @param self The this pointer
+ # @param raid_type The RAID type of this volume
+ # @param strip_size The size of the strip on each disk or other
+ # storage extent.
+ # @param extent_count The count of disks or other storage extents
+ # in this RAID group.
+ # @param min_io_size The preferred I/O size of random I/O.
+ # @param opt_io_size The preferred I/O size of sequential I/O.
+ # @returns [raid_type, strip_size, extent_count, min_io_size, opt_io_size], else raises LsmError
+ @_return_requires([int, int, int, int, int])
+ def volume_raid_info(self, volume, flags=FLAG_RSVD):
+ """Query the RAID information of certain volume.
+
+ Query the RAID type, strip size, extents count, minimum I/O size,
+ optimal I/O size of given volume.
+ This method requires this capability:
+ lsm.Capabilities.VOLUME_RAID_INFO
+
+ Args:
+ volume (Volume object): Volume to query
+ flags (int): Reserved for future use. Should be set as
+ lsm.Client.FLAG_RSVD
+ Returns:
+ [raid_type, strip_size, extent_count, min_io_size, opt_io_size]
+
+ raid_type (int): RAID Type of requested volume.
+ Could be one of these values:
+ Volume.RAID_TYPE_RAID0
+ Stripe
+ Volume.RAID_TYPE_RAID1
+ Two disks Mirror
+ Volume.RAID_TYPE_RAID3
+ Byte-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID4
+ Block-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID5
+ Block-level striping with distributed parity
+ Volume.RAID_TYPE_RAID6
+ Block-level striping with two distributed parities,
+ aka, RAID-DP
+ Volume.RAID_TYPE_RAID10
+ Stripe of mirrors
+ Volume.RAID_TYPE_RAID15
+ Parity of mirrors
+ Volume.RAID_TYPE_RAID16
+ Dual parity of mirrors
+ Volume.RAID_TYPE_RAID50
+ Stripe of parities
+ Volume.RAID_TYPE_RAID60
+ Stripe of dual parities
+ Volume.RAID_TYPE_RAID51
+ Mirror of parities
+ Volume.RAID_TYPE_RAID61
+ Mirror of dual parities
+ Volume.RAID_TYPE_JBOD
+ Just a bunch of disks, no parity, no striping.
+ Volume.RAID_TYPE_UNKNOWN
+ The plugin failed to detect the volume's RAID type.
+ Volume.RAID_TYPE_MIXED
+ This volume contains multiple RAID settings.
+ Volume.RAID_TYPE_OTHER
+ Vendor specific RAID type
+ strip_size(int): The size of strip on each disk or other storage
+ extent.
+ For RAID1/JBOD, it should be set as sector size.
+ If plugin failed to detect strip size, it should be set
+ as Volume.STRIP_SIZE_UNKNOWN(-1).
+ extent_count(int): The count of disks or other storage extents
+ assembled in the RAID group.
+ If plugin failed to detect extent_count, it should be set
+ as Volume.EXTENT_COUNT_UNKNOWN(-1).
+ min_io_size(int): The minimum I/O size, device preferred I/O
+ size for random I/O. Any I/O size not equal to a multiple
+ of this value may get significant speed penalty.
+ Normally it refers to strip size of each disk(extent).
+ If plugin failed to detect min_io_size, it should try these
+ values in the sequence of:
+ logical sector size -> physical sector size ->
+ Volume.MIN_IO_SIZE_UNKNOWN(-1).
+ opt_io_size(int): The optimal I/O size, device preferred I/O
+ size for sequential I/O. Normally it refers to RAID group
+ stripe size.
+ If plugin failed to detect opt_io_size, it should be set
+ to Volume.OPT_IO_SIZE_UNKNOWN
+ Raises:
+ LsmError:
+ ErrorNumber.NO_SUPPORT
+ No support.
+ """
+ return self._tp.rpc('volume_raid_info', _del_self(locals()))
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 067c766..8606f61 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -258,6 +258,46 @@ class Volume(IData):
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1

+ RAID_TYPE_UNKNOWN = -1
+ # The plugin failed to detect the volume's RAID type.
+ RAID_TYPE_RAID0 = 0
+ # Stripe
+ RAID_TYPE_RAID1 = 1
+ # Mirror for two disks. For 4 disks or more, they are RAID10.
+ RAID_TYPE_RAID3 = 3
+ # Byte-level striping with dedicated parity
+ RAID_TYPE_RAID4 = 4
+ # Block-level striping with dedicated parity
+ RAID_TYPE_RAID5 = 5
+ # Block-level striping with distributed parity
+ RAID_TYPE_RAID6 = 6
+ # Block-level striping with two distributed parities, aka, RAID-DP
+ RAID_TYPE_RAID10 = 10
+ # Stripe of mirrors
+ RAID_TYPE_RAID15 = 15
+ # Parity of mirrors
+ RAID_TYPE_RAID16 = 16
+ # Dual parity of mirrors
+ RAID_TYPE_RAID50 = 50
+ # Stripe of parities
+ RAID_TYPE_RAID60 = 60
+ # Stripe of dual parities
+ RAID_TYPE_RAID51 = 51
+ # Mirror of parities
+ RAID_TYPE_RAID61 = 61
+ # Mirror of dual parities
+ RAID_TYPE_JBOD = 20
+ # Just a bunch of disks, no parity, no striping.
+ RAID_TYPE_MIXED = 21
+ # This volume contains multiple RAID settings.
+ RAID_TYPE_OTHER = 22
+ # Vendor specific RAID type
+
+ STRIP_SIZE_UNKNOWN = -1
+ EXTENT_COUNT_UNKNOWN = -1
+ MIN_IO_SIZE_UNKNOWN = -1
+ OPT_IO_SIZE_UNKNOWN = -1
+
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
@@ -669,6 +709,8 @@ class Capabilities(IData):

VOLUME_ISCSI_CHAP_AUTHENTICATION = 53

+ VOLUME_RAID_INFO = 54
+
VOLUME_THIN = 55

#File system
--
1.8.3.1
Gris Ge
2015-02-15 07:35:20 UTC
* Allow check_const.pl to check constants with numbers in them.
Example:
LSM_VOLUME_RAID_TYPE_RAID1
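
The effect of the character-class change, demonstrated with an
equivalent pattern in Python's re module (the real checks live in
Perl):

    import re

    old = re.compile(r'^[A-Z][A-Z_]+$')
    new = re.compile(r'^[A-Z][A-Z_0-9]+$')
    name = 'LSM_VOLUME_RAID_TYPE_RAID1'
    print(bool(old.match(name)))   # False: '1' is outside [A-Z_]
    print(bool(new.match(name)))   # True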

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/utility/check_const.pl | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/utility/check_const.pl b/tools/utility/check_const.pl
index 9e5a700..e41c1e9 100644
--- a/tools/utility/check_const.pl
+++ b/tools/utility/check_const.pl
@@ -101,7 +101,7 @@ my $REGEX_C_CONST_FORMAT = qr/
(?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT)
)
(?<CNAME_PAT>
- [A-Z][A-Z_]+
+ [A-Z][A-Z_0-9]+
)
(?<HEADER1>
[\ \t]*
@@ -179,7 +179,7 @@ sub py_name_2_c_name($) {
# 2. Convert System to SYSTEM
# 3. Convert Capabilities to CAP and etc using %PY_CLASS_NAME_CONV;
my $py_name = shift;
- if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_]+)$/ ) {
+ if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_][A-Z_0-9]+)$/ ) {
my $py_class_name = $1;
my $py_var_name = $2;

@@ -308,7 +308,7 @@ sub _get_py_class_consts($$){
}
if ($line =~ /^$current_idention
[\ ]+
- ([A-Z][A-Z\_]+)
+ ([A-Z][A-Z\_0-9]+)
[\ ]*=[\ ]*
($REGEX_VALUE_FORMAT)/x){
my $var_name = $1;
--
1.8.3.1
Gris Ge
2015-02-15 07:35:19 UTC
* Please check the Python API document for details about the
lsm_volume_raid_info() method. Quick info:

Query the RAID information of the given volume.
@param[in] c Valid connection
@param[in] v Volume ptr.
@param[out] raid_type Enum of lsm_volume_raid_type
@param[out] strip_size Size of the strip on disk or other storage extent.
@param[out] extent_count Count of disks or other storage extents in this
RAID group.
@param[out] min_io_size Minimum I/O size, also the preferred I/O size
of random I/O.
@param[out] opt_io_size Optimal I/O size, also the preferred I/O size
of sequential I/O.
@param[in] flags Reserved, set to 0
@return LSM_ERR_OK on success else error reason.

* New plugin interface: lsm_plug_volume_raid_info

* New enum type: lsm_volume_raid_type

* New capability:
LSM_CAP_VOLUME_RAID_INFO

* New constants:
LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
/**^ Unknown */
LSM_VOLUME_RAID_TYPE_RAID0 = 0,
/**^ Stripe */
LSM_VOLUME_RAID_TYPE_RAID1 = 1,
/**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
LSM_VOLUME_RAID_TYPE_RAID3 = 3,
/**^ Byte-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID4 = 4,
/**^ Block-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID5 = 5,
/**^ Block-level striping with distributed parity */
LSM_VOLUME_RAID_TYPE_RAID6 = 6,
/**^ Block-level striping with two distributed parities, aka, RAID-DP */
LSM_VOLUME_RAID_TYPE_RAID10 = 10,
/**^ Stripe of mirrors */
LSM_VOLUME_RAID_TYPE_RAID15 = 15,
/**^ Parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID16 = 16,
/**^ Dual parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID50 = 50,
/**^ Stripe of parities */
LSM_VOLUME_RAID_TYPE_RAID60 = 60,
/**^ Stripe of dual parities */
LSM_VOLUME_RAID_TYPE_RAID51 = 51,
/**^ Mirror of parities */
LSM_VOLUME_RAID_TYPE_RAID61 = 61,
/**^ Mirror of dual parities */
LSM_VOLUME_RAID_TYPE_JBOD = 20,
/**^ Just a bunch of disks, no parity, no striping. */
LSM_VOLUME_RAID_TYPE_MIXED = 21,
/**^ This volume contains multiple RAID settings. */
LSM_VOLUME_RAID_TYPE_OTHER = 22,
/**^ Vendor specific RAID type */

LSM_VOLUME_STRIP_SIZE_UNKNOWN
LSM_VOLUME_EXTENT_COUNT_UNKNOWN
LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

V2: Change call back registration

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 +++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 35 +++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 +++++
c_binding/lsm_datatypes.hpp | 13 +-
c_binding/lsm_mgmt.cpp | 45 ++++++
c_binding/lsm_plugin_ipc.cpp | 175 ++++++++++++++-------
7 files changed, 274 insertions(+), 60 deletions(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 879f184..d65534c 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -844,6 +844,26 @@ extern "C" {
uint32_t *count,
lsm_flag flags);

+/**
+ * Query the RAID information of the given volume.
+ * @param[in] c Valid connection
+ * @param[in] v Volume ptr.
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on disk or other storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in this
+ * RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved, set to 0
+ * @return LSM_ERR_OK on success else error reason.
+ */
+int LSM_DLL_EXPORT lsm_volume_raid_info(
+ lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size, lsm_flag flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 7d6182c..18490f3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -77,6 +77,9 @@ typedef enum {

LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**< If you can configure iSCSI chap authentication */

+ LSM_CAP_VOLUME_RAID_INFO = 54,
+ /** ^ If you can query RAID information from volume */
+
LSM_CAP_VOLUME_THIN = 55, /**< Thin provisioned volumes are supported */

LSM_CAP_FS = 100, /**< List file systems */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index e7874f7..2ad9b71 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -319,6 +319,28 @@ typedef int (*lsm_plug_volume_resize)(lsm_plugin_ptr c, lsm_volume *volume,
*/
typedef int (*lsm_plug_volume_delete)(lsm_plugin_ptr c, lsm_volume *volume,
char **job, lsm_flag flags);
+
+/**
+ * Query the RAID information of a volume
+ * @param[in] c Valid lsm plug-in pointer
+ * @param[in] volume Volume to query
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on each disk or other
+ * storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in
+ * this RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved
+ * @return LSM_ERR_OK, else error reason
+ */
+typedef int (*lsm_plug_volume_raid_info)(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type, int32_t *strip_size,
+ int32_t *extent_count, int32_t *min_io_size,
+ int32_t *opt_io_size, lsm_flag flags);
+
/**
* Place a volume online, callback function signature.
* @param[in] c Valid lsm plug-in pointer
@@ -745,6 +767,7 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
lsm_flag flags);
/** \struct lsm_san_ops_v1
* \brief Block array oriented functions (callback functions)
+ * NOTE: This structure cannot change as we need to maintain backwards compatibility
*/
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
@@ -774,6 +797,7 @@ struct lsm_san_ops_v1 {

/** \struct lsm_fs_ops_v1
* \brief File system oriented functionality
+ * NOTE: This structure cannot change as we need to maintain backwards compatibility
*/
struct lsm_fs_ops_v1 {
lsm_plug_fs_list fs_list; /**< list file systems */
@@ -792,6 +816,7 @@ struct lsm_fs_ops_v1 {

/** \struct lsm_nas_ops_v1
* \brief NAS system oriented functionality call back functions
+ * NOTE: This structure cannot change as we need to maintain backwards compatibility
*/
struct lsm_nas_ops_v1 {
lsm_plug_nfs_auth_types nfs_auth_types; /**< List nfs authentication types */
@@ -839,6 +864,16 @@ int LSM_DLL_EXPORT lsm_register_plugin_v1( lsm_plugin_ptr plug,
struct lsm_nas_ops_v1 *nas_ops );

/**
+ * Used to register the call back function when the plugin supports volume raid
+ * info.
+ * @param plug Pointer provided by the framework
+ * @param fp Function pointer to register, can be NULL to un-reg
+ * @return LSM_ERR_OK on success, else error reason.
+ */
+int LSM_DLL_EXPORT lsm_register_callback_volume_raid_info( lsm_plugin_ptr plug,
+ lsm_plug_volume_raid_info fp);
+
+/**
* Used to retrieve private data for plug-in operation.
* @param plug Opaque plug-in pointer.
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 309a5e8..5465dad 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -131,6 +131,49 @@ typedef enum {
LSM_VOLUME_PROVISION_DEFAULT = 3 /**< Default provisioning */
} lsm_volume_provision_type;

+/**< \enum lsm_volume_raid_type Different types of RAID */
+typedef enum {
+ LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
+ /**^ Unknown */
+ LSM_VOLUME_RAID_TYPE_RAID0 = 0,
+ /**^ Stripe */
+ LSM_VOLUME_RAID_TYPE_RAID1 = 1,
+ /**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
+ LSM_VOLUME_RAID_TYPE_RAID3 = 3,
+ /**^ Byte-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID4 = 4,
+ /**^ Block-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID5 = 5,
+ /**^ Block-level striping with distributed parity */
+ LSM_VOLUME_RAID_TYPE_RAID6 = 6,
+ /**^ Block-level striping with two distributed parities, aka, RAID-DP */
+ LSM_VOLUME_RAID_TYPE_RAID10 = 10,
+ /**^ Stripe of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID15 = 15,
+ /**^ Parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID16 = 16,
+ /**^ Dual parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID50 = 50,
+ /**^ Stripe of parities */
+ LSM_VOLUME_RAID_TYPE_RAID60 = 60,
+ /**^ Stripe of dual parities */
+ LSM_VOLUME_RAID_TYPE_RAID51 = 51,
+ /**^ Mirror of parities */
+ LSM_VOLUME_RAID_TYPE_RAID61 = 61,
+ /**^ Mirror of dual parities */
+ LSM_VOLUME_RAID_TYPE_JBOD = 20,
+ /**^ Just a bunch of disks, no parity, no striping. */
+ LSM_VOLUME_RAID_TYPE_MIXED = 21,
+ /**^ This volume contains multiple RAID settings. */
+ LSM_VOLUME_RAID_TYPE_OTHER = 22,
+ /**^ Vendor specific RAID type */
+} lsm_volume_raid_type;
+
+#define LSM_VOLUME_STRIP_SIZE_UNKNOWN -1
+#define LSM_VOLUME_EXTENT_COUNT_UNKNOWN -1
+#define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN -1
+#define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN -1
+
/**
* Admin state for volume, enabled or disabled
*/
diff --git a/c_binding/lsm_datatypes.hpp b/c_binding/lsm_datatypes.hpp
index aed6891..0fabf54 100644
--- a/c_binding/lsm_datatypes.hpp
+++ b/c_binding/lsm_datatypes.hpp
@@ -177,6 +177,17 @@ struct _lsm_system {
#define LSM_PLUGIN_MAGIC 0xAA7A000B
#define LSM_IS_PLUGIN(obj) MAGIC_CHECK(obj, LSM_PLUGIN_MAGIC)

+struct LSM_DLL_LOCAL _lsm_san_ops_added
+{
+ lsm_plug_volume_raid_info vol_raid_info;
+};
+
+struct LSM_DLL_LOCAL _lsm_san_ops
+{
+ struct lsm_san_ops_v1 *v1;
+ struct _lsm_san_ops_added added;
+};
+
/**
* Information pertaining to the plug-in specifics.
*/
@@ -190,7 +201,7 @@ struct LSM_DLL_LOCAL _lsm_plugin {
lsm_plugin_register reg; /**< Plug-in registration */
lsm_plugin_unregister unreg; /**< Plug-in unregistration */
struct lsm_mgmt_ops_v1 *mgmt_ops; /**< Callback for management ops */
- struct lsm_san_ops_v1 *san_ops; /**< Callbacks for SAN ops */
+ struct _lsm_san_ops san_ops; /**< Callbacks for SAN ops */
struct lsm_nas_ops_v1 *nas_ops; /**< Callbacks for NAS ops */
struct lsm_fs_ops_v1 *fs_ops; /**< Callbacks for fs ops */
};
diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 37faed4..8d51825 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -1171,6 +1171,51 @@ int lsm_volume_delete(lsm_connect *c, lsm_volume *volume, char **job,

}

+int lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume,
+ lsm_volume_raid_type * raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ CONN_SETUP(c);
+
+ if( !LSM_IS_VOL(volume) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ if( !raid_type || !strip_size || !extent_count || !min_io_size ||
+ !opt_io_size) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ try {
+ std::map<std::string, Value> p;
+ p["volume"] = volume_to_value(volume);
+ p["flags"] = Value(flags);
+
+ Value parameters(p);
+ Value response;
+
+ rc = rpc(c, "volume_raid_info", parameters, response);
+ if( LSM_ERR_OK == rc ) {
+ // On success we get back an array of five values.
+ std::vector<Value> j = response.asArray();
+ *raid_type = (lsm_volume_raid_type) j[0].asInt32_t();
+ *strip_size = j[1].asInt32_t();
+ *extent_count = j[2].asInt32_t();
+ *min_io_size = j[3].asInt32_t();
+ *opt_io_size = j[4].asInt32_t();
+ }
+ } catch( const ValueException &ve ) {
+ rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
+ ve.what());
+ }
+ return rc;
+
+}
+
+
int lsm_iscsi_chap_auth(lsm_connect *c, const char *init_id,
const char *username, const char *password,
const char *out_user, const char *out_password,
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index 7e0d034..08ae877 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -115,7 +115,7 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
if(LSM_IS_PLUGIN(plug)) {
plug->private_data = private_data;
plug->mgmt_ops = mgm_op;
- plug->san_ops = san_op;
+ plug->san_ops.v1 = san_op;
plug->fs_ops = fs_op;
plug->nas_ops = nas_op;
rc = LSM_ERR_OK;
@@ -123,6 +123,18 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
return rc;
}

+int LSM_DLL_EXPORT lsm_register_callback_volume_raid_info( lsm_plugin_ptr plug,
+ lsm_plug_volume_raid_info fp)
+{
+ int rc = LSM_ERR_INVALID_ARGUMENT;
+
+ if(LSM_IS_PLUGIN(plug)) {
+ plug->san_ops.added.vol_raid_info = fp;
+ rc = LSM_ERR_OK;
+ }
+ return rc;
+}
+
void *lsm_private_data_get(lsm_plugin_ptr plug)
{
if (!LSM_IS_PLUGIN(plug)) {
@@ -514,13 +526,13 @@ static int handle_target_ports(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->san_ops->target_port_list ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->target_port_list ) {
lsm_target_port **target_ports = NULL;
uint32_t count = 0;

if( LSM_FLAG_EXPECTED_TYPE(params) &&
((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK )) {
- rc = p->san_ops->target_port_list(p, key, val, &target_ports, &count,
+ rc = p->san_ops.v1->target_port_list(p, key, val, &target_ports, &count,
LSM_FLAG_GET_VALUE(params));
if( LSM_ERR_OK == rc) {
std::vector<Value> result;
@@ -599,13 +611,13 @@ static int handle_volumes(lsm_plugin_ptr p, Value &params, Value &response)
char *val = NULL;


- if( p && p->san_ops && p->san_ops->vol_get ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_get ) {
lsm_volume **vols = NULL;
uint32_t count = 0;

if( LSM_FLAG_EXPECTED_TYPE(params) &&
(rc = get_search_params(params, &key, &val)) == LSM_ERR_OK ) {
- rc = p->san_ops->vol_get(p, key, val, &vols, &count,
+ rc = p->san_ops.v1->vol_get(p, key, val, &vols, &count,
LSM_FLAG_GET_VALUE(params));

get_volumes(rc, vols, count, response);
@@ -641,13 +653,13 @@ static int handle_disks(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->san_ops->disk_get ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->disk_get ) {
lsm_disk **disks = NULL;
uint32_t count = 0;

if( LSM_FLAG_EXPECTED_TYPE(params) &&
(rc = get_search_params(params, &key, &val)) == LSM_ERR_OK ) {
- rc = p->san_ops->disk_get(p, key, val, &disks, &count,
+ rc = p->san_ops.v1->disk_get(p, key, val, &disks, &count,
LSM_FLAG_GET_VALUE(params));
get_disks(rc, disks, count, response);
free(key);
@@ -664,7 +676,7 @@ static int handle_disks(lsm_plugin_ptr p, Value &params, Value &response)
static int handle_volume_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->vol_create ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_create ) {

Value v_p = params["pool"];
Value v_name = params["volume_name"];
@@ -685,7 +697,7 @@ static int handle_volume_create(lsm_plugin_ptr p, Value &params, Value &response
uint64_t size = v_size.asUint64_t();
lsm_volume_provision_type pro = (lsm_volume_provision_type)v_prov.asInt32_t();

- rc = p->san_ops->vol_create(p, pool, name, size, pro, &vol, &job,
+ rc = p->san_ops.v1->vol_create(p, pool, name, size, pro, &vol, &job,
LSM_FLAG_GET_VALUE(params));

Value v = volume_to_value(vol);
@@ -710,7 +722,7 @@ static int handle_volume_create(lsm_plugin_ptr p, Value &params, Value &response
static int handle_volume_resize(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->vol_resize ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_resize ) {
Value v_vol = params["volume"];
Value v_size = params["new_size_bytes"];

@@ -724,7 +736,7 @@ static int handle_volume_resize(lsm_plugin_ptr p, Value &params, Value &response
uint64_t size = v_size.asUint64_t();
char *job = NULL;

- rc = p->san_ops->vol_resize(p, vol, size, &resized_vol, &job,
+ rc = p->san_ops.v1->vol_resize(p, vol, size, &resized_vol, &job,
LSM_FLAG_GET_VALUE(params));

Value v = volume_to_value(resized_vol);
@@ -749,7 +761,7 @@ static int handle_volume_replicate(lsm_plugin_ptr p, Value &params, Value &respo
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->vol_replicate ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_replicate ) {

Value v_pool = params["pool"];
Value v_vol_src = params["volume_src"];
@@ -772,7 +784,7 @@ static int handle_volume_replicate(lsm_plugin_ptr p, Value &params, Value &respo
char *job = NULL;

if( vol ) {
- rc = p->san_ops->vol_replicate(p, pool, rep, vol, name,
+ rc = p->san_ops.v1->vol_replicate(p, pool, rep, vol, name,
&newVolume, &job,
LSM_FLAG_GET_VALUE(params));

@@ -802,7 +814,7 @@ static int handle_volume_replicate_range_block_size( lsm_plugin_ptr p,
int rc = LSM_ERR_NO_SUPPORT;
uint32_t block_size = 0;

- if( p && p->san_ops && p->san_ops->vol_rep_range_bs ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_rep_range_bs ) {
Value v_s = params["system"];

if( IS_CLASS_SYSTEM(v_s) &&
@@ -810,7 +822,7 @@ static int handle_volume_replicate_range_block_size( lsm_plugin_ptr p,
lsm_system *sys = value_to_system(v_s);

if( sys ) {
- rc = p->san_ops->vol_rep_range_bs(p, sys, &block_size,
+ rc = p->san_ops.v1->vol_rep_range_bs(p, sys, &block_size,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_OK == rc ) {
@@ -834,7 +846,7 @@ static int handle_volume_replicate_range(lsm_plugin_ptr p, Value &params,
int rc = LSM_ERR_NO_SUPPORT;
uint32_t range_count = 0;
char *job = NULL;
- if( p && p->san_ops && p->san_ops->vol_rep_range ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_rep_range ) {
Value v_rep = params["rep_type"];
Value v_vol_src = params["volume_src"];
Value v_vol_dest = params["volume_dest"];
@@ -855,7 +867,7 @@ static int handle_volume_replicate_range(lsm_plugin_ptr p, Value &params,

if( source && dest && ranges ) {

- rc = p->san_ops->vol_rep_range(p, repType, source, dest, ranges,
+ rc = p->san_ops.v1->vol_rep_range(p, repType, source, dest, ranges,
range_count, &job,
LSM_FLAG_GET_VALUE(params));

@@ -880,10 +892,54 @@ static int handle_volume_replicate_range(lsm_plugin_ptr p, Value &params,
return rc;
}

+static int handle_volume_raid_info(lsm_plugin_ptr p, Value &params,
+ Value &response)
+{
+ int rc = LSM_ERR_NO_SUPPORT;
+ if( p && p->san_ops.added.vol_raid_info) {
+ Value v_vol = params["volume"];
+
+ if(IS_CLASS_VOLUME(v_vol) &&
+ LSM_FLAG_EXPECTED_TYPE(params) ) {
+ lsm_volume *vol = value_to_volume(v_vol);
+ std::vector<Value> result;
+
+ if( vol ) {
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size;
+ int32_t extent_count;
+ int32_t min_io_size;
+ int32_t opt_io_size;
+
+ rc = p->san_ops.added.vol_raid_info(
+ p, vol, &raid_type, &strip_size, &extent_count,
+ &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params));
+
+ if( LSM_ERR_OK == rc ) {
+ result.push_back(Value((int32_t)raid_type));
+ result.push_back(Value(strip_size));
+ result.push_back(Value(extent_count));
+ result.push_back(Value(min_io_size));
+ result.push_back(Value(opt_io_size));
+ response = Value(result);
+ }
+
+ lsm_volume_record_free(vol);
+ } else {
+ rc = LSM_ERR_NO_MEMORY;
+ }
+
+ } else {
+ rc = LSM_ERR_TRANSPORT_INVALID_ARG;
+ }
+ }
+ return rc;
+}
+
static int handle_volume_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->vol_delete ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_delete ) {
Value v_vol = params["volume"];

if(IS_CLASS_VOLUME(v_vol) &&
@@ -893,7 +949,7 @@ static int handle_volume_delete(lsm_plugin_ptr p, Value &params, Value &response
if( vol ) {
char *job = NULL;

- rc = p->san_ops->vol_delete(p, vol, &job,
+ rc = p->san_ops.v1->vol_delete(p, vol, &job,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_JOB_STARTED == rc ) {
@@ -918,8 +974,8 @@ static int handle_vol_enable_disable( lsm_plugin_ptr p, Value &params,
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops &&
- ((online)? p->san_ops->vol_enable : p->san_ops->vol_disable)) {
+ if( p && p->san_ops.v1 &&
+ ((online)? p->san_ops.v1->vol_enable : p->san_ops.v1->vol_disable)) {

Value v_vol = params["volume"];

@@ -928,10 +984,10 @@ static int handle_vol_enable_disable( lsm_plugin_ptr p, Value &params,
lsm_volume *vol = value_to_volume(v_vol);
if( vol ) {
if( online ) {
- rc = p->san_ops->vol_enable(p, vol,
+ rc = p->san_ops.v1->vol_enable(p, vol,
LSM_FLAG_GET_VALUE(params));
} else {
- rc = p->san_ops->vol_disable(p, vol,
+ rc = p->san_ops.v1->vol_disable(p, vol,
LSM_FLAG_GET_VALUE(params));
}

@@ -962,14 +1018,14 @@ static int ag_list(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->san_ops->ag_list ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_list ) {

if( LSM_FLAG_EXPECTED_TYPE(params) &&
(rc = get_search_params(params, &key, &val)) == LSM_ERR_OK ) {
lsm_access_group **groups = NULL;
uint32_t count;

- rc = p->san_ops->ag_list(p, key, val, &groups, &count,
+ rc = p->san_ops.v1->ag_list(p, key, val, &groups, &count,
LSM_FLAG_GET_VALUE(params));
if( LSM_ERR_OK == rc ) {
response = access_group_list_to_value(groups, count);
@@ -992,7 +1048,7 @@ static int ag_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_create ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_create ) {
Value v_name = params["name"];
Value v_init_id = params["init_id"];
Value v_init_type = params["init_type"];
@@ -1008,7 +1064,7 @@ static int ag_create(lsm_plugin_ptr p, Value &params, Value &response)
lsm_system *system = value_to_system(v_system);

if( system ) {
- rc = p->san_ops->ag_create(
+ rc = p->san_ops.v1->ag_create(
p,
v_name.asC_str(),
v_init_id.asC_str(),
@@ -1034,7 +1090,7 @@ static int ag_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_delete ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_delete ) {
Value v_access_group = params["access_group"];

if( IS_CLASS_ACCESS_GROUP(v_access_group) &&
@@ -1043,7 +1099,7 @@ static int ag_delete(lsm_plugin_ptr p, Value &params, Value &response)
lsm_access_group *ag = value_to_access_group(v_access_group);

if( ag ) {
- rc = p->san_ops->ag_delete(p, ag, LSM_FLAG_GET_VALUE(params));
+ rc = p->san_ops.v1->ag_delete(p, ag, LSM_FLAG_GET_VALUE(params));
lsm_access_group_record_free(ag);
} else {
rc = LSM_ERR_NO_MEMORY;
@@ -1060,7 +1116,7 @@ static int ag_initiator_add(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_add_initiator ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_add_initiator ) {

Value v_group = params["access_group"];
Value v_init_id = params["init_id"];
@@ -1079,7 +1135,7 @@ static int ag_initiator_add(lsm_plugin_ptr p, Value &params, Value &response)
lsm_access_group_init_type id_type =
(lsm_access_group_init_type) v_init_type.asInt32_t();

- rc = p->san_ops->ag_add_initiator(p, ag, id, id_type,
+ rc = p->san_ops.v1->ag_add_initiator(p, ag, id, id_type,
&updated_access_group,
LSM_FLAG_GET_VALUE(params));

@@ -1105,7 +1161,7 @@ static int ag_initiator_del(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_del_initiator ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_del_initiator ) {

Value v_group = params["access_group"];
Value v_init_id = params["init_id"];
@@ -1123,7 +1179,7 @@ static int ag_initiator_del(lsm_plugin_ptr p, Value &params, Value &response)
const char *id = v_init_id.asC_str();
lsm_access_group_init_type id_type =
(lsm_access_group_init_type) v_init_type.asInt32_t();
- rc = p->san_ops->ag_del_initiator(p, ag, id, id_type,
+ rc = p->san_ops.v1->ag_del_initiator(p, ag, id, id_type,
&updated_access_group,
LSM_FLAG_GET_VALUE(params));

@@ -1148,7 +1204,7 @@ static int volume_mask(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_grant ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_grant ) {

Value v_group = params["access_group"];
Value v_vol = params["volume"];
@@ -1161,7 +1217,7 @@ static int volume_mask(lsm_plugin_ptr p, Value &params, Value &response)
lsm_volume *vol = value_to_volume(v_vol);

if( ag && vol ) {
- rc = p->san_ops->ag_grant(p, ag, vol,
+ rc = p->san_ops.v1->ag_grant(p, ag, vol,
LSM_FLAG_GET_VALUE(params));
} else {
rc = LSM_ERR_NO_MEMORY;
@@ -1182,7 +1238,7 @@ static int volume_unmask(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_revoke ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_revoke ) {

Value v_group = params["access_group"];
Value v_vol = params["volume"];
@@ -1195,7 +1251,7 @@ static int volume_unmask(lsm_plugin_ptr p, Value &params, Value &response)
lsm_volume *vol = value_to_volume(v_vol);

if( ag && vol ) {
- rc = p->san_ops->ag_revoke(p, ag, vol,
+ rc = p->san_ops.v1->ag_revoke(p, ag, vol,
LSM_FLAG_GET_VALUE(params));
} else {
rc = LSM_ERR_NO_MEMORY;
@@ -1216,7 +1272,7 @@ static int vol_accessible_by_ag(lsm_plugin_ptr p, Value &params, Value &response
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->vol_accessible_by_ag ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_accessible_by_ag ) {
Value v_access_group = params["access_group"];

if( IS_CLASS_ACCESS_GROUP(v_access_group) &&
@@ -1227,7 +1283,7 @@ static int vol_accessible_by_ag(lsm_plugin_ptr p, Value &params, Value &response
lsm_volume **vols = NULL;
uint32_t count = 0;

- rc = p->san_ops->vol_accessible_by_ag(p, ag, &vols, &count,
+ rc = p->san_ops.v1->vol_accessible_by_ag(p, ag, &vols, &count,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_OK == rc ) {
@@ -1257,7 +1313,7 @@ static int ag_granted_to_volume(lsm_plugin_ptr p, Value &params, Value &response
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->ag_granted_to_vol ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->ag_granted_to_vol ) {

Value v_vol = params["volume"];

@@ -1269,7 +1325,7 @@ static int ag_granted_to_volume(lsm_plugin_ptr p, Value &params, Value &response
lsm_access_group **groups = NULL;
uint32_t count = 0;

- rc = p->san_ops->ag_granted_to_vol(p, volume, &groups, &count,
+ rc = p->san_ops.v1->ag_granted_to_vol(p, volume, &groups, &count,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_OK == rc ) {
@@ -1298,7 +1354,7 @@ static int volume_dependency(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->vol_child_depends ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_child_depends ) {

Value v_vol = params["volume"];

@@ -1309,7 +1365,7 @@ static int volume_dependency(lsm_plugin_ptr p, Value &params, Value &response)
if( volume ) {
uint8_t yes;

- rc = p->san_ops->vol_child_depends(p, volume, &yes,
+ rc = p->san_ops.v1->vol_child_depends(p, volume, &yes,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_OK == rc ) {
@@ -1333,7 +1389,7 @@ static int volume_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->vol_child_depends_rm ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->vol_child_depends_rm ) {

Value v_vol = params["volume"];

@@ -1345,7 +1401,7 @@ static int volume_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response

char *job = NULL;

- rc = p->san_ops->vol_child_depends_rm(p, volume, &job,
+ rc = p->san_ops.v1->vol_child_depends_rm(p, volume, &job,
LSM_FLAG_GET_VALUE(params));

if( LSM_ERR_JOB_STARTED == rc ) {
@@ -1371,7 +1427,7 @@ static int fs(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->fs_ops->fs_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_list ) {
if( LSM_FLAG_EXPECTED_TYPE(params) &&
((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK )) {

@@ -1407,7 +1463,7 @@ static int fs_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_create ) {

Value v_pool = params["pool"];
Value v_name = params["name"];
@@ -1459,7 +1515,7 @@ static int fs_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_delete ) {

Value v_fs = params["fs"];

@@ -1493,7 +1549,7 @@ static int fs_resize(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_resize ) {
+ if( p && p->fs_ops && p->fs_ops->fs_resize ) {

Value v_fs = params["fs"];
Value v_size = params["new_size_bytes"];
@@ -1541,7 +1597,7 @@ static int fs_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_clone ) {

Value v_src_fs = params["src_fs"];
Value v_name = params["dest_fs_name"];
@@ -1597,7 +1653,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_OK;

- if( p && p->san_ops && p->fs_ops->fs_file_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_file_clone ) {

Value v_fs = params["fs"];
Value v_src_name = params["src_file_name"];
@@ -1648,7 +1704,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1686,7 +1742,7 @@ static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency_rm ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency_rm ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1725,7 +1781,7 @@ static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &respon
static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_list ) {

Value v_fs = params["fs"];

@@ -1766,7 +1822,7 @@ static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_create ) {

Value v_fs = params["fs"];
Value v_ss_name = params["snapshot_name"];
@@ -1814,7 +1870,7 @@ static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_delete ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
@@ -1851,7 +1907,7 @@ static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_restore(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_restore ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_restore ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
@@ -2069,7 +2125,7 @@ static int iscsi_chap(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->san_ops->iscsi_chap_auth ) {
+ if( p && p->san_ops.v1 && p->san_ops.v1->iscsi_chap_auth ) {
Value v_init = params["init_id"];
Value v_in_user = params["in_user"];
Value v_in_password = params["in_password"];
@@ -2087,7 +2143,7 @@ static int iscsi_chap(lsm_plugin_ptr p, Value &params, Value &response)
Value::null_t == v_out_password.valueType()) &&
LSM_FLAG_EXPECTED_TYPE(params) ) {

- rc = p->san_ops->iscsi_chap_auth(p, v_init.asC_str(),
+ rc = p->san_ops.v1->iscsi_chap_auth(p, v_init.asC_str(),
v_in_user.asC_str(),
v_in_password.asC_str(),
v_out_user.asC_str(),
@@ -2153,6 +2209,7 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("volume_replicate_range", handle_volume_replicate_range)
("volume_resize", handle_volume_resize)
("volumes_accessible_by_access_group", vol_accessible_by_ag)
+ ("volume_raid_info", handle_volume_raid_info)
("volumes", handle_volumes);

static int process_request(lsm_plugin_ptr p, const std::string &method, Value &request,
--
1.8.3.1
Gris Ge
2015-02-15 07:35:21 UTC
* New command:
lsmcli volume-raid-info --vol <VOL_ID>

* New alias:
lsmcli vri == lsmcli volume-raid-info

Changes in V2:
* Fix the output format when a volume that cannot be found is passed
to _get_item() in volume_raid_info()

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/cmdline.py | 18 +++++++++++++-
tools/lsmcli/data_display.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index a781314..980b3a0 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,7 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,

from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- vol_provision_str_to_type, vol_rep_type_str_to_type)
+ vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo)


## Wraps the invocation to the command line
@@ -368,6 +368,14 @@ cmds = (
),

dict(
+ name='volume-raid-info',
+ help='Query volume RAID information',
+ args=[
+ dict(vol_id_opt),
+ ],
+ ),
+
+ dict(
name='access-group-create',
help='Create an access group',
args=[
@@ -628,6 +636,7 @@ aliases = (
['aa', 'access-group-add'],
['ar', 'access-group-remove'],
['ad', 'access-group-delete'],
+ ['vri', 'volume-raid-info'],
)


@@ -1318,6 +1327,13 @@ class CmdLine:
self._wait_for_it("volume-dependant-rm",
self.c.volume_child_dependency_rm(v), None)

+ def volume_raid_info(self, args):
+ lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume")
+ self.display_data(
+ [
+ VolumeRAIDInfo(
+ lsm_vol.id, *self.c.volume_raid_info(lsm_vol))])
+
## Displays file system dependants
def fs_dependants(self, args):
fs = _get_item(self.c.fs(), args.fs, "File System")
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 285a14f..6dd5ffa 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -243,6 +243,41 @@ class PlugData(object):
self.version = plugin_version


+class VolumeRAIDInfo(object):
+ _RAID_TYPE_MAP = {
+ Volume.RAID_TYPE_RAID0: 'RAID0',
+ Volume.RAID_TYPE_RAID1: 'RAID1',
+ Volume.RAID_TYPE_RAID3: 'RAID3',
+ Volume.RAID_TYPE_RAID4: 'RAID4',
+ Volume.RAID_TYPE_RAID5: 'RAID5',
+ Volume.RAID_TYPE_RAID6: 'RAID6',
+ Volume.RAID_TYPE_RAID10: 'RAID10',
+ Volume.RAID_TYPE_RAID15: 'RAID15',
+ Volume.RAID_TYPE_RAID16: 'RAID16',
+ Volume.RAID_TYPE_RAID50: 'RAID50',
+ Volume.RAID_TYPE_RAID60: 'RAID60',
+ Volume.RAID_TYPE_RAID51: 'RAID51',
+ Volume.RAID_TYPE_RAID61: 'RAID61',
+ Volume.RAID_TYPE_JBOD: 'JBOD',
+ Volume.RAID_TYPE_MIXED: 'MIXED',
+ Volume.RAID_TYPE_OTHER: 'OTHER',
+ Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ }
+
+ def __init__(self, vol_id, raid_type, strip_size, extent_count,
+ min_io_size, opt_io_size):
+ self.vol_id = vol_id
+ self.raid_type = raid_type
+ self.strip_size = strip_size
+ self.extent_count = extent_count
+ self.min_io_size = min_io_size
+ self.opt_io_size = opt_io_size
+
+ @staticmethod
+ def raid_type_to_str(raid_type):
+ return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP)
+
+
class DisplayData(object):

def __init__(self):
@@ -498,6 +533,29 @@ class DisplayData(object):
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}

+ VOL_RAID_INFO_HEADER = OrderedDict()
+ VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID'
+ VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type'
+ VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size'
+ VOL_RAID_INFO_HEADER['extent_count'] = 'Extent Count'
+ VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size'
+ VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size'
+
+ VOL_RAID_INFO_COLUMN_SKIP_KEYS = []
+
+ VOL_RAID_INFO_VALUE_CONV_ENUM = {
+ 'raid_type': VolumeRAIDInfo.raid_type_to_str,
+ }
+ VOL_RAID_INFO_VALUE_CONV_HUMAN = [
+ 'strip_size', 'min_io_size', 'opt_io_size']
+
+ VALUE_CONVERT[VolumeRAIDInfo] = {
+ 'headers': VOL_RAID_INFO_HEADER,
+ 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS,
+ 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN,
+ }
+
@staticmethod
def _get_man_pro_value(obj, key, value_conv_enum, value_conv_human,
flag_human, flag_enum):
--
1.8.3.1
Gris Ge
2015-02-15 07:35:22 UTC
Permalink
* Simply return XXX_UNKNOWN.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/sim/simulator.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index 79b6df5..892c67e 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -17,7 +17,7 @@
# Gris Ge <***@redhat.com>

from lsm import (uri_parse, VERSION, Capabilities, INfs,
- IStorageAreaNetwork, search_property)
+ IStorageAreaNetwork, search_property, Volume)

from simarray import SimArray

@@ -290,3 +290,9 @@ class SimPlugin(INfs, IStorageAreaNetwork):
return search_property(
[SimPlugin._sim_data_2_lsm(t) for t in sim_tgts],
search_key, search_value)
+
+ def volume_raid_info(self, volume, flags=0):
+ return [
+ Volume.RAID_TYPE_UNKNOWN, Volume.STRIP_SIZE_UNKNOWN,
+ Volume.EXTENT_COUNT_UNKNOWN, Volume.MIN_IO_SIZE_UNKNOWN,
+ Volume.OPT_IO_SIZE_UNKNOWN]
--
1.8.3.1
Gris Ge
2015-02-15 07:35:23 UTC
Permalink
* Simply set XXX_UNKNOWN on output parameter.

V2:
- Add call to register volume_raid_info
- Add LSM_CAP_VOLUME_RAID_INFO to capabilities

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/simc/simc_lsmplugin.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)

diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index 7c4d287..324731e 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -391,6 +391,7 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_EXPORTS,
LSM_CAP_EXPORT_FS,
LSM_CAP_EXPORT_REMOVE,
+ LSM_CAP_VOLUME_RAID_INFO,
-1
);

@@ -956,6 +957,29 @@ static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
return rc;
}

+static int volume_raid_info(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
+ struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
+
+ if( !av) {
+ rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
+ "volume not found!");
+ }
+
+ *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN;
+ *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN;
+ *extent_count = LSM_VOLUME_EXTENT_COUNT_UNKNOWN;
+ *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN;
+ *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN;
+ return rc;
+}
+
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume *v,
lsm_flag flags)
{
@@ -2245,6 +2269,9 @@ int load( lsm_plugin_ptr c, const char *uri, const char *password,
} else {
rc = lsm_register_plugin_v1( c, pd, &mgm_ops,
&san_ops, &fs_ops, &nfs_ops);
+ if( LSM_ERR_OK == rc ) {
+ rc = lsm_register_callback_volume_raid_info(c, volume_raid_info);
+ }
}
}
return rc;
--
1.8.3.1
Gris Ge
2015-02-15 07:35:24 UTC
Permalink
* Simply run that command and check the volume ID of output.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/cmdtest.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/test/cmdtest.py b/test/cmdtest.py
index 551e7af..b4a2405 100755
--- a/test/cmdtest.py
+++ b/test/cmdtest.py
@@ -676,6 +676,25 @@ def search_test(cap, system_id):
volume_delete(vol_id)
return

+def volume_raid_info_test(cap, system_id):
+ if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']:
+ test_pool_id = name_to_id(OP_POOL, test_pool_name)
+
+ if test_pool_id is None:
+ print 'Pool %s is not available!' % test_pool_name
+ exit(10)
+
+ vol_id = create_volume(test_pool_id)
+ out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1]
+ r = parse(out)
+ if len(r[0]) != 6:
+ print "volume-raid-info got expected output: %s" % out
+ exit(10)
+ if r[0][0] != vol_id:
+ print "volume-raid-info output volume ID is not requested " \
+ "volume ID %s" % out
+ exit(10)
+ return

def run_all_tests(cap, system_id):
test_display(cap, system_id)
@@ -688,6 +707,8 @@ def run_all_tests(cap, system_id):

search_test(cap, system_id)

+ volume_raid_info_test(cap, system_id)
+
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c", "--command", action="store", type="string",
--
1.8.3.1
Gris Ge
2015-02-15 07:35:25 UTC
Permalink
* Simply invoke lsm_volume_raid_info() with no additional test.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)

diff --git a/test/tester.c b/test/tester.c
index 150c38b..8738be1 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -2853,6 +2853,35 @@ START_TEST(test_volume_vpd_check)
}
END_TEST

+START_TEST(test_volume_raid_info)
+{
+ lsm_volume *volume = NULL;
+ char *job = NULL;
+ lsm_pool *pool = get_test_pool(c);
+
+ int rc = lsm_volume_create(
+ c, pool, "volume_raid_info_test", 20000000,
+ LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD);
+
+ fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED,
+ "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c)));
+
+ if( LSM_ERR_JOB_STARTED == rc ) {
+ volume = wait_for_job_vol(c, &job);
+ }
+
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size, extent_count, min_io_size, opt_io_size;
+
+ G(
+ rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size,
+ &extent_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD);
+
+ G(rc, lsm_volume_record_free, volume);
+ volume = NULL;
+}
+END_TEST
+
Suite * lsm_suite(void)
{
Suite *s = suite_create("libStorageMgmt");
@@ -2888,6 +2917,7 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_ss);
tcase_add_test(basic, test_nfs_exports);
tcase_add_test(basic, test_invalid_input);
+ tcase_add_test(basic, test_volume_raid_info);

suite_add_tcase(s, basic);
return s;
--
1.8.3.1
Gris Ge
2015-02-15 07:35:26 UTC
Permalink
* Use 'storcli /c0/v1 show all' command line output to determine
RAID type, strip size and disk count.

* Calculate optimal I/O size as strip size multiplied by the count of
RAID data (not mirror, not parity) disks.

* Tested query on RAID 0, 1, 5, 10, 50.

* Tested the optimal I/O size on RAID 5:
[***@storageqe-08 ~]# lsmenv mega lsmcli vri --vol SV03403550:VD1
Device alias: mega
URI: megaraid://
lsmcli vri --vol SV03403550:VD1
Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
--------------------------------------------------------------------------------------------
SV03403550:VD1 | RAID5 | 131072 | 5 | 131072 | 524288

Time: 0:00.29
[***@storageqe-08 ~]# dd if=/dev/urandom of=test.img bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 153.174 s, 6.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 58.9573 s, 17.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 37.7282 s, 27.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 35.3351 s, 29.7 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 70.0779 s, 15.0 MB/s
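
The optimal I/O size above (524288) is the strip size multiplied by the
data disk count. Below is a minimal sketch of that calculation
(hypothetical helper, not part of the patch; assumes the values were
already parsed from storcli output):

    # Sketch: optimal I/O size = strip size * data (non-parity,
    # non-mirror) disk count.
    def _opt_io_size(raid_type, strip_size, drives_per_span, span_depth):
        disk_count = drives_per_span * span_depth
        if raid_type == 'RAID0':
            data_disks = disk_count
        elif raid_type == 'RAID5':
            data_disks = disk_count - 1   # one parity disk
        elif raid_type == 'RAID6':
            data_disks = disk_count - 2   # two parity disks
        elif raid_type == 'RAID10':
            data_disks = disk_count // 2  # half of the disks are mirrors
        else:
            raise ValueError("unsupported RAID type: %s" % raid_type)
        return strip_size * data_disks

    # RAID5, 5 disks, 128 KiB strip: 4 data disks * 131072 = 524288
    assert _opt_io_size('RAID5', 131072, 5, 1) == 524288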

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 83abf63..e754cd8 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -157,6 +157,33 @@ def _pool_id_of(dg_id, sys_id):
return "%s:DG%s" % (sys_id, dg_id)


+_RAID_TYPE_MAP = {
+ 'RAID0': Volume.RAID_TYPE_RAID0,
+ 'RAID1': Volume.RAID_TYPE_RAID1,
+ 'RAID5': Volume.RAID_TYPE_RAID5,
+ 'RAID6': Volume.RAID_TYPE_RAID6,
+ 'RAID00': Volume.RAID_TYPE_RAID0,
+ # Some MegaRAID controllers only support a max of 16 disks per span.
+ # To support 16+ disks in one group, MegaRAID has RAID00 or even RAID000.
+ # All of them are considered RAID0.
+ 'RAID10': Volume.RAID_TYPE_RAID10,
+ 'RAID50': Volume.RAID_TYPE_RAID50,
+ 'RAID60': Volume.RAID_TYPE_RAID60,
+}
+
+
+def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info):
+ raid_type = _RAID_TYPE_MAP.get(
+ vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN)
+
+ # On LSI, a RAID1 with four or more disks is actually a RAID10.
+ if raid_type == Volume.RAID_TYPE_RAID1 and \
+ int(vd_prop_info['Number of Drives Per Span']) >= 4:
+ raid_type = Volume.RAID_TYPE_RAID10
+
+ return raid_type
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -459,3 +486,52 @@ class MegaRAID(IPlugin):
vd_pd_info_list, vd_prop_info, key_name))

return search_property(lsm_vols, search_key, search_value)
+
+ @_handle_errors
+ def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
+ if not volume.plugin_data:
+ raise LsmError(
+ ErrorNumber.INVALID_ARGUMENT,
+ "Ilegal input volume argument: missing plugin_data property")
+
+ vd_path = volume.plugin_data
+ vol_show_output = self._storcli_exec([vd_path, "show", "all"])
+ vd_basic_info = vol_show_output[vd_path][0]
+ vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
+ strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size'])
+ disk_count = (
+ int(vd_prop_info['Number of Drives Per Span']) *
+ int(vd_prop_info['Span Depth']))
+ if raid_type == Volume.RAID_TYPE_RAID0:
+ strip_count = disk_count
+ elif raid_type == Volume.RAID_TYPE_RAID1:
+ strip_count = 1
+ elif raid_type == Volume.RAID_TYPE_RAID5:
+ strip_count = disk_count - 1
+ elif raid_type == Volume.RAID_TYPE_RAID6:
+ strip_count = disk_count - 2
+ elif raid_type == Volume.RAID_TYPE_RAID50:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 1) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID60:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 2) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID10:
+ strip_count = (
+ int(vd_prop_info['Number of Drives Per Span']) / 2 *
+ int(vd_prop_info['Span Depth']))
+ else:
+ # MegaRAID does not support RAID 15 or 16 yet.
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unexpected RAID type: %s" %
+ vd_basic_info['TYPE'])
+
+ return [
+ raid_type, strip_size, disk_count, strip_size,
+ strip_size * strip_count]
--
1.8.3.1
Tony Asleson
2015-02-16 22:41:27 UTC
Permalink
Post by Gris Ge
Tony introduced a new way for plugin to register newly added API with
full backward compatibility. Simulator C plugin implemented this change.
Before we commit this patch I want to investigate alternative ways to
handle this. I think we can do better than this approach by introducing
a structure with a version field or something similar. If anyone has
suggestions, please post them!

Thanks!

Regards,
Tony
Gris Ge
2015-02-26 12:35:36 UTC
Permalink
* Treating each MegaRAID DG (disk group) as an LSM pool.
* Based on storcli output of:
storcli /c0/dall show all J

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 99 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 96 insertions(+), 3 deletions(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index e1e7e8d..5e3802b 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin)
+ Disk, VERSION, search_property, IPlugin, Pool)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -115,6 +115,47 @@ def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict):
disk_show_basic_dict['State'], Disk.STATUS_UNKNOWN)


+def _mega_size_to_lsm(mega_size):
+ """
+ LSI uses 'TB, GB, MB, KB' etc.; for LSM they are 'TiB' etc.
+ Return the size in bytes as an int.
+ """
+ re_regex = re.compile("^([0-9\.]+) ([EPTGMK])B$")
+ re_match = re_regex.match(mega_size)
+ if re_match:
+ return size_human_2_size_bytes(
+ "%s%siB" % (re_match.group(1), re_match.group(2)))
+
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "_mega_size_to_lsm(): Got unexpected LSI size string %s" %
+ mega_size)
+
+
+_POOL_STATUS_MAP = {
+ 'Onln': Pool.STATUS_OK,
+ 'Dgrd': Pool.STATUS_DEGRADED,
+ 'Pdgd': Pool.STATUS_DEGRADED,
+ 'Offln': Pool.STATUS_ERROR,
+ 'Rbld': Pool.STATUS_RECONSTRUCTING,
+ 'Optl': Pool.STATUS_OK,
+ # TODO(Gris Ge): The 'Optl' is undocumented, check with LSI.
+}
+
+
+def _pool_status_of(dg_top):
+ """
+ Return status
+ """
+ if dg_top['State'] in _POOL_STATUS_MAP.keys():
+ return _POOL_STATUS_MAP[dg_top['State']]
+ return Pool.STATUS_UNKNOWN
+
+
+def _pool_id_of(dg_id, sys_id):
+ return "%s:DG%s" % (sys_id, dg_id)
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -217,7 +258,11 @@ class MegaRAID(IPlugin):
ErrorNumber.PLUGIN_BUG,
"MegaRAID storcli failed with error %d: %s" %
(rc_status['Status Code'], rc_status['Description']))
- return ctrl_output[0].get('Response Data')
+ real_data = ctrl_output[0].get('Response Data')
+ if real_data and 'Response Data' in real_data.keys():
+ return real_data['Response Data']
+
+ return real_data
else:
return output

@@ -317,7 +362,55 @@ class MegaRAID(IPlugin):

return search_property(rc_lsm_disks, search_key, search_value)

+ @staticmethod
+ def _dg_free_size(dg_num, free_space_list):
+ """
+ Get information from 'FREE SPACE DETAILS' of /c0/dall show all.
+ """
+ for free_space in free_space_list:
+ if int(free_space['DG']) == int(dg_num):
+ return _mega_size_to_lsm(free_space['Size'])
+
+ return 0
+
+ def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num):
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ pool_id = _pool_id_of(dg_top['DG'], sys_id)
+ name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG'])
+ elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL
+ unsupported_actions = 0
+ # TODO(Gris Ge): contact LSI to get accurate total space and free
+ # space. The size we are using here is not what host
+ # got.
+ total_space = _mega_size_to_lsm(dg_top['Size'])
+ free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list)
+ status = _pool_status_of(dg_top)
+ status_info = ''
+ if status == Pool.STATUS_UNKNOWN:
+ status_info = dg_top['State']
+
+ plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG'])
+
+ return Pool(
+ pool_id, name, elem_type, unsupported_actions,
+ total_space, free_space, status, status_info,
+ sys_id, plugin_data)
+
@_handle_errors
def pools(self, search_key=None, search_value=None,
flags=Client.FLAG_RSVD):
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
+ lsm_pools = []
+ for ctrl_num in range(self._ctrl_count()):
+ dg_show_output = self._storcli_exec(
+ ["/c%d/dall" % ctrl_num, "show", "all"])
+ free_space_list = dg_show_output.get('FREE SPACE DETAILS', [])
+ for dg_top in dg_show_output['TOPOLOGY']:
+ if dg_top['Arr'] != '-':
+ continue
+ if dg_top['DG'] == '-':
+ continue
+ lsm_pools.append(
+ self._dg_top_to_lsm_pool(
+ dg_top, free_space_list, ctrl_num))
+
+ return search_property(lsm_pools, search_key, search_value)
--
1.8.3.1
Tony Asleson
2015-02-26 22:14:44 UTC
Permalink
Hi Gris,

Overall:
* Overall, looks good!
* Passes `make check` && `make distcheck`
* The way you came up with for adding new callbacks to the plug-in API
is an improvement over what I had. This way we add fewer calls over
time and preserve source and binary API/ABI compatibility. If others
have suggestions please make them known.

Review comments:

* lsm_mgmt.cpp: Missing LSM_FLAG_UNUSED_CHECK for lsm_volume_raid_info,
you mentioned previously about doing this to make sure the flags are
utilized correctly.
* simulator.py import for Volume is unneeded
* Using -1 for unknown values: libblkid/sysfs uses 0 to denote when a
value is unknown, should we adopt that value too, to keep things consistent?

Regards,
Tony
Gris Ge
2015-03-01 10:22:30 UTC
Permalink
* New method volume_raid_info() to query RAID type, disk count,
minimum I/O size, optimal I/O size.

* These plugins support this new method:
* sim
# Simply returns UNKNOWN
* simc
# Simply sets UNKNOWN on the output parameters.
* MegaRAID

* The C library part might be buggy considering my C skill set.

* Potential support by other plugins:
* Targetd:
We could use the PE size of LVM for minimum I/O size and strip size,
and set the RAID type as JBOD and extent count as 1 (see the sketch
after this list).
Once LVM RAID is supported, it could provide the real RAID type and
other information.
* SMI-S:
In the SMI-S spec, each StorageVolume has a StorageSetting associated,
but no definition says ExtentStripeLength is the optimal I/O size.
Instead of guessing or messing with SNIA, simply 'no support' works
better.
* ONTAP:
A patch for the ONTAP plugin is ready but not included in this patch
set since it was based on my own testing and guesswork.
Waiting for NetApp's official answer about their optimal I/O size.
* Nstor:
No document found about strip settings.
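
A minimal sketch of the Targetd idea above (hypothetical code, not part
of this patch set; _vg_pe_size() is an assumed helper returning the LVM
PE size in bytes):

    from lsm import Volume

    # Sketch only: report JBOD with the LVM PE size as strip size and
    # as both preferred I/O sizes, with a single extent.
    def volume_raid_info(self, volume, flags=0):
        pe_size = self._vg_pe_size(volume)  # assumed helper
        return [Volume.RAID_TYPE_JBOD, pe_size, 1, pe_size, pe_size]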

* This is the best design and naming scheme I came up with.
PLEASE let me know if you have a better one.
Thank you very much in advance.

Changes in V2:
* Patch 6/13 and 10/13:
Tony introduced a new way for plugin to register newly added API with
full backward compatibility. Simulator C plugin implemented this change.
* Patch 10/13:
Add missing capability LSM_CAP_VOLUME_RAID_INFO

Changes in V3:
* Patch 6/13:
Another approach to register newly added API with full backward
compatibility:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
It will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1(),
in addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Patch 9/13:
Full volume_raid_info() support in simulator plugin.

Changes in V4:

* Included Tony's bug fix patch for C API:
[PATCH] lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

* Included ONTAP plugin support.

* Patch 4/15 and 5/15:
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

* Patch 5/15:
* Add LSM_FLAG_UNUSED_CHECK in public lsm_volume_raid_info() function.

* Patch 6/15:
* Removed unneeded import 'Volume' from simulator.py.

Gris Ge (14):
Python Library: Fix decorator problem with docstrings
Constant Test: Fix missing constant with number in it.
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
Simulator Plugin: Add volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
lsmcli: Add volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
MegaRAID Plugin: Add volume_raid_info() support.
ONTAP Plugin: Add volume_raid_info() support.

Tony Asleson (1):
lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 ++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++
c_binding/lsm_plugin_ipc.cpp | 86 ++++++--
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/ontap/na.py | 8 +-
plugin/ontap/ontap.py | 55 ++++--
plugin/sim/simarray.py | 151 +++++++++-----
plugin/sim/simulator.py | 3 +
plugin/simc/simc_lsmplugin.c | 33 +++-
python_binding/lsm/_client.py | 94 +++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
tools/utility/check_const.pl | 6 +-
21 files changed, 905 insertions(+), 91 deletions(-)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:31 UTC
Permalink
From: Tony Asleson <***@redhat.com>

In a number of places in the fs handlers we were checking
whether san_ops was valid before dereferencing a fs_ops pointer.

This is clearly wrong and will result in a seg fault if a
plugin implements fs operations but not block operations.

Signed-off-by: Tony Asleson <***@redhat.com>
Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/lsm_plugin_ipc.cpp | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index 7e0d034..f5374b9 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -1371,7 +1371,7 @@ static int fs(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->fs_ops->fs_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_list ) {
if( LSM_FLAG_EXPECTED_TYPE(params) &&
((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK )) {

@@ -1407,7 +1407,7 @@ static int fs_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_create ) {

Value v_pool = params["pool"];
Value v_name = params["name"];
@@ -1459,7 +1459,7 @@ static int fs_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_delete ) {

Value v_fs = params["fs"];

@@ -1493,7 +1493,7 @@ static int fs_resize(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_resize ) {
+ if( p && p->fs_ops && p->fs_ops->fs_resize ) {

Value v_fs = params["fs"];
Value v_size = params["new_size_bytes"];
@@ -1541,7 +1541,7 @@ static int fs_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_clone ) {

Value v_src_fs = params["src_fs"];
Value v_name = params["dest_fs_name"];
@@ -1597,7 +1597,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_OK;

- if( p && p->san_ops && p->fs_ops->fs_file_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_file_clone ) {

Value v_fs = params["fs"];
Value v_src_name = params["src_file_name"];
@@ -1648,7 +1648,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1686,7 +1686,7 @@ static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency_rm ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency_rm ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1725,7 +1725,7 @@ static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &respon
static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_list ) {

Value v_fs = params["fs"];

@@ -1766,7 +1766,7 @@ static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_create ) {

Value v_fs = params["fs"];
Value v_ss_name = params["snapshot_name"];
@@ -1814,7 +1814,7 @@ static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_delete ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
@@ -1851,7 +1851,7 @@ static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_restore(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_restore ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_restore ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
--
1.8.3.1
Gris Ge
2015-03-01 10:22:32 UTC
Permalink
* With a decorator, the docstring of the original method is deleted.
* Use functools.wraps() to keep the docstring of the original method.
Check
http://stackoverflow.com/questions/1782843/python-decorator-problem-with-docstrings
for detail.

* With this fix user can check method help message in interactive python with
command:
help(lsm.Client.volume_create)
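
A standalone illustration of the problem and the fix (not from the
patch itself):

    import functools

    def logged(func):
        @functools.wraps(func)  # without this, __doc__ becomes None
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner

    @logged
    def volume_create():
        """Creates a volume."""

    print volume_create.__doc__  # prints 'Creates a volume.'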

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_common.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index f2fd568..4c87661 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -533,6 +533,7 @@ def return_requires(*types):
is quite important.
"""
def outer(func):
+ @functools.wraps(func)
def inner(*args, **kwargs):
r = func(*args, **kwargs)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:33 UTC
Permalink
* Allow check_const.pl to check constants with numbers in them.
Example:
LSM_VOLUME_RAID_TYPE_RAID1
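
The effect of the widened character class, illustrated with equivalent
Python regexes (the tool itself is Perl):

    import re

    # '[A-Z][A-Z_]+' rejects names containing digits; adding 0-9 fixes it.
    old = re.compile(r'^[A-Z][A-Z_]+$')
    new = re.compile(r'^[A-Z][A-Z_0-9]+$')

    name = 'LSM_VOLUME_RAID_TYPE_RAID1'
    assert old.match(name) is None
    assert new.match(name) is not None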

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/utility/check_const.pl | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/utility/check_const.pl b/tools/utility/check_const.pl
index 9e5a700..e41c1e9 100644
--- a/tools/utility/check_const.pl
+++ b/tools/utility/check_const.pl
@@ -101,7 +101,7 @@ my $REGEX_C_CONST_FORMAT = qr/
(?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT)
)
(?<CNAME_PAT>
- [A-Z][A-Z_]+
+ [A-Z][A-Z_0-9]+
)
(?<HEADER1>
[\ \t]*
@@ -179,7 +179,7 @@ sub py_name_2_c_name($) {
# 2. Convert System to SYSTEM
# 3. Convert Capabilities to CAP and etc using %PY_CLASS_NAME_CONV;
my $py_name = shift;
- if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_]+)$/ ) {
+ if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_][A-Z_0-9]+)$/ ) {
my $py_class_name = $1;
my $py_var_name = $2;

@@ -308,7 +308,7 @@ sub _get_py_class_consts($$){
}
if ($line =~ /^$current_idention
[\ ]+
- ([A-Z][A-Z\_]+)
+ ([A-Z][A-Z\_0-9]+)
[\ ]*=[\ ]*
($REGEX_VALUE_FORMAT)/x){
my $var_name = $1;
--
1.8.3.1
Gris Ge
2015-03-01 10:22:34 UTC
Permalink
* The docstring of lsm.Client.volume_raid_info() contains full detail
about this new method. Quick info:
Usage:
volume_raid_info(self, volume, flags=0)
Returns:
[raid_type, strip_size, extent_count, min_io_size, opt_io_size]
# strip_size is the size of strip on each disk/extent
# extent_count is the disk/extent count.
# min_io_size is minimum I/O size. Also the preferred I/O size
# of random I/O.
# opt_io_size is optimal I/O size. Also the preferred I/O size
# of sequential I/O.
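
A minimal usage sketch (illustration only; assumes the simulator URI
and at least one existing volume):

    import lsm

    c = lsm.Client('sim://')
    vol = c.volumes()[0]
    (raid_type, strip_size, extent_count,
     min_io_size, opt_io_size) = c.volume_raid_info(vol)
    print raid_type == lsm.Volume.RAID_TYPE_UNKNOWN  # sim returns UNKNOWN
    c.close()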

* Why not use 'pool_raid_info' instead?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not implement RAID
at the pool level but at the volume level.

* Why use 'extent_count' instead of 'disk_count'?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not use disks
directly to assemble a RAID group.

* Why do we need 'min_io_size' and 'opt_io_size' when we have 'extent_count'
and 'strip_size'?
Normally, min_io_size is strip_size, and opt_io_size can be calculated
from raid_type, strip_size and extent_count. But on NetApp, an I/O
test[1] indicates their optimal I/O size is 64KiB no matter how many
disks are in the RAID group. It might[2] be because NetApp created a
WAFL filesystem on the RAID group, which changed the optimal I/O size.

In general, the optimal I/O size or min_io_size of some RAID systems
might not be based on strip size and RAID disk/extent count.
We'd better expose that information directly instead of forcing the
user to guess from strip size and disk/extent count.

* New constants:
Volume.RAID_TYPE_UNKNOWN
# The plugin failed to detect the volume's RAID type.
Volume.RAID_TYPE_RAID0
# Stripe
Volume.RAID_TYPE_RAID1
# Mirror for two disks. For 4 disks or more, they are RAID10.
Volume.RAID_TYPE_RAID3
# Byte-level striping with dedicated parity
Volume.RAID_TYPE_RAID4
# Block-level striping with dedicated parity
Volume.RAID_TYPE_RAID5
# Block-level striping with distributed parity
Volume.RAID_TYPE_RAID6
# Block-level striping with two distributed parities, aka, RAID-DP
Volume.RAID_TYPE_RAID10
# Stripe of mirrors
Volume.RAID_TYPE_RAID15
# Parity of mirrors
Volume.RAID_TYPE_RAID16
# Dual parity of mirrors
Volume.RAID_TYPE_RAID50
# Stripe of parities
Volume.RAID_TYPE_RAID60
# Stripe of dual parities
Volume.RAID_TYPE_RAID51
# Mirror of parities
Volume.RAID_TYPE_RAID61
# Mirror of dual parities
Volume.RAID_TYPE_JBOD
# Just bunch of disks, no parity, no striping.
Volume.RAID_TYPE_MIXED
# This volume contains multiple RAID settings.
Volume.RAID_TYPE_OTHER
# Vendor specific RAID type

Volume.STRIP_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN

* New Capability:
lsm.Capabilities.VOLUME_RAID_INFO

[1] On a 24-disk RAID6 (RAID-DP) with a 4KiB strip size (not changeable):
* With I/O size 90112(4096 * 22), write speed is 73.4 MB/s
* With I/O size 65536, write speed is 86.9 MB/s
# the optimal_io_size exposed via sysfs from SCSI BLOCK LIMITS(0xB0) VPD

[2] No NetApp official document confirms or denies it. Waiting for NetApp's reply.

Changes in V2:
* Add 'New in 1.2' docstring.

Changes in V4(No change in V3):
* Change the value of these constants from -1 to 0 to align with
libblkid/sysfs:
Volume.STRIP_SIZE_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_client.py | 94 +++++++++++++++++++++++++++++++++++++++++++
python_binding/lsm/_data.py | 42 +++++++++++++++++++
2 files changed, 136 insertions(+)

diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index e637962..d42e324 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -971,3 +971,97 @@ class Client(INetworkAttachedStorage):
"""
_check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('target_ports', _del_self(locals()))
+
+ ## Returns the RAID information of certain volume
+ # @param self The this pointer
+ # @param raid_type The RAID type of this volume
+ # @param strip_size The size of strip of disk or other storage
+ # extent.
+ # @param extent_count The count of disks or other storage extent
+ # in this RAID group.
+ # @param min_io_size The preferred I/O size of random I/O.
+ # @param opt_io_size The preferred I/O size of sequential I/O.
+ # @returns [raid_type, strip_size, extent_count, min_io_size, opt_io_size], else raises LsmError
+ @_return_requires([int, int, int, int, int])
+ def volume_raid_info(self, volume, flags=FLAG_RSVD):
+ """Query the RAID information of certain volume.
+
+ New in version 1.2.
+
+ Query the RAID type, strip size, extents count, minimum I/O size,
+ optimal I/O size of given volume.
+ This method requires this capability:
+ lsm.Capabilities.VOLUME_RAID_INFO
+
+ Args:
+ volume (Volume object): Volume to query
+ flags (int): Reserved for future use. Should be set as
+ lsm.Client.FLAG_RSVD
+ Returns:
+ [raid_type, strip_size, extent_count, min_io_size, opt_io_size]
+
+ raid_type (int): RAID Type of requested volume.
+ Could be one of these values:
+ Volume.RAID_TYPE_RAID0
+ Stripe
+ Volume.RAID_TYPE_RAID1
+ Two disks Mirror
+ Volume.RAID_TYPE_RAID3
+ Byte-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID4
+ Block-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID5
+ Block-level striping with distributed parity
+ Volume.RAID_TYPE_RAID6
+ Block-level striping with two distributed parities,
+ aka, RAID-DP
+ Volume.RAID_TYPE_RAID10
+ Stripe of mirrors
+ Volume.RAID_TYPE_RAID15
+ Parity of mirrors
+ Volume.RAID_TYPE_RAID16
+ Dual parity of mirrors
+ Volume.RAID_TYPE_RAID50
+ Stripe of parities
+ Volume.RAID_TYPE_RAID60
+ Stripe of dual parities
+ Volume.RAID_TYPE_RAID51
+ Mirror of parities
+ Volume.RAID_TYPE_RAID61
+ Mirror of dual parities
+ Volume.RAID_TYPE_JBOD
+ Just bunch of disks, no parity, no striping.
+ Volume.RAID_TYPE_UNKNOWN
+ The plugin failed to detect the volume's RAID type.
+ Volume.RAID_TYPE_MIXED
+ This volume contains multiple RAID settings.
+ Volume.RAID_TYPE_OTHER
+ Vendor specific RAID type
+ strip_size(int): The size of strip on each disk or other storage
+ extent.
+ For RAID1/JBOD, it should be set as sector size.
+ If plugin failed to detect strip size, it should be set
+ as Volume.STRIP_SIZE_UNKNOWN(0).
+ extent_count(int): The count of disks or other storage extents
+ assembled in the RAID group.
+ If plugin failed to detect extent_count, it should be set
+ as Volume.EXTENT_COUNT_UNKNOWN(0).
+ min_io_size(int): The minimum I/O size, device preferred I/O
+ size for random I/O. Any I/O size not equal to a multiple
+ of this value may get significant speed penalty.
+ Normally it refers to strip size of each disk(extent).
+ If plugin failed to detect min_io_size, it should try these
+ values in the sequence of:
+ logical sector size -> physical sector size ->
+ Volume.MIN_IO_SIZE_UNKNOWN(0).
+ opt_io_size(int): The optimal I/O size, device preferred I/O
+ size for sequential I/O. Normally it refers to RAID group
+ stripe size.
+ If plugin failed to detect opt_io_size, it should be set
+ to Volume.OPT_IO_SIZE_UNKNOWN(0).
+ Raises:
+ LsmError:
+ ErrorNumber.NO_SUPPORT
+ No support.
+ """
+ return self._tp.rpc('volume_raid_info', _del_self(locals()))
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 067c766..904d390 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -258,6 +258,46 @@ class Volume(IData):
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1

+ RAID_TYPE_UNKNOWN = -1
+ # The plugin failed to detect the volume's RAID type.
+ RAID_TYPE_RAID0 = 0
+ # Stripe
+ RAID_TYPE_RAID1 = 1
+ # Mirror for two disks. For 4 disks or more, they are RAID10.
+ RAID_TYPE_RAID3 = 3
+ # Byte-level striping with dedicated parity
+ RAID_TYPE_RAID4 = 4
+ # Block-level striping with dedicated parity
+ RAID_TYPE_RAID5 = 5
+ # Block-level striping with distributed parity
+ RAID_TYPE_RAID6 = 6
+ # Block-level striping with two distributed parities, aka, RAID-DP
+ RAID_TYPE_RAID10 = 10
+ # Stripe of mirrors
+ RAID_TYPE_RAID15 = 15
+ # Parity of mirrors
+ RAID_TYPE_RAID16 = 16
+ # Dual parity of mirrors
+ RAID_TYPE_RAID50 = 50
+ # Stripe of parities
+ RAID_TYPE_RAID60 = 60
+ # Stripe of dual parities
+ RAID_TYPE_RAID51 = 51
+ # Mirror of parities
+ RAID_TYPE_RAID61 = 61
+ # Mirror of dual parities
+ RAID_TYPE_JBOD = 20
+ # Just bunch of disks, no parity, no striping.
+ RAID_TYPE_MIXED = 21
+ # This volume contains multiple RAID settings.
+ RAID_TYPE_OTHER = 22
+ # Vendor specific RAID type
+
+ STRIP_SIZE_UNKNOWN = 0
+ EXTENT_COUNT_UNKNOWN = 0
+ MIN_IO_SIZE_UNKNOWN = 0
+ OPT_IO_SIZE_UNKNOWN = 0
+
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
@@ -669,6 +709,8 @@ class Capabilities(IData):

VOLUME_ISCSI_CHAP_AUTHENTICATION = 53

+ VOLUME_RAID_INFO = 54
+
VOLUME_THIN = 55

#File system
--
1.8.3.1
Gris Ge
2015-03-01 10:22:35 UTC
Permalink
* Please check python API document for detail about lsm_volume_raid_info()
method. Quick info:

Query the RAID information of a volume.
@param[in] c Valid connection
@param[in] v Volume ptr.
@param[out] raid_type Enum of lsm_volume_raid_type
@param[out] strip_size Size of the strip on disk or other storage extent.
@param[out] extent_count Count of disks or other storage extents in this
RAID group.
@param[out] min_io_size Minimum I/O size, also the preferred I/O size
of random I/O.
@param[out] opt_io_size Optimal I/O size, also the preferred I/O size
of sequential I/O.
@param[in] flags Reserved, set to 0
@return LSM_ERR_OK on success else error reason.

* New plugin interface: lsm_plug_volume_raid_info

* New enum type: lsm_volume_raid_type

* New capability:
LSM_CAP_VOLUME_RAID_INFO

* New constants:
LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
/**^ Unknown */
LSM_VOLUME_RAID_TYPE_RAID0 = 0,
/**^ Stripe */
LSM_VOLUME_RAID_TYPE_RAID1 = 1,
/**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
LSM_VOLUME_RAID_TYPE_RAID3 = 3,
/**^ Byte-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID4 = 4,
/**^ Block-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID5 = 5,
/**^ Block-level striping with distributed parity */
LSM_VOLUME_RAID_TYPE_RAID6 = 6,
/**^ Block-level striping with two distributed parities, aka, RAID-DP */
LSM_VOLUME_RAID_TYPE_RAID10 = 10,
/**^ Stripe of mirrors */
LSM_VOLUME_RAID_TYPE_RAID15 = 15,
/**^ Parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID16 = 16,
/**^ Dual parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID50 = 50,
/**^ Stripe of parities */
LSM_VOLUME_RAID_TYPE_RAID60 = 60,
/**^ Stripe of dual parities */
LSM_VOLUME_RAID_TYPE_RAID51 = 51,
/**^ Mirror of parities */
LSM_VOLUME_RAID_TYPE_RAID61 = 61,
/**^ Mirror of dual parities */
LSM_VOLUME_RAID_TYPE_JBOD = 20,
/**^ Just bunch of disks, no parity, no striping. */
LSM_VOLUME_RAID_TYPE_MIXED = 21,
/**^ This volume contains multiple RAID settings. */
LSM_VOLUME_RAID_TYPE_OTHER = 22,
/**^ Vendor specific RAID type */

LSM_VOLUME_STRIP_SIZE_UNKNOWN
LSM_VOLUME_EXTENT_COUNT_UNKNOWN
LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

V2: Change call back registration

Changes in V3:

* New implementation for adding new methods:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
It will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1(),
in addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Add 'New in version 1.2' comment of lsm_volume_raid_info() function.

Changes in V4:

* Add LSM_FLAG_UNUSED_CHECK in public lsm_volume_raid_info() function.
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 +++++++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 ++
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 +++++++++++++++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 +++++++++++++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++++++++++++++
c_binding/lsm_plugin_ipc.cpp | 62 +++++++++++++++++++++-
7 files changed, 231 insertions(+), 1 deletion(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 879f184..b7e7e5b 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -844,6 +844,26 @@ extern "C" {
uint32_t *count,
lsm_flag flags);

+/**
+ * Query the RAID information of a volume. New in version 1.2.
+ * @param[in] c Valid connection
+ * @param[in] v Volume ptr.
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on disk or other storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in this
+ * RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved, set to 0
+ * @return LSM_ERR_OK on success else error reason.
+ */
+int LSM_DLL_EXPORT lsm_volume_raid_info(
+ lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size, lsm_flag flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 7d6182c..18490f3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -77,6 +77,9 @@ typedef enum {

LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**< If you can configure iSCSI chap authentication */

+ LSM_CAP_VOLUME_RAID_INFO = 54,
+ /**^ If you can query RAID information from a volume */
+
LSM_CAP_VOLUME_THIN = 55, /**< Thin provisioned volumes are supported */

LSM_CAP_FS = 100, /**< List file systems */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index e7874f7..11c6653 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -745,6 +745,8 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
lsm_flag flags);
/** \struct lsm_san_ops_v1
* \brief Block array oriented functions (callback functions)
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
@@ -774,6 +776,8 @@ struct lsm_san_ops_v1 {

/** \struct lsm_fs_ops_v1
* \brief File system oriented functionality
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_fs_ops_v1 {
lsm_plug_fs_list fs_list; /**< list file systems */
@@ -792,6 +796,8 @@ struct lsm_fs_ops_v1 {

/** \struct lsm_nas_ops_v1
* \brief NAS system oriented functionality call back functions
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_nas_ops_v1 {
lsm_plug_nfs_auth_types nfs_auth_types; /**< List nfs authentication types */
@@ -801,6 +807,37 @@ struct lsm_nas_ops_v1 {
};

/**
+ * Query the RAID information of a volume
+ * @param[in] c Valid lsm plug-in pointer
+ * @param[in] volume Volume to query
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on each disk or other
+ * storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in
+ * this RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved
+ * @return LSM_ERR_OK, else error reason
+ */
+typedef int (*lsm_plug_volume_raid_info)(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type, int32_t *strip_size,
+ int32_t *extent_count, int32_t *min_io_size,
+ int32_t *opt_io_size, lsm_flag flags);
+
+/** \struct lsm_ops_v1_2
+ * \brief Functions added in version 1.2
+ * NOTE: This structure may change during development until version 1.2
+ * is released.
+ */
+struct lsm_ops_v1_2 {
+ lsm_plug_volume_raid_info vol_raid_info;
+ /**^ Query volume RAID information*/
+};
+
+/**
* Copies the memory pointed to by item with given type t.
* @param t Type of item to copy
* @param item Pointer to src
@@ -839,6 +876,24 @@ int LSM_DLL_EXPORT lsm_register_plugin_v1( lsm_plugin_ptr plug,
struct lsm_nas_ops_v1 *nas_ops );

/**
+ * Used to register version 1.2 APIs plug-in operation.
+ * @param plug Pointer provided by the framework
+ * @param private_data Private data to be used for whatever the plug-in
+ * needs
+ * @param mgm_ops Function pointers for struct lsm_mgmt_ops_v1
+ * @param san_ops Function pointers for struct lsm_san_ops_v1
+ * @param fs_ops Function pointers for struct lsm_fs_ops_v1
+ * @param nas_ops Function pointers for struct lsm_nas_ops_v1
+ * @param ops_v1_2 Function pointers for struct lsm_ops_v1_2
+ * @return LSM_ERR_OK on success, else error reason.
+ */
+int LSM_DLL_EXPORT lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug,
+ void * private_data, struct lsm_mgmt_ops_v1 *mgm_ops,
+ struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops,
+ struct lsm_nas_ops_v1 *nas_ops, struct lsm_ops_v1_2 *ops_v1_2);
+
+/**
* Used to retrieve private data for plug-in operation.
* @param plug Opaque plug-in pointer.
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 309a5e8..78794af 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -131,6 +131,49 @@ typedef enum {
LSM_VOLUME_PROVISION_DEFAULT = 3 /**< Default provisioning */
} lsm_volume_provision_type;

+/**< \enum lsm_volume_raid_type Different types of RAID */
+typedef enum {
+ LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
+ /**^ Unknown */
+ LSM_VOLUME_RAID_TYPE_RAID0 = 0,
+ /**^ Stripe */
+ LSM_VOLUME_RAID_TYPE_RAID1 = 1,
+ /**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
+ LSM_VOLUME_RAID_TYPE_RAID3 = 3,
+ /**^ Byte-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID4 = 4,
+ /**^ Block-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID5 = 5,
+ /**^ Block-level striping with distributed parity */
+ LSM_VOLUME_RAID_TYPE_RAID6 = 6,
+ /**^ Block-level striping with two distributed parities, aka, RAID-DP */
+ LSM_VOLUME_RAID_TYPE_RAID10 = 10,
+ /**^ Stripe of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID15 = 15,
+ /**^ Parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID16 = 16,
+ /**^ Dual parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID50 = 50,
+ /**^ Stripe of parities */
+ LSM_VOLUME_RAID_TYPE_RAID60 = 60,
+ /**^ Stripe of dual parities */
+ LSM_VOLUME_RAID_TYPE_RAID51 = 51,
+ /**^ Mirror of parities */
+ LSM_VOLUME_RAID_TYPE_RAID61 = 61,
+ /**^ Mirror of dual parities */
+ LSM_VOLUME_RAID_TYPE_JBOD = 20,
+ /**^ Just bunch of disks, no parity, no striping. */
+ LSM_VOLUME_RAID_TYPE_MIXED = 21,
+ /**^ This volume contains multiple RAID settings. */
+ LSM_VOLUME_RAID_TYPE_OTHER = 22,
+ /**^ Vendor specific RAID type */
+} lsm_volume_raid_type;
+
+#define LSM_VOLUME_STRIP_SIZE_UNKNOWN 0
+#define LSM_VOLUME_EXTENT_COUNT_UNKNOWN 0
+#define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN 0
+#define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN 0
+
/**
* Admin state for volume, enabled or disabled
*/
diff --git a/c_binding/lsm_datatypes.hpp b/c_binding/lsm_datatypes.hpp
index aed6891..6a6271f 100644
--- a/c_binding/lsm_datatypes.hpp
+++ b/c_binding/lsm_datatypes.hpp
@@ -193,6 +193,7 @@ struct LSM_DLL_LOCAL _lsm_plugin {
struct lsm_san_ops_v1 *san_ops; /**< Callbacks for SAN ops */
struct lsm_nas_ops_v1 *nas_ops; /**< Callbacks for NAS ops */
struct lsm_fs_ops_v1 *fs_ops; /**< Callbacks for fs ops */
+ struct lsm_ops_v1_2 *ops_v1_2; /**< Callbacks for v1.2 ops */
};


diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 37faed4..c57d3de 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -1171,6 +1171,54 @@ int lsm_volume_delete(lsm_connect *c, lsm_volume *volume, char **job,

}

+int lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume,
+ lsm_volume_raid_type * raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ if( LSM_FLAG_UNUSED_CHECK(flags) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ int rc = LSM_ERR_OK;
+ CONN_SETUP(c);
+
+ if( !LSM_IS_VOL(volume) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ if( !raid_type || !strip_size || !extent_count || !min_io_size ||
+ !opt_io_size) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ try {
+ std::map<std::string, Value> p;
+ p["volume"] = volume_to_value(volume);
+ p["flags"] = Value(flags);
+
+ Value parameters(p);
+ Value response;
+
+ rc = rpc(c, "volume_raid_info", parameters, response);
+ if( LSM_ERR_OK == rc ) {
+ // Response is an array of five int32 values.
+ std::vector<Value> j = response.asArray();
+ *raid_type = (lsm_volume_raid_type) j[0].asInt32_t();
+ *strip_size = j[1].asInt32_t();
+ *extent_count = j[2].asInt32_t();
+ *min_io_size = j[3].asInt32_t();
+ *opt_io_size = j[4].asInt32_t();
+ }
+ } catch( const ValueException &ve ) {
+ rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
+ ve.what());
+ }
+ return rc;
+
+}
+
int lsm_iscsi_chap_auth(lsm_connect *c, const char *init_id,
const char *username, const char *password,
const char *out_user, const char *out_password,
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index f5374b9..2cd041f 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -123,6 +123,21 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
return rc;
}

+int lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op,
+ struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op,
+ struct lsm_nas_ops_v1 *nas_op, struct lsm_ops_v1_2 *ops_v1_2)
+{
+ int rc = lsm_register_plugin_v1(
+ plug, private_data, mgm_op, san_op, fs_op, nas_op);
+
+ if (rc != LSM_ERR_OK){
+ return rc;
+ }
+ plug->ops_v1_2 = ops_v1_2;
+ return rc;
+}
+
void *lsm_private_data_get(lsm_plugin_ptr plug)
{
if (!LSM_IS_PLUGIN(plug)) {
@@ -956,6 +971,50 @@ static int handle_volume_disable(lsm_plugin_ptr p, Value &params, Value &respons
return handle_vol_enable_disable(p, params, response, 0);
}

+static int handle_volume_raid_info(lsm_plugin_ptr p, Value &params,
+ Value &response)
+{
+ int rc = LSM_ERR_NO_SUPPORT;
+ if( p && p->ops_v1_2 && p->ops_v1_2->vol_raid_info) {
+ Value v_vol = params["volume"];
+
+ if(IS_CLASS_VOLUME(v_vol) &&
+ LSM_FLAG_EXPECTED_TYPE(params) ) {
+ lsm_volume *vol = value_to_volume(v_vol);
+ std::vector<Value> result;
+
+ if( vol ) {
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size;
+ int32_t extent_count;
+ int32_t min_io_size;
+ int32_t opt_io_size;
+
+ rc = p->ops_v1_2->vol_raid_info(
+ p, vol, &raid_type, &strip_size, &extent_count,
+ &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params));
+
+ if( LSM_ERR_OK == rc ) {
+ result.push_back(Value((int32_t)raid_type));
+ result.push_back(Value(strip_size));
+ result.push_back(Value(extent_count));
+ result.push_back(Value(min_io_size));
+ result.push_back(Value(opt_io_size));
+ response = Value(result);
+ }
+
+ lsm_volume_record_free(vol);
+ } else {
+ rc = LSM_ERR_NO_MEMORY;
+ }
+
+ } else {
+ rc = LSM_ERR_TRANSPORT_INVALID_ARG;
+ }
+ }
+ return rc;
+}
+
static int ag_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2153,7 +2212,8 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("volume_replicate_range", handle_volume_replicate_range)
("volume_resize", handle_volume_resize)
("volumes_accessible_by_access_group", vol_accessible_by_ag)
- ("volumes", handle_volumes);
+ ("volumes", handle_volumes)
+ ("volume_raid_info", handle_volume_raid_info);

static int process_request(lsm_plugin_ptr p, const std::string &method, Value &request,
Value &response)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:36 UTC
Permalink
* Introduced full support of volume_raid_info().
* For a sub-pool, use the RAID info from its parent pool.
* For RAID 1 and JBOD, set strip_size, min_io_size, and opt_io_size
as the block size (512).
* For other RAID types, calculate opt_io_size from the data disk count
(see the sketch below).
* For RAID_TYPE_MIXED, raise a PLUGIN_BUG LsmError.

* Replaced PoolRAID.RAID_TYPE_RAID_XXX with Volume.RAID_TYPE_RAID_XXX.

* Replaced PoolRAID.RAID_TYPE_NOT_APPLICABLE with Volume.RAID_TYPE_OTHER.

* Bumped simulator data version to 3.1 due to previous Volume.RAID_TYPE_OTHER
change.
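
A condensed sketch of the resulting decision logic (constants as defined
in the simarray.py hunk below):

    if raid_type in (Volume.RAID_TYPE_RAID1, Volume.RAID_TYPE_JBOD):
        # Mirrors and JBOD have no strip; report the block size.
        strip_size = min_io_size = opt_io_size = BackStore.BLK_SIZE  # 512
    else:
        strip_size = min_io_size = BackStore.STRIP_SIZE  # 131072 (128 KiB)
        opt_io_size = int(data_disk_count * BackStore.STRIP_SIZE)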

Changes in V4 (no changes in V2 and V3):

* Removed unneeded import 'Volume' in simulator.py.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/sim/simarray.py | 151 +++++++++++++++++++++++++++++++-----------------
plugin/sim/simulator.py | 3 +
2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index 73f4492..d4feb7f 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -67,26 +67,6 @@ def _random_vpd():


class PoolRAID(object):
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
MEMBER_TYPE_UNKNOWN = 0
MEMBER_TYPE_DISK = 1
MEMBER_TYPE_DISK_MIX = 10
@@ -136,37 +116,37 @@ class PoolRAID(object):
return PoolRAID.MEMBER_TYPE_UNKNOWN

_RAID_DISK_CHK = {
- RAID_TYPE_JBOD: lambda x: x > 0,
- RAID_TYPE_RAID0: lambda x: x > 0,
- RAID_TYPE_RAID1: lambda x: x == 2,
- RAID_TYPE_RAID3: lambda x: x >= 3,
- RAID_TYPE_RAID4: lambda x: x >= 3,
- RAID_TYPE_RAID5: lambda x: x >= 3,
- RAID_TYPE_RAID6: lambda x: x >= 4,
- RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
- RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_JBOD: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID0: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID1: lambda x: x == 2,
+ Volume.RAID_TYPE_RAID3: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID4: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID5: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID6: lambda x: x >= 4,
+ Volume.RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
}

_RAID_PARITY_DISK_COUNT_FUNC = {
- RAID_TYPE_JBOD: lambda x: x,
- RAID_TYPE_RAID0: lambda x: x,
- RAID_TYPE_RAID1: lambda x: 1,
- RAID_TYPE_RAID3: lambda x: x - 1,
- RAID_TYPE_RAID4: lambda x: x - 1,
- RAID_TYPE_RAID5: lambda x: x - 1,
- RAID_TYPE_RAID6: lambda x: x - 2,
- RAID_TYPE_RAID10: lambda x: x / 2,
- RAID_TYPE_RAID15: lambda x: x / 2 - 1,
- RAID_TYPE_RAID16: lambda x: x / 2 - 2,
- RAID_TYPE_RAID50: lambda x: x - 2,
- RAID_TYPE_RAID60: lambda x: x - 4,
- RAID_TYPE_RAID51: lambda x: x / 2 - 1,
- RAID_TYPE_RAID61: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_JBOD: lambda x: x,
+ Volume.RAID_TYPE_RAID0: lambda x: x,
+ Volume.RAID_TYPE_RAID1: lambda x: 1,
+ Volume.RAID_TYPE_RAID3: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID4: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID5: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID6: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID10: lambda x: x / 2,
+ Volume.RAID_TYPE_RAID15: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID16: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_RAID50: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID60: lambda x: x - 4,
+ Volume.RAID_TYPE_RAID51: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID61: lambda x: x / 2 - 2,
}

@staticmethod
@@ -191,7 +171,7 @@ class PoolRAID(object):


class BackStore(object):
- VERSION = "3.0"
+ VERSION = "3.1"
VERSION_SIGNATURE = 'LSM_SIMULATOR_DATA_%s_%s' % (VERSION, md5(VERSION))
JOB_DEFAULT_DURATION = 1
JOB_DATA_TYPE_VOL = 1
@@ -201,6 +181,7 @@ class BackStore(object):
SYS_ID = "sim-01"
SYS_NAME = "LSM simulated storage plug-in"
BLK_SIZE = 512
+ STRIP_SIZE = 131072 # 128 KiB

_LIST_SPLITTER = '#'

@@ -724,7 +705,7 @@ class BackStore(object):

pool_1_id = self.sim_pool_create_from_disk(
name='Pool 1',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=pool_1_disks,
element_type=Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
@@ -744,7 +725,7 @@ class BackStore(object):

self.sim_pool_create_from_disk(
name='Pool 3',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=ssd_pool_disks,
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
@@ -755,7 +736,7 @@ class BackStore(object):
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
Pool.ELEMENT_TYPE_DELTA,
- raid_type=PoolRAID.RAID_TYPE_RAID0,
+ raid_type=Volume.RAID_TYPE_RAID0,
sim_disk_ids=test_pool_disks)

self._data_add(
@@ -1009,13 +990,23 @@ class BackStore(object):
'status_info': '',
'element_type': element_type,
'unsupported_actions': unsupported_actions,
- 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': Volume.RAID_TYPE_OTHER,
'member_type': PoolRAID.MEMBER_TYPE_POOL,
'parent_pool_id': parent_pool_id,
'total_space': size,
})
return self.lastrowid

+ def sim_pool_disks_count(self, sim_pool_id):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE owner_pool_id=%s;" %
+ sim_pool_id)[0][0]
+
+ def sim_pool_data_disks_count(self, sim_pool_id=None):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE "
+ "owner_pool_id=%s and role='DATA';" % sim_pool_id)[0][0]
+
def sim_vols(self, sim_ag_id=None):
"""
Return a list of sim_vol dict.
@@ -2231,3 +2222,57 @@ class SimArray(object):
@_handle_errors
def target_ports(self):
return list(SimArray._sim_tgt_2_lsm(t) for t in self.bs_obj.sim_tgts())
+
+ @_handle_errors
+ def volume_raid_info(self, lsm_vol):
+ sim_pool = self.bs_obj.sim_pool_of_id(
+ SimArray._lsm_id_to_sim_id(
+ lsm_vol.pool_id,
+ LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")))
+
+ raid_type = sim_pool['raid_type']
+ strip_size = Volume.STRIP_SIZE_UNKNOWN
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = Volume.OPT_IO_SIZE_UNKNOWN
+ extent_count = Volume.EXTENT_COUNT_UNKNOWN
+
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
+ parent_sim_pool = self.bs_obj.sim_pool_of_id(
+ sim_pool['parent_pool_id'])
+ raid_type = parent_sim_pool['raid_type']
+
+ extent_count = self.bs_obj.sim_pool_disks_count(
+ parent_sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ parent_sim_pool['id'])
+ else:
+ extent_count = self.bs_obj.sim_pool_disks_count(
+ sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_UNKNOWN or \
+ raid_type == Volume.RAID_TYPE_OTHER:
+ return [
+ raid_type, strip_size, extent_count, min_io_size,
+ opt_io_size]
+
+ if raid_type == Volume.RAID_TYPE_MIXED:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unsupported RAID_TYPE_MIXED pool "
+ "%s" % sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_RAID1 or \
+ raid_type == Volume.RAID_TYPE_JBOD:
+ strip_size = BackStore.BLK_SIZE
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = BackStore.BLK_SIZE
+ else:
+ strip_size = BackStore.STRIP_SIZE
+ min_io_size = BackStore.STRIP_SIZE
+ opt_io_size = int(data_disk_count * BackStore.STRIP_SIZE)
+
+ return [
+ raid_type, strip_size, extent_count, min_io_size,
+ opt_io_size]
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index 8f7adfc..d562cd6 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -289,3 +289,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
return search_property(
[SimPlugin._sim_data_2_lsm(t) for t in sim_tgts],
search_key, search_value)
+
+ def volume_raid_info(self, volume, flags=0):
+ return self.sim_array.volume_raid_info(volume)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:37 UTC
Permalink
* Simply set XXX_UNKNOWN on output parameters.

V2:
- Add call to register volume_raid_info
- Add LSM_CAP_VOLUME_RAID_INFO to capabilities

Changes in V3:
* Use lsm_register_plugin_v1_2() to register lsm_volume_raid_info() support.

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/simc/simc_lsmplugin.c | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index 7c4d287..987d096 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -391,6 +391,7 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_EXPORTS,
LSM_CAP_EXPORT_FS,
LSM_CAP_EXPORT_REMOVE,
+ LSM_CAP_VOLUME_RAID_INFO,
-1
);

@@ -956,6 +957,33 @@ static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
return rc;
}

+static int volume_raid_info(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
+ struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
+
+ if( !av) {
+ rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
+ "volume not found!");
+ }
+
+ *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN;
+ *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN;
+ *extent_count = LSM_VOLUME_EXTENT_COUNT_UNKNOWN;
+ *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN;
+ *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN;
+ return rc;
+}
+
+static struct lsm_ops_v1_2 ops_v1_2 = {
+ volume_raid_info
+};
+
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume *v,
lsm_flag flags)
{
@@ -1527,7 +1555,6 @@ static struct lsm_san_ops_v1 san_ops = {
list_targets
};

-
static int fs_list(lsm_plugin_ptr c, const char *search_key,
const char *search_value, lsm_fs **fs[], uint32_t *count,
lsm_flag flags)
@@ -2243,8 +2270,8 @@ int load( lsm_plugin_ptr c, const char *uri, const char *password,
_unload(pd);
pd = NULL;
} else {
- rc = lsm_register_plugin_v1( c, pd, &mgm_ops,
- &san_ops, &fs_ops, &nfs_ops);
+ rc = lsm_register_plugin_v1_2(
+ c, pd, &mgm_ops, &san_ops, &fs_ops, &nfs_ops, &ops_v1_2);
}
}
return rc;
--
1.8.3.1
Gris Ge
2015-03-01 10:22:38 UTC
Permalink
* New command:
lsmcli volume-raid-info --vol <VOL_ID>

* New alias:
lsmcli vri == lsmcli volume-raid-info

Changes in V2:
* Fix output format when a volume that is not found is passed to
_get_item() in volume_raid_info()

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/cmdline.py | 18 +++++++++++++-
tools/lsmcli/data_display.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index a781314..980b3a0 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,7 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,

from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- vol_provision_str_to_type, vol_rep_type_str_to_type)
+ vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo)


## Wraps the invocation to the command line
@@ -368,6 +368,14 @@ cmds = (
),

dict(
+ name='volume-raid-info',
+ help='Query volume RAID information',
+ args=[
+ dict(vol_id_opt),
+ ],
+ ),
+
+ dict(
name='access-group-create',
help='Create an access group',
args=[
@@ -628,6 +636,7 @@ aliases = (
['aa', 'access-group-add'],
['ar', 'access-group-remove'],
['ad', 'access-group-delete'],
+ ['vri', 'volume-raid-info'],
)


@@ -1318,6 +1327,13 @@ class CmdLine:
self._wait_for_it("volume-dependant-rm",
self.c.volume_child_dependency_rm(v), None)

+ def volume_raid_info(self, args):
+ lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume")
+ self.display_data(
+ [
+ VolumeRAIDInfo(
+ lsm_vol.id, *self.c.volume_raid_info(lsm_vol))])
+
## Displays file system dependants
def fs_dependants(self, args):
fs = _get_item(self.c.fs(), args.fs, "File System")
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 285a14f..6dd5ffa 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -243,6 +243,41 @@ class PlugData(object):
self.version = plugin_version


+class VolumeRAIDInfo(object):
+ _RAID_TYPE_MAP = {
+ Volume.RAID_TYPE_RAID0: 'RAID0',
+ Volume.RAID_TYPE_RAID1: 'RAID1',
+ Volume.RAID_TYPE_RAID3: 'RAID3',
+ Volume.RAID_TYPE_RAID4: 'RAID4',
+ Volume.RAID_TYPE_RAID5: 'RAID5',
+ Volume.RAID_TYPE_RAID6: 'RAID6',
+ Volume.RAID_TYPE_RAID10: 'RAID10',
+ Volume.RAID_TYPE_RAID15: 'RAID15',
+ Volume.RAID_TYPE_RAID16: 'RAID16',
+ Volume.RAID_TYPE_RAID50: 'RAID50',
+ Volume.RAID_TYPE_RAID60: 'RAID60',
+ Volume.RAID_TYPE_RAID51: 'RAID51',
+ Volume.RAID_TYPE_RAID61: 'RAID61',
+ Volume.RAID_TYPE_JBOD: 'JBOD',
+ Volume.RAID_TYPE_MIXED: 'MIXED',
+ Volume.RAID_TYPE_OTHER: 'OTHER',
+ Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ }
+
+ def __init__(self, vol_id, raid_type, strip_size, extent_count,
+ min_io_size, opt_io_size):
+ self.vol_id = vol_id
+ self.raid_type = raid_type
+ self.strip_size = strip_size
+ self.extent_count = extent_count
+ self.min_io_size = min_io_size
+ self.opt_io_size = opt_io_size
+
+ @staticmethod
+ def raid_type_to_str(raid_type):
+ return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP)
+
+
class DisplayData(object):

def __init__(self):
@@ -498,6 +533,29 @@ class DisplayData(object):
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}

+ VOL_RAID_INFO_HEADER = OrderedDict()
+ VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID'
+ VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type'
+ VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size'
+ VOL_RAID_INFO_HEADER['extent_count'] = 'Extent Count'
+ VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size'
+ VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size'
+
+ VOL_RAID_INFO_COLUMN_SKIP_KEYS = []
+
+ VOL_RAID_INFO_VALUE_CONV_ENUM = {
+ 'raid_type': VolumeRAIDInfo.raid_type_to_str,
+ }
+ VOL_RAID_INFO_VALUE_CONV_HUMAN = [
+ 'strip_size', 'min_io_size', 'opt_io_size']
+
+ VALUE_CONVERT[VolumeRAIDInfo] = {
+ 'headers': VOL_RAID_INFO_HEADER,
+ 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS,
+ 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN,
+ }
+
@staticmethod
def _get_man_pro_value(obj, key, value_conv_enum, value_conv_human,
flag_human, flag_enum):
--
1.8.3.1
Gris Ge
2015-03-01 10:22:39 UTC
Permalink
* Simply run that command and check the volume ID of the output.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/cmdtest.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/test/cmdtest.py b/test/cmdtest.py
index b603601..e80e027 100755
--- a/test/cmdtest.py
+++ b/test/cmdtest.py
@@ -676,6 +676,25 @@ def search_test(cap, system_id):
volume_delete(vol_id)
return

+def volume_raid_info_test(cap, system_id):
+ if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']:
+ test_pool_id = name_to_id(OP_POOL, test_pool_name)
+
+ if test_pool_id is None:
+ print 'Pool %s is not available!' % test_pool_name
+ exit(10)
+
+ vol_id = create_volume(test_pool_id)
+ out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1]
+ r = parse(out)
+ if len(r[0]) != 6:
+ print "volume-raid-info got expected output: %s" % out
+ exit(10)
+ if r[0][0] != vol_id:
+ print "volume-raid-info output volume ID is not requested " \
+ "volume ID %s" % out
+ exit(10)
+ return

def run_all_tests(cap, system_id):
test_display(cap, system_id)
@@ -688,6 +707,8 @@ def run_all_tests(cap, system_id):

search_test(cap, system_id)

+ volume_raid_info_test(cap, system_id)
+
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c", "--command", action="store", type="string",
--
1.8.3.1
Gris Ge
2015-03-01 10:22:40 UTC
Permalink
* Simply invoke lsm_volume_raid_info() with no additional test.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)

diff --git a/test/tester.c b/test/tester.c
index 2edd18c..6cae568 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -2858,6 +2858,35 @@ START_TEST(test_volume_vpd_check)
}
END_TEST

+START_TEST(test_volume_raid_info)
+{
+ lsm_volume *volume = NULL;
+ char *job = NULL;
+ lsm_pool *pool = get_test_pool(c);
+
+ int rc = lsm_volume_create(
+ c, pool, "volume_raid_info_test", 20000000,
+ LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD);
+
+ fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED,
+ "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c)));
+
+ if( LSM_ERR_JOB_STARTED == rc ) {
+ volume = wait_for_job_vol(c, &job);
+ }
+
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size, extent_count, min_io_size, opt_io_size;
+
+ G(
+ rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size,
+ &extent_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD);
+
+ G(rc, lsm_volume_record_free, volume);
+ volume = NULL;
+}
+END_TEST
+
Suite * lsm_suite(void)
{
Suite *s = suite_create("libStorageMgmt");
@@ -2893,6 +2922,7 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_ss);
tcase_add_test(basic, test_nfs_exports);
tcase_add_test(basic, test_invalid_input);
+ tcase_add_test(basic, test_volume_raid_info);

suite_add_tcase(s, basic);
return s;
--
1.8.3.1
Gris Ge
2015-03-01 10:22:41 UTC
Permalink
* Treating each MegaRAID DG (disk group) as an LSM pool.
* Based on storcli output of:
storcli /c0/dall show all J
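
For illustration, a sketch of the resulting ID mapping (disk group 0 on
a hypothetical system ID 'SV03403550'):

    _pool_id_of('0', 'SV03403550')  # -> 'SV03403550:DG0' (Pool.id)
    # Pool.plugin_data keeps the storcli path, e.g. '/c0/d0'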

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 99 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 96 insertions(+), 3 deletions(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index e1e7e8d..5e3802b 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin)
+ Disk, VERSION, search_property, IPlugin, Pool)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -115,6 +115,47 @@ def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict):
disk_show_basic_dict['State'], Disk.STATUS_UNKNOWN)


+def _mega_size_to_lsm(mega_size):
+ """
+ LSI uses 'TB, GB, MB, KB' and so on; for LSM, they are 'TiB' and so on.
+ Return the size in bytes as an int.
+ """
+ re_regex = re.compile("^([0-9\.]+) ([EPTGMK])B$")
+ re_match = re_regex.match(mega_size)
+ if re_match:
+ return size_human_2_size_bytes(
+ "%s%siB" % (re_match.group(1), re_match.group(2)))
+
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "_mega_size_to_lsm(): Got unexpected LSI size string %s" %
+ mega_size)
+
+
+_POOL_STATUS_MAP = {
+ 'Onln': Pool.STATUS_OK,
+ 'Dgrd': Pool.STATUS_DEGRADED,
+ 'Pdgd': Pool.STATUS_DEGRADED,
+ 'Offln': Pool.STATUS_ERROR,
+ 'Rbld': Pool.STATUS_RECONSTRUCTING,
+ 'Optl': Pool.STATUS_OK,
+ # TODO(Gris Ge): The 'Optl' is undocumented, check with LSI.
+}
+
+
+def _pool_status_of(dg_top):
+ """
+ Return status
+ """
+ if dg_top['State'] in _POOL_STATUS_MAP.keys():
+ return _POOL_STATUS_MAP[dg_top['State']]
+ return Pool.STATUS_UNKNOWN
+
+
+def _pool_id_of(dg_id, sys_id):
+ return "%s:DG%s" % (sys_id, dg_id)
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -217,7 +258,11 @@ class MegaRAID(IPlugin):
ErrorNumber.PLUGIN_BUG,
"MegaRAID storcli failed with error %d: %s" %
(rc_status['Status Code'], rc_status['Description']))
- return ctrl_output[0].get('Response Data')
+ real_data = ctrl_output[0].get('Response Data')
+ if real_data and 'Response Data' in real_data.keys():
+ return real_data['Response Data']
+
+ return real_data
else:
return output

@@ -317,7 +362,55 @@ class MegaRAID(IPlugin):

return search_property(rc_lsm_disks, search_key, search_value)

+ @staticmethod
+ def _dg_free_size(dg_num, free_space_list):
+ """
+ Get information from 'FREE SPACE DETAILS' of /c0/dall show all.
+ """
+ for free_space in free_space_list:
+ if int(free_space['DG']) == int(dg_num):
+ return _mega_size_to_lsm(free_space['Size'])
+
+ return 0
+
+ def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num):
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ pool_id = _pool_id_of(dg_top['DG'], sys_id)
+ name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG'])
+ elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL
+ unsupported_actions = 0
+ # TODO(Gris Ge): contact LSI to get accurate total space and free
+ # space. The size we are using here is not what host
+ # got.
+ total_space = _mega_size_to_lsm(dg_top['Size'])
+ free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list)
+ status = _pool_status_of(dg_top)
+ status_info = ''
+ if status == Pool.STATUS_UNKNOWN:
+ status_info = dg_top['State']
+
+ plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG'])
+
+ return Pool(
+ pool_id, name, elem_type, unsupported_actions,
+ total_space, free_space, status, status_info,
+ sys_id, plugin_data)
+
@_handle_errors
def pools(self, search_key=None, search_value=None,
flags=Client.FLAG_RSVD):
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
+ lsm_pools = []
+ for ctrl_num in range(self._ctrl_count()):
+ dg_show_output = self._storcli_exec(
+ ["/c%d/dall" % ctrl_num, "show", "all"])
+ free_space_list = dg_show_output.get('FREE SPACE DETAILS', [])
+ for dg_top in dg_show_output['TOPOLOGY']:
+ if dg_top['Arr'] != '-':
+ continue
+ if dg_top['DG'] == '-':
+ continue
+ lsm_pools.append(
+ self._dg_top_to_lsm_pool(
+ dg_top, free_space_list, ctrl_num))
+
+ return search_property(lsm_pools, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:42 UTC
Permalink
* Treating MegaRAID VD as an LSM Volume.
* Using 'storcli /c0/vall show all' to query all volumes.
* Add new capability: Capabilities.VOLUMES
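
For illustration, a sketch of the resulting ID mapping (hypothetical VD 1
in DG 0 on system 'SV03403550'):

    vol_id = "%s:VD%d" % (sys_id, vd_id)  # -> 'SV03403550:VD1'
    pool_id = _pool_id_of(dg_id, sys_id)  # -> 'SV03403550:DG0'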

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 46 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 5e3802b..ae2e953 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin, Pool)
+ Disk, VERSION, search_property, IPlugin, Pool, Volume)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -226,6 +226,7 @@ class MegaRAID(IPlugin):
"System not found")
cap = Capabilities()
cap.set(Capabilities.DISKS)
+ cap.set(Capabilities.VOLUMES)
return cap

def _storcli_exec(self, storcli_cmds, flag_json=True):
@@ -414,3 +415,46 @@ class MegaRAID(IPlugin):
dg_top, free_space_list, ctrl_num))

return search_property(lsm_pools, search_key, search_value)
+
+ @staticmethod
+ def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list,
+ vd_prop_info, vd_path):
+
+ vol_id = "%s:VD%d" % (sys_id, vd_id)
+ name = "VD %d" % vd_id
+ vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information.
+ block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz'])
+ num_of_blocks = vd_prop_info['Number of Blocks']
+ admin_state = Volume.ADMIN_STATE_ENABLED
+ if vd_prop_info['Exposed to OS'] != 'Yes' or \
+ vd_basic_info['Access'] != 'RW':
+ admin_state = Volume.ADMIN_STATE_DISABLED
+ pool_id = _pool_id_of(dg_id, sys_id)
+ plugin_data = vd_path
+ return Volume(
+ vol_id, name, vpd83, block_size, num_of_blocks, admin_state,
+ sys_id, pool_id, plugin_data)
+
+ @_handle_errors
+ def volumes(self, search_key=None, search_value=None, flags=0):
+ lsm_vols = []
+ for ctrl_num in range(self._ctrl_count()):
+ vol_show_output = self._storcli_exec(
+ ["/c%d/vall" % ctrl_num, "show", "all"])
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ for key_name in vol_show_output.keys():
+ if key_name.startswith('/c'):
+ vd_basic_info = vol_show_output[key_name][0]
+ (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/')
+ dg_id = int(dg_id)
+ vd_id = int(vd_id)
+ vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id]
+
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ lsm_vols.append(
+ MegaRAID._vd_to_lsm_vol(
+ vd_id, dg_id, sys_id, vd_basic_info,
+ vd_pd_info_list, vd_prop_info, key_name))
+
+ return search_property(lsm_vols, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-01 10:22:43 UTC
Permalink
* In MegaRAID's storcli, the disk status 'Rbld' indicates this disk
is in use for reconstructing pool data.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index ae2e953..83abf63 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -93,6 +93,7 @@ _DISK_STATE_MAP = {
'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK,
'UGood': Disk.STATUS_STOPPED | Disk.STATUS_OK,
'UBad': Disk.STATUS_STOPPED | Disk.STATUS_ERROR,
+ 'Rbld': Disk.STATUS_RECONSTRUCT,
}
--
1.8.3.1
Gris Ge
2015-03-01 10:22:44 UTC
Permalink
* Use 'storcli /c0/v1 show all' command line output to determine
RAID type, strip size and disk count.

* Calculate the optimal I/O size as the strip size multiplied by the
count of RAID data (not mirror, not parity) disks (see the sketch
after the test output below).

* Tested query on RAID 0, 1, 5, 10, 50.

* Tested the optimal I/O size on RAID 5:
[***@storageqe-08 ~]# lsmenv mega lsmcli vri --vol SV03403550:VD1
Device alias: mega
URI: megaraid://
lsmcli vri --vol SV03403550:VD1
Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
--------------------------------------------------------------------------------------------
SV03403550:VD1 | RAID5 | 131072 | 5 | 131072 | 524288

Time: 0:00.29
[***@storageqe-08 ~]# dd if=/dev/urandom of=test.img bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 153.174 s, 6.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 58.9573 s, 17.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 37.7282 s, 27.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 35.3351 s, 29.7 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 70.0779 s, 15.0 MB/s
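
As a worked check of the numbers above, a sketch of the RAID 5 branch of
the calculation in this patch (values taken from the lsmcli output):

    strip_size = 131072           # 128 KiB strip per disk
    disk_count = 5                # drives per span * span depth
    strip_count = disk_count - 1  # RAID 5: one parity strip per stripe
    opt_io_size = strip_size * strip_count
    print(opt_io_size)            # 524288, as reported above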

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 83abf63..e754cd8 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -157,6 +157,33 @@ def _pool_id_of(dg_id, sys_id):
return "%s:DG%s" % (sys_id, dg_id)


+_RAID_TYPE_MAP = {
+ 'RAID0': Volume.RAID_TYPE_RAID0,
+ 'RAID1': Volume.RAID_TYPE_RAID1,
+ 'RAID5': Volume.RAID_TYPE_RAID5,
+ 'RAID6': Volume.RAID_TYPE_RAID6,
+ 'RAID00': Volume.RAID_TYPE_RAID0,
+ # Some MegaRAID only support max 16 disks in each span.
+ # To support 16+ disks in one group, MegaRAID has RAID00 or even RAID000.
+ # All of them are considered as RAID0
+ 'RAID10': Volume.RAID_TYPE_RAID10,
+ 'RAID50': Volume.RAID_TYPE_RAID50,
+ 'RAID60': Volume.RAID_TYPE_RAID60,
+}
+
+
+def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info):
+ raid_type = _RAID_TYPE_MAP.get(
+ vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN)
+
+ # In LSI, four disks or more RAID1 is actually a RAID10.
+ if raid_type == Volume.RAID_TYPE_RAID1 and \
+ int(vd_prop_info['Number of Drives Per Span']) >= 4:
+ raid_type = Volume.RAID_TYPE_RAID10
+
+ return raid_type
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -459,3 +486,52 @@ class MegaRAID(IPlugin):
vd_pd_info_list, vd_prop_info, key_name))

return search_property(lsm_vols, search_key, search_value)
+
+ @_handle_errors
+ def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
+ if not volume.plugin_data:
+ raise LsmError(
+ ErrorNumber.INVALID_ARGUMENT,
+ "Ilegal input volume argument: missing plugin_data property")
+
+ vd_path = volume.plugin_data
+ vol_show_output = self._storcli_exec([vd_path, "show", "all"])
+ vd_basic_info = vol_show_output[vd_path][0]
+ vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
+ strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size'])
+ disk_count = (
+ int(vd_prop_info['Number of Drives Per Span']) *
+ int(vd_prop_info['Span Depth']))
+ if raid_type == Volume.RAID_TYPE_RAID0:
+ strip_count = disk_count
+ elif raid_type == Volume.RAID_TYPE_RAID1:
+ strip_count = 1
+ elif raid_type == Volume.RAID_TYPE_RAID5:
+ strip_count = disk_count - 1
+ elif raid_type == Volume.RAID_TYPE_RAID6:
+ strip_count = disk_count - 2
+ elif raid_type == Volume.RAID_TYPE_RAID50:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 1) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID60:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 2) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID10:
+ strip_count = (
+ int(vd_prop_info['Number of Drives Per Span']) / 2 *
+ int(vd_prop_info['Span Depth']))
+ else:
+ # MegaRAID does not support RAID 15 or 16 yet.
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unexpected RAID type: %s" %
+ vd_basic_info['TYPE'])
+
+ return [
+ raid_type, strip_size, disk_count, strip_size,
+ strip_size * strip_count]
--
1.8.3.1
Gris Ge
2015-03-01 10:22:45 UTC
Permalink
* NetApp ONTAP strip size (minimum I/O size) is 4KiB; stripe size
(optimal I/O size) is 64KiB. Both are unchangeable.

* The extent count (disk count) is taken from the aggregate 'disk-count'
property (see the sketch below).

* Changed Filer.aggregates() to accept an optional argument 'aggr_name',
which queries the specified aggregate only.

* Uncommented and updated the old code for converting NetApp RAID level to
libstoragemgmt RAID level.

* Tested on ONTAP simulator 8.1.1 7-mode and real ONTAP 8.0.2 7-mode.
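
As a sketch, the return value for a hypothetical 24-disk raid_dp
aggregate would be:

    # [raid_type, strip_size, extent_count, min_io_size, opt_io_size]
    [Volume.RAID_TYPE_RAID6, 4096, 24, 4096, 65536]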

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/ontap/na.py | 8 ++++++--
plugin/ontap/ontap.py | 55 +++++++++++++++++++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 1e015ba..b68577c 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -231,11 +231,15 @@ class Filer(object):
disks = self._invoke('disk-list-info')
return disks['disk-details']['disk-detail-info']

- def aggregates(self):
+ def aggregates(self, aggr_name=None):
"""
Return a list of aggregates
+ If aggr_name is provided, return [na_aggr]
"""
- pools = self._invoke('aggr-list-info')
+ if aggr_name:
+ pools = self._invoke('aggr-list-info', {'aggregate': aggr_name})
+ else:
+ pools = self._invoke('aggr-list-info')
tmp = pools['aggregates']['aggr-info']
return to_list(tmp)

diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index c2a2c58..e175a71 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -121,6 +121,10 @@ class Ontap(IStorageAreaNetwork, INfs):
'restricted': 'volume is restricted to protocol accesses',
}

+ # strip size: http://www.netapp.com/us/media/tr-3001.pdf
+ _STRIP_SIZE = 4096
+ _OPT_IO_SIZE = 65536
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -310,19 +314,6 @@ class Ontap(IStorageAreaNetwork, INfs):
return search_property(
[self._lun(l) for l in luns], search_key, search_value)

-# @staticmethod
-# def _raid_type_of_na_aggr(na_aggr):
-# na_raid_statuses = na_aggr['raid-status'].split(',')
-# if 'raid0' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID0
-# if 'raid4' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID4
-# if 'raid_dp' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID6
-# if 'mixed_raid_type' in na_raid_statuses:
-# return Pool.RAID_TYPE_MIXED
-# return Pool.RAID_TYPE_UNKNOWN
-
# This is based on NetApp ONTAP Manual pages:
# https://library.netapp.com/ecmdocs/ECMP1196890/html/man1/na_aggr.1.html
_AGGR_RAID_STATUS_CONV = {
@@ -1290,3 +1281,41 @@ class Ontap(IStorageAreaNetwork, INfs):
self.sys_info.id))

return search_property(tp, search_key, search_value)
+
+ @staticmethod
+ def _raid_type_of_na_aggr(na_aggr):
+ na_raid_statuses = na_aggr['raid-status'].split(',')
+ if 'mixed_raid_type' in na_raid_statuses:
+ return Volume.RAID_TYPE_MIXED
+ elif 'raid0' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID0
+ elif 'raid4' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID4
+ elif 'raid_dp' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID6
+ return Volume.RAID_TYPE_UNKNOWN
+
+ @handle_ontap_errors
+ def volume_raid_info(self, volume, flags=0):
+ na_vol_name = Ontap._get_volume_from_path(volume.pool_id)
+ na_vol = self.f.volumes(volume_name=na_vol_name)
+ if len(na_vol) == 0:
+ # If parent pool not found, then this LSM volume should not exist.
+ raise LsmError(
+ ErrorNumber.NOT_FOUND_VOLUME,
+ "Volume not found")
+ if len(na_vol) != 1:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got 2+ na_vols from self.f.volumes() "
+ "%s" % na_vol)
+
+ na_vol = na_vol[0]
+ na_aggr_name = na_vol['containing-aggregate']
+ na_aggr = self.f.aggregates(aggr_name=na_aggr_name)[0]
+ raid_type = Ontap._raid_type_of_na_aggr(na_aggr)
+ extent_count = int(na_aggr['disk-count'])
+
+ return [
+ raid_type, Ontap._STRIP_SIZE, extent_count, Ontap._STRIP_SIZE,
+ Ontap._OPT_IO_SIZE]
--
1.8.3.1
Tony Asleson
2015-03-02 22:52:48 UTC
Permalink
Post by Gris Ge
* Changed the value of these constants from -1 to 0 to align with
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN
Hi Gris,

Looking at the API addition, I see that the data sizes are all int32_t,
which leads me to the following questions:

1. Should we change to unsigned as we changed from -1 to 0 for default?
2. Should we increase the size of the returned values in
lsm_volume_raid_info, specifically extent_count. For example using a
LVM PE size of 4K, 2**31 gets you to 8T size, which is a couple desktop
drives. I'm thinking we should bump this to uint64_t, the others seem
safe, what do you think?

Thanks!

Regards,
Tony
Gris Ge
2015-03-03 12:50:55 UTC
Permalink
Post by Tony Asleson
Hi Gris,
Looking at the API addition, I see that the data sizes are all int32_t,
1. Should we change to unsigned as we changed from -1 to 0 for default?
Yes. That's a good point.
Post by Tony Asleson
2. Should we increase the size of the returned values in
lsm_volume_raid_info, specifically extent_count. For example using a
LVM PE size of 4K, 2**31 gets you to 8T size, which is a couple desktop
drives. I'm thinking we should bump this to uint64_t, the others seem
safe, what do you think?
Ahh, that's the confusion I am trying to avoid. The 'extent_count' in
lsm means how many disks or other storage objects assembled the RAID
group. For LVM, it means how many PVs are used to create a certain LV,
not how many PEs are used.

I am changing it back to 'disk_count', which could be less confusing if
that's OK with you.

For future support of LVM RAID and EMC VMAX, we still count the
physical disks, not PEs (some users might use 2+ PEs from one disk in
one VG for LVM RAID; those should only be counted as 1 disk).
Post by Tony Asleson
Thanks!
Regards,
Tony
--
Gris Ge
Gris Ge
2015-03-03 15:28:47 UTC
Permalink
Post by Gris Ge
Post by Tony Asleson
2. Should we increase the size of the returned values in
lsm_volume_raid_info, specifically extent_count. For example using a
LVM PE size of 4K, 2**31 gets you to 8T size, which is a couple desktop
drives. I'm thinking we should bump this to uint64_t, the others seem
safe, what do you think?
Ahh, that's the confusion I am trying to avoid. The 'extent_count' in
lsm means how many disks or other storage objects assembled the RAID
group. For LVM, it means how many PVs are used to create a certain LV,
not how many PEs are used.
I am changing it back to 'disk_count', which could be less confusing if
that's OK with you.
No. 'disk_count' is not the proper word here. A user might be confused
when getting RAID 1 with a disk_count of 1 if someone sliced a disk in
two for RAID 1.

Currently, I have 'strip_count_per_stripe' or the SMI-S 'stripe_length'.

Any idea?
--
Gris Ge
Tony Asleson
2015-03-03 15:55:16 UTC
Permalink
Post by Gris Ge
Post by Gris Ge
Post by Tony Asleson
2. Should we increase the size of the returned values in
lsm_volume_raid_info, specifically extent_count. For example using a
LVM PE size of 4K, 2**31 gets you to 8T size, which is a couple desktop
drives. I'm thinking we should bump this to uint64_t, the others seem
safe, what do you think?
Ahh, that's the confusion I am trying to avoid. The 'extent_count' in
lsm means how many disks or other storage objects assembled the RAID
group. For LVM, it means how many PVs are used to create a certain LV,
not how many PEs are used.
I am changing it back to 'disk_count', which could be less confusing if
that's OK with you.
No. 'disk_count' is not the proper word here. A user might be confused
when getting RAID 1 with a disk_count of 1 if someone sliced a disk in
two for RAID 1.
Having a RAID 1 on a single disk kind of negates the reason for having a
mirror. In my opinion, a user knowing that they are doing a mirror on a
single device is valuable information. LVM suggests that you only
create one PV per disk to avoid this scenario.
Post by Gris Ge
Currently, I have 'strip_count_per_stripe' or the SMI-S 'stripe_length'.
Any idea?
To me stripe_length seems redundant with strip_size. What is it you are
trying to convey?

Your original comment:
"Count of disks or other storage extents in this RAID group"

This is why I raised the original question about variable size and
extents. Maybe simply say device_count, "Number of physically separate
devices which are used to store the data".

Regards,
Tony
Gris Ge
2015-03-04 08:57:17 UTC
Permalink
Post by Tony Asleson
Post by Gris Ge
Post by Gris Ge
I am changing it back to 'disk_count', which could be less confusing if
that's OK with you.
No. 'disk_count' is not the proper word here. A user might be confused
when getting RAID 1 with a disk_count of 1 if someone sliced a disk in
two for RAID 1.
Having a RAID 1 on a single disk kind of negates the reason for having a
mirror. In my opinion, a user knowing that they are doing a mirror on a
single device is valuable information. LVM suggests that you only
create one PV per disk to avoid this scenario.
Post by Gris Ge
Currently, I have 'strip_count_per_stripe' or the SMI-S 'stripe_length'.
Any idea?
To me stripe_length seems redundant with strip_size. What is it you are
trying to convey?
"Count of disks or other storage extents in this RAID group"
This is why I raised the original question about variable size and
extents. Maybe simply say device_count, "Number of physically separate
devices which are used to store the data".
Regards,
Tony
Hi Tony,

Thanks for the suggestions.
After careful review, I finally went with 'disk_count' as it fits the most
common expectation for RAID. Most RAID users will know what this
property/value means without any additional explanation.

For those using LVM RAID, EMC VMAX or other special RAID systems (if we
support them in this method in the future), the API document
explains how this value is calculated.

I will send out the V5 patch and we can continue this topic there.

Best regards.
--
Gris Ge
Gris Ge
2015-03-04 09:09:18 UTC
Permalink
* New method volume_raid_info() to query RAID type, disk count,
minimum I/O size, optimal I/O size.

* These plugins support this new method:
* sim
# Simply returns UNKNOWN
* simc
# Simply sets UNKNOWN on output parameters.
* MegaRAID

* The C library part might be buggy considering my C skill set.

* Potential support by other plugins:
* Targetd:
We could use PE size of LVM for minimum I/O size and strip size.
And set RAID type as JBOD and extent count as 1.
Once LVM RAID is supported, it could provide the real RAID type and other
information.
* SMI-S:
In the SMI-S spec, each StorageVolume has an associated StorageSetting,
but no definition states that ExtentStripeLength is the optimal I/O
size. Instead of guessing or wrangling with SNIA, simply 'no support'
works better.
* ONTAP:
The patch for the ONTAP plugin is ready but not included in this patch set
since it was based on my testing and guesswork.
Waiting for NetApp's official answer about their optimal I/O size.
* Nstor:
No document found about strip settings.

* This is the best design and naming scheme I have.
PLEASE let me know if you have a better one.
Thank you very much in advance.

Changes in V2:
* Patch 6/13 and 10/13:
Tony introduced a new way for plugins to register newly added APIs with
full backward compatibility. The simulator C plugin implemented this change.
* Patch 10/13:
Add missing capability LSM_CAP_VOLUME_RAID_INFO

Changes in V3:
* Patch 6/13:
Another approach to register newly added APIs with full backward
compatibility:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
It will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1(),
in addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 is released, we can work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Patch 9/13:
Full volume_raid_info() support in simulator plugin.

Changes in V4:

* Included Tony's bug fix patch for C API:
[PATCH] lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

* Included ONTAP plugin support.

* Patch 4/15 and 5/15:
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

* Patch 5/15:
* Add LSM_FLAG_UNUSED_CHECK in public lsm_volume_raid_info() function.

* Patch 6/15:
* Removed unneeded import 'Volume' from simulator.py.

Changes in V5:

* Change output argument name from 'extent_count' to 'disk_count' as
it fits the most common expectation for RAID. Please check patch 3/15
for details.

* Change constant name:
* 'Volume.EXTENT_COUNT_UNKNOWN' -> 'Volume.DISK_COUNT_UNKNOWN'
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN -> LSM_VOLUME_DISK_COUNT_UNKNOWN
* Change these C API output arguments' data type from 'int32_t' to 'uint32_t':
* strip_size
* disk_count
* min_io_size
* opt_io_size

* Sync plugins for this change.

Gris Ge (13):
Python Library: Fix decorator problem with docstrings
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
Simulator Plugin: Add volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
lsmcli: Add volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
MegaRAID Plugin: Add volume_raid_info() support.
ONTAP Plugin: Add volume_raid_info() support.

Tony Asleson (1):
lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 ++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++
c_binding/lsm_plugin_ipc.cpp | 86 ++++++--
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/ontap/na.py | 8 +-
plugin/ontap/ontap.py | 55 ++++--
plugin/sim/simarray.py | 149 +++++++++-----
plugin/sim/simulator.py | 3 +
plugin/simc/simc_lsmplugin.c | 33 +++-
python_binding/lsm/_client.py | 103 ++++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
20 files changed, 909 insertions(+), 88 deletions(-)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:19 UTC
Permalink
From: Tony Asleson <***@redhat.com>

In a number of places in the fs handlers we were checking
if san_ops was valid before dereferencing a fs_ops pointer.

This is clearly wrong and will result in a seg fault if a
plugin implements fs operations and not block operations.

Signed-off-by: Tony Asleson <***@redhat.com>
Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/lsm_plugin_ipc.cpp | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index 7e0d034..f5374b9 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -1371,7 +1371,7 @@ static int fs(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->fs_ops->fs_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_list ) {
if( LSM_FLAG_EXPECTED_TYPE(params) &&
((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK )) {

@@ -1407,7 +1407,7 @@ static int fs_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_create ) {

Value v_pool = params["pool"];
Value v_name = params["name"];
@@ -1459,7 +1459,7 @@ static int fs_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_delete ) {

Value v_fs = params["fs"];

@@ -1493,7 +1493,7 @@ static int fs_resize(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_resize ) {
+ if( p && p->fs_ops && p->fs_ops->fs_resize ) {

Value v_fs = params["fs"];
Value v_size = params["new_size_bytes"];
@@ -1541,7 +1541,7 @@ static int fs_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_clone ) {

Value v_src_fs = params["src_fs"];
Value v_name = params["dest_fs_name"];
@@ -1597,7 +1597,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_OK;

- if( p && p->san_ops && p->fs_ops->fs_file_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_file_clone ) {

Value v_fs = params["fs"];
Value v_src_name = params["src_file_name"];
@@ -1648,7 +1648,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1686,7 +1686,7 @@ static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency_rm ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency_rm ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1725,7 +1725,7 @@ static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &respon
static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_list ) {

Value v_fs = params["fs"];

@@ -1766,7 +1766,7 @@ static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_create ) {

Value v_fs = params["fs"];
Value v_ss_name = params["snapshot_name"];
@@ -1814,7 +1814,7 @@ static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_delete ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
@@ -1851,7 +1851,7 @@ static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_restore(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_restore ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_restore ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
--
1.8.3.1
Gris Ge
2015-03-04 09:09:20 UTC
Permalink
* With a decorator, the docstring of the original method is lost.
* Use functools.wraps() to keep the docstring of the original method.
Check
http://stackoverflow.com/questions/1782843/python-decorator-problem-with-docstrings
for details.

* With this fix, users can check a method's help message in interactive
Python with:
help(lsm.Client.volume_create)
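
A minimal sketch of the effect (the decorator and method names here are
only for illustration):

    import functools

    def logged(func):
        @functools.wraps(func)  # copies __doc__ and __name__ onto inner
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner

    @logged
    def volume_create():
        """Docstring survives the decorator."""

    print(volume_create.__doc__)  # -> Docstring survives the decorator.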

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_common.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index f2fd568..4c87661 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -533,6 +533,7 @@ def return_requires(*types):
is quite important.
"""
def outer(func):
+ @functools.wraps(func)
def inner(*args, **kwargs):
r = func(*args, **kwargs)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:21 UTC
Permalink
* The docstring of lsm.Client.volume_raid_info() contains full details
about this new method. Quick info:
Usage:
volume_raid_info(self, volume, flags=0)
Returns:
[raid_type, strip_size, extent_count, min_io_size, opt_io_size]
# strip_size is the size of strip on each disk/extent
# extent_count is the disk/extent count.
# min_io_size is minimum I/O size. Also the preferred I/O size
# of random I/O.
# opt_io_size is optimal I/O size. Also the preferred I/O size
# of sequential I/O.

* Why not use 'pool_raid_info' instead?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not implement RAID
at the pool level but at the volume level.

* Why use 'extent_count' instead of 'disk_count'?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not use disks
directly to assemble the RAID group.

* Why we need 'min_io_size' and 'opt_io_size' when we have 'extent_count'
and 'strip_size'?
Normally, min_io_size is strip_size, and opt_io_size could be calculated
from raid_type, strip_size and extent_count. But on NetApp, an I/O test[1]
indicates their optimal I/O size is 64KiB no matter how many disks are in
the RAID group. It might be[2] because NetApp creates a WAFL filesystem on
the RAID group, which changes the optimal I/O size.

In general, the optimal I/O size or min_io_size of some RAID systems
might not be based on strip size and RAID disk/extent count.
We'd better expose this information directly instead of forcing users
to guess from strip size and disk/extent count (see the sketch after
the footnotes).

* New constants:
Volume.RAID_TYPE_UNKNOWN
# The plugin failed to detect the volume's RAID type.
Volume.RAID_TYPE_RAID0
# Stripe
Volume.RAID_TYPE_RAID1
# Mirror for two disks. For 4 disks or more, they are RAID10.
Volume.RAID_TYPE_RAID3
# Byte-level striping with dedicated parity
Volume.RAID_TYPE_RAID4
# Block-level striping with dedicated parity
Volume.RAID_TYPE_RAID5
# Block-level striping with distributed parity
Volume.RAID_TYPE_RAID6
# Block-level striping with two distributed parities, aka, RAID-DP
Volume.RAID_TYPE_RAID10
# Stripe of mirrors
Volume.RAID_TYPE_RAID15
# Parity of mirrors
Volume.RAID_TYPE_RAID16
# Dual parity of mirrors
Volume.RAID_TYPE_RAID50
# Stripe of parities
Volume.RAID_TYPE_RAID60
# Stripe of dual parities
Volume.RAID_TYPE_RAID51
# Mirror of parities
Volume.RAID_TYPE_RAID61
# Mirror of dual parities
Volume.RAID_TYPE_JBOD
# Just bunch of disks, no parity, no striping.
Volume.RAID_TYPE_MIXED
# This volume contains multiple RAID settings.
Volume.RAID_TYPE_OTHER
# Vendor specific RAID type

Volume.STRIP_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN

* New Capability:
lsm.Volume.VOLUME_RAID_INFO

[1] On a 24-disk RAID6 (RAID-DP) with a 4KiB strip size (not changeable):
* With I/O size 90112(4096 * 22), write speed is 73.4 MB/s
* With I/O size 65536, write speed is 86.9 MB/s
# the optimal_io_size exposed via sysfs from SCSI BLOCK LIMITS(0xB0) VPD

[2] No NetApp official document confirms or denies it. Waiting for NetApp's reply.
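
As a concrete sketch of why the calculated value cannot be trusted here
(numbers taken from footnote [1] above):

    data_disks = 24 - 2               # RAID 6 (RAID-DP): two parities
    naive_opt_io = data_disks * 4096  # 90112, the slower I/O size above
    actual_opt_io = 65536             # what the NetApp array prefers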

Changes in V2:
* Add 'New in 1.2' docstring.

Changes in V4 (no change in V3):
* Change the value of these constants from -1 to 0 to align with
libblkid/sysfs:
Volume.STRIP_SIZE_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN

Changes in V5:
* Rename the return value 'extent_count' to 'disk_count' as it fits the
most common expectation for RAID.
For LVM RAID, EMC VMAX and other disk-slice-based RAID systems,
the 'disk_count' here means the slice count.
The constant is renamed from EXTENT_COUNT_UNKNOWN to DISK_COUNT_UNKNOWN.

* Updated docstring to explain the 'disk_count' return value when facing
RAID systems that use disk slices.

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_client.py | 103 ++++++++++++++++++++++++++++++++++++++++++
python_binding/lsm/_data.py | 42 +++++++++++++++++
2 files changed, 145 insertions(+)

diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index e637962..a641b1d 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -971,3 +971,106 @@ class Client(INetworkAttachedStorage):
"""
_check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('target_ports', _del_self(locals()))
+
+ ## Returns the RAID information of certain volume
+ # @param self The this pointer
+ # @param raid_type The RAID type of this volume
+ # @param strip_size The size of strip of disk or other storage
+ # extent.
+ # @param disk_count The count of disks in the RAID group(s) from
+ # which this volume is allocated.
+ # @param min_io_size The preferred I/O size of random I/O.
+ # @param opt_io_size The preferred I/O size of sequential I/O.
+ # @returns [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
+ @_return_requires([int, int, int, int, int])
+ def volume_raid_info(self, volume, flags=FLAG_RSVD):
+ """Query the RAID information of certain volume.
+
+ New in version 1.2.
+
+ Query the RAID type, strip size, disk count, minimum I/O size,
+ and optimal I/O size of the given volume.
+
+ This method requires this capability:
+ lsm.Capabilities.VOLUME_RAID_INFO
+
+ Args:
+ volume (Volume object): Volume to query
+ flags (int): Reserved for future use. Should be set as
+ lsm.Client.FLAG_RSVD
+ Returns:
+ [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
+
+ raid_type (int): RAID Type of requested volume.
+ Could be one of these values:
+ Volume.RAID_TYPE_RAID0
+ Stripe
+ Volume.RAID_TYPE_RAID1
+ Two disks Mirror
+ Volume.RAID_TYPE_RAID3
+ Byte-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID4
+ Block-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID5
+ Block-level striping with distributed parity
+ Volume.RAID_TYPE_RAID6
+ Block-level striping with two distributed parities,
+ aka, RAID-DP
+ Volume.RAID_TYPE_RAID10
+ Stripe of mirrors
+ Volume.RAID_TYPE_RAID15
+ Parity of mirrors
+ Volume.RAID_TYPE_RAID16
+ Dual parity of mirrors
+ Volume.RAID_TYPE_RAID50
+ Stripe of parities
+ Volume.RAID_TYPE_RAID60
+ Stripe of dual parities
+ Volume.RAID_TYPE_RAID51
+ Mirror of parities
+ Volume.RAID_TYPE_RAID61
+ Mirror of dual parities
+ Volume.RAID_TYPE_JBOD
+ Just a bunch of disks, no parity, no striping.
+ Volume.RAID_TYPE_UNKNOWN
+ The plugin failed to detect the volume's RAID type.
+ Volume.RAID_TYPE_MIXED
+ This volume contains multiple RAID settings.
+ Volume.RAID_TYPE_OTHER
+ Vendor specific RAID type
+ strip_size(int): The strip size on each disk or other storage
+ extent.
+ For RAID1/JBOD, it should be set to the sector size.
+ If the plugin failed to detect the strip size, it should be set
+ to Volume.STRIP_SIZE_UNKNOWN(0).
+ disk_count(int): The count of disks used for assembling the RAID
+ group(s) this volume is allocated from.
+ For any RAID system using disk slices, this value
+ indicates how many disk slices are used for the RAID.
+ For example, on LVM RAID, 'disk_count' here indicates the
+ count of PVs used for the volume.
+ Another example: on EMC VMAX, 'disk_count' here indicates
+ how many hyper volumes are used for this volume.
+ For any RAID system using remote LUNs for data storage, each
+ remote LUN should be counted as one disk.
+ If the plugin failed to detect disk_count, it should be set
+ to Volume.DISK_COUNT_UNKNOWN(0).
+ min_io_size(int): The minimum I/O size, the device preferred I/O
+ size for random I/O. Any I/O size that is not a multiple
+ of this value may incur a significant speed penalty.
+ Normally it refers to the strip size of each disk (extent).
+ If the plugin failed to detect min_io_size, it should try these
+ values in sequence:
+ logical sector size -> physical sector size ->
+ Volume.MIN_IO_SIZE_UNKNOWN(0).
+ opt_io_size(int): The optimal I/O size, the device preferred I/O
+ size for sequential I/O. Normally it refers to the RAID group
+ stripe size.
+ If the plugin failed to detect opt_io_size, it should be set
+ to Volume.OPT_IO_SIZE_UNKNOWN(0).
+ Raises:
+ LsmError:
+ ErrorNumber.NO_SUPPORT
+ No support.
+ """
+ return self._tp.rpc('volume_raid_info', _del_self(locals()))
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 067c766..6fb2325 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -258,6 +258,46 @@ class Volume(IData):
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1

+ RAID_TYPE_UNKNOWN = -1
+ # The plugin failed to detect the volume's RAID type.
+ RAID_TYPE_RAID0 = 0
+ # Stripe
+ RAID_TYPE_RAID1 = 1
+ # Mirror for two disks. For 4 disks or more, they are RAID10.
+ RAID_TYPE_RAID3 = 3
+ # Byte-level striping with dedicated parity
+ RAID_TYPE_RAID4 = 4
+ # Block-level striping with dedicated parity
+ RAID_TYPE_RAID5 = 5
+ # Block-level striping with distributed parity
+ RAID_TYPE_RAID6 = 6
+ # Block-level striping with two distributed parities, aka, RAID-DP
+ RAID_TYPE_RAID10 = 10
+ # Stripe of mirrors
+ RAID_TYPE_RAID15 = 15
+ # Parity of mirrors
+ RAID_TYPE_RAID16 = 16
+ # Dual parity of mirrors
+ RAID_TYPE_RAID50 = 50
+ # Stripe of parities
+ RAID_TYPE_RAID60 = 60
+ # Stripe of dual parities
+ RAID_TYPE_RAID51 = 51
+ # Mirror of parities
+ RAID_TYPE_RAID61 = 61
+ # Mirror of dual parities
+ RAID_TYPE_JBOD = 20
+ # Just a bunch of disks, no parity, no striping.
+ RAID_TYPE_MIXED = 21
+ # This volume contains multiple RAID settings.
+ RAID_TYPE_OTHER = 22
+ # Vendor specific RAID type
+
+ STRIP_SIZE_UNKNOWN = 0
+ DISK_COUNT_UNKNOWN = 0
+ MIN_IO_SIZE_UNKNOWN = 0
+ OPT_IO_SIZE_UNKNOWN = 0
+
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
@@ -669,6 +709,8 @@ class Capabilities(IData):

VOLUME_ISCSI_CHAP_AUTHENTICATION = 53

+ VOLUME_RAID_INFO = 54
+
VOLUME_THIN = 55

#File system
--
1.8.3.1
Gris Ge
2015-03-04 09:09:22 UTC
Permalink
* Please check the Python API documentation for details about the
lsm_volume_raid_info() method. Quick info:

Queries the RAID information of the given volume.
@param[in] c Valid connection
@param[in] v Volume ptr.
@param[out] raid_type Enum of lsm_volume_raid_type
@param[out] strip_size Size of the strip on disk or other storage extent.
@param[out] disk_count Count of disks or other storage extents in this
RAID group.
@param[out] min_io_size Minimum I/O size, also the preferred I/O size
of random I/O.
@param[out] opt_io_size Optimal I/O size, also the preferred I/O size
of sequential I/O.
@param[in] flags Reserved, set to 0
@return LSM_ERR_OK on success else error reason.
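
For orientation, the IPC behind this call is a small JSON exchange; a
sketch (the parameter names and the five-element response array follow
handle_volume_raid_info() in the diff below; the exact request envelope
is an assumption):

    request:  {"method": "volume_raid_info",
               "params": {"volume": {...}, "flags": 0}}
    response: [raid_type, strip_size, disk_count, min_io_size,
               opt_io_size]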

* New plugin interface: lsm_plug_volume_raid_info

* New enum type: lsm_volume_raid_type

* New capability:
LSM_CAP_VOLUME_RAID_INFO

* New constants:
LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
/**^ Unknown */
LSM_VOLUME_RAID_TYPE_RAID0 = 0,
/**^ Stripe */
LSM_VOLUME_RAID_TYPE_RAID1 = 1,
/**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
LSM_VOLUME_RAID_TYPE_RAID3 = 3,
/**^ Byte-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID4 = 4,
/**^ Block-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID5 = 5,
/**^ Block-level striping with distributed parity */
LSM_VOLUME_RAID_TYPE_RAID6 = 6,
/**^ Block-level striping with two distributed parities, aka, RAID-DP */
LSM_VOLUME_RAID_TYPE_RAID10 = 10,
/**^ Stripe of mirrors */
LSM_VOLUME_RAID_TYPE_RAID15 = 15,
/**^ Parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID16 = 16,
/**^ Dual parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID50 = 50,
/**^ Stripe of parities */
LSM_VOLUME_RAID_TYPE_RAID60 = 60,
/**^ Stripe of dual parities */
LSM_VOLUME_RAID_TYPE_RAID51 = 51,
/**^ Mirror of parities */
LSM_VOLUME_RAID_TYPE_RAID61 = 61,
/**^ Mirror of dual parities */
LSM_VOLUME_RAID_TYPE_JBOD = 20,
/**^ Just a bunch of disks, no parity, no striping. */
LSM_VOLUME_RAID_TYPE_MIXED = 21,
/**^ This volume contains multiple RAID settings. */
LSM_VOLUME_RAID_TYPE_OTHER = 22,
/**^ Vendor specific RAID type */

LSM_VOLUME_STRIP_SIZE_UNKNOWN
LSM_VOLUME_DISK_COUNT_UNKNOWN
LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

V2: Changed the callback registration.

Changes in V3:

* New implementation for adding new methods:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
It will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1(), in
addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 is released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Add 'New in version 1.2' comment to the lsm_volume_raid_info() function.

Changes in V4:

* Add LSM_FLAG_UNUSED_CHECK in public lsm_volume_raid_info() function.
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

Changes in V5:
* Rename output argument 'extent_count' to 'disk_count'.
* Change constant name from LSM_VOLUME_EXTENT_COUNT_UNKNOWN to
LSM_VOLUME_DISK_COUNT_UNKNOWN.
* Change data type from 'int32_t' to 'uint32_t' for these output arguments:
* strip_size
* disk_count
* min_io_size
* opt_io_size

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 +++++++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 ++
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 +++++++++++++++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 +++++++++++++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++++++++++++++
c_binding/lsm_plugin_ipc.cpp | 62 +++++++++++++++++++++-
7 files changed, 231 insertions(+), 1 deletion(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 879f184..6e03f78 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -844,6 +844,26 @@ extern "C" {
uint32_t *count,
lsm_flag flags);

+/**
+ * Queries the RAID information of the given volume. New in version 1.2.
+ * @param[in] c Valid connection
+ * @param[in] v Volume ptr.
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on disk or other storage extent.
+ * @param[out] disk_count Count of disks of RAID group(s) this volume
+ * is allocated from.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved, set to 0
+ * @return LSM_ERR_OK on success else error reason.
+ */
+int LSM_DLL_EXPORT lsm_volume_raid_info(
+ lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size, lsm_flag flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 7d6182c..18490f3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -77,6 +77,9 @@ typedef enum {

LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**< If you can configure iSCSI chap authentication */

+ LSM_CAP_VOLUME_RAID_INFO = 54,
+ /**^ If you can query RAID information of a volume */
+
LSM_CAP_VOLUME_THIN = 55, /**< Thin provisioned volumes are supported */

LSM_CAP_FS = 100, /**< List file systems */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index e7874f7..b36586c 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -745,6 +745,8 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
lsm_flag flags);
/** \struct lsm_san_ops_v1
* \brief Block array oriented functions (callback functions)
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
@@ -774,6 +776,8 @@ struct lsm_san_ops_v1 {

/** \struct lsm_fs_ops_v1
* \brief File system oriented functionality
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_fs_ops_v1 {
lsm_plug_fs_list fs_list; /**< list file systems */
@@ -792,6 +796,8 @@ struct lsm_fs_ops_v1 {

/** \struct lsm_nas_ops_v1
* \brief NAS system oriented functionality call back functions
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_nas_ops_v1 {
lsm_plug_nfs_auth_types nfs_auth_types; /**< List nfs authentication types */
@@ -801,6 +807,37 @@ struct lsm_nas_ops_v1 {
};

/**
+ * Query the RAID information of a volume
+ * @param[in] c Valid lsm plug-in pointer
+ * @param[in] volume Volume to query
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on each disk or other
+ * storage extent.
+ * @param[out] disk_count Count of disks of RAID group(s) this
+ * volume is allocated from.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved
+ * @return LSM_ERR_OK, else error reason
+ */
+typedef int (*lsm_plug_volume_raid_info)(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type, uint32_t *strip_size,
+ uint32_t *disk_count, uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags);
+
+/** \struct lsm_ops_v1_2
+ * \brief Functions added in version 1.2
+ * NOTE: This structure may change during development until version 1.2
+ * is released.
+ */
+struct lsm_ops_v1_2 {
+ lsm_plug_volume_raid_info vol_raid_info;
+ /**^ Query volume RAID information*/
+};
+
+/**
* Copies the memory pointed to by item with given type t.
* @param t Type of item to copy
* @param item Pointer to src
@@ -839,6 +876,24 @@ int LSM_DLL_EXPORT lsm_register_plugin_v1( lsm_plugin_ptr plug,
struct lsm_nas_ops_v1 *nas_ops );

/**
+ * Used to register version 1.2 APIs plug-in operation.
+ * @param plug Pointer provided by the framework
+ * @param private_data Private data to be used for whatever the plug-in
+ * needs
+ * @param mgm_ops Function pointers for struct lsm_mgmt_ops_v1
+ * @param san_ops Function pointers for struct lsm_san_ops_v1
+ * @param fs_ops Function pointers for struct lsm_fs_ops_v1
+ * @param nas_ops Function pointers for struct lsm_nas_ops_v1
+ * @param ops_v1_2 Function pointers for struct lsm_ops_v1_2
+ * @return LSM_ERR_OK on success, else error reason.
+ */
+int LSM_DLL_EXPORT lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug,
+ void * private_data, struct lsm_mgmt_ops_v1 *mgm_ops,
+ struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops,
+ struct lsm_nas_ops_v1 *nas_ops, struct lsm_ops_v1_2 *ops_v1_2);
+
+/**
* Used to retrieve private data for plug-in operation.
* @param plug Opaque plug-in pointer.
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 309a5e8..562fcff 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -131,6 +131,49 @@ typedef enum {
LSM_VOLUME_PROVISION_DEFAULT = 3 /**< Default provisioning */
} lsm_volume_provision_type;

+/**< \enum lsm_volume_raid_type Different types of RAID */
+typedef enum {
+ LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
+ /**^ Unknown */
+ LSM_VOLUME_RAID_TYPE_RAID0 = 0,
+ /**^ Stripe */
+ LSM_VOLUME_RAID_TYPE_RAID1 = 1,
+ /**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
+ LSM_VOLUME_RAID_TYPE_RAID3 = 3,
+ /**^ Byte-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID4 = 4,
+ /**^ Block-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID5 = 5,
+ /**^ Block-level striping with distributed parity */
+ LSM_VOLUME_RAID_TYPE_RAID6 = 6,
+ /**^ Block-level striping with two distributed parities, aka, RAID-DP */
+ LSM_VOLUME_RAID_TYPE_RAID10 = 10,
+ /**^ Stripe of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID15 = 15,
+ /**^ Parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID16 = 16,
+ /**^ Dual parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID50 = 50,
+ /**^ Stripe of parities */
+ LSM_VOLUME_RAID_TYPE_RAID60 = 60,
+ /**^ Stripe of dual parities */
+ LSM_VOLUME_RAID_TYPE_RAID51 = 51,
+ /**^ Mirror of parities */
+ LSM_VOLUME_RAID_TYPE_RAID61 = 61,
+ /**^ Mirror of dual parities */
+ LSM_VOLUME_RAID_TYPE_JBOD = 20,
+ /**^ Just bunch of disks, no parity, no striping. */
+ LSM_VOLUME_RAID_TYPE_MIXED = 21,
+ /**^ This volume contains multiple RAID settings. */
+ LSM_VOLUME_RAID_TYPE_OTHER = 22,
+ /**^ Vendor specific RAID type */
+} lsm_volume_raid_type;
+
+#define LSM_VOLUME_STRIP_SIZE_UNKNOWN 0
+#define LSM_VOLUME_DISK_COUNT_UNKNOWN 0
+#define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN 0
+#define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN 0
+
/**
* Admin state for volume, enabled or disabled
*/
diff --git a/c_binding/lsm_datatypes.hpp b/c_binding/lsm_datatypes.hpp
index aed6891..6a6271f 100644
--- a/c_binding/lsm_datatypes.hpp
+++ b/c_binding/lsm_datatypes.hpp
@@ -193,6 +193,7 @@ struct LSM_DLL_LOCAL _lsm_plugin {
struct lsm_san_ops_v1 *san_ops; /**< Callbacks for SAN ops */
struct lsm_nas_ops_v1 *nas_ops; /**< Callbacks for NAS ops */
struct lsm_fs_ops_v1 *fs_ops; /**< Callbacks for fs ops */
+ struct lsm_ops_v1_2 *ops_v1_2; /**< Callbacks for v1.2 ops */
};


diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 37faed4..cb2665a 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -1171,6 +1171,54 @@ int lsm_volume_delete(lsm_connect *c, lsm_volume *volume, char **job,

}

+int lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume,
+ lsm_volume_raid_type * raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags)
+{
+ if( LSM_FLAG_UNUSED_CHECK(flags) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ int rc = LSM_ERR_OK;
+ CONN_SETUP(c);
+
+ if( !LSM_IS_VOL(volume) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ if( !raid_type || !strip_size || !disk_count || !min_io_size ||
+ !opt_io_size) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ try {
+ std::map<std::string, Value> p;
+ p["volume"] = volume_to_value(volume);
+ p["flags"] = Value(flags);
+
+ Value parameters(p);
+ Value response;
+
+ rc = rpc(c, "volume_raid_info", parameters, response);
+ if( LSM_ERR_OK == rc ) {
+ // The response is the five-element RAID info array.
+ std::vector<Value> j = response.asArray();
+ *raid_type = (lsm_volume_raid_type) j[0].asInt32_t();
+ *strip_size = j[1].asUint32_t();
+ *disk_count = j[2].asUint32_t();
+ *min_io_size = j[3].asUint32_t();
+ *opt_io_size = j[4].asUint32_t();
+ }
+ } catch( const ValueException &ve ) {
+ rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
+ ve.what());
+ }
+ return rc;
+
+}
+
int lsm_iscsi_chap_auth(lsm_connect *c, const char *init_id,
const char *username, const char *password,
const char *out_user, const char *out_password,
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index f5374b9..d2a43d4 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -123,6 +123,21 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
return rc;
}

+int lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op,
+ struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op,
+ struct lsm_nas_ops_v1 *nas_op, struct lsm_ops_v1_2 *ops_v1_2)
+{
+ int rc = lsm_register_plugin_v1(
+ plug, private_data, mgm_op, san_op, fs_op, nas_op);
+
+ if (rc != LSM_ERR_OK){
+ return rc;
+ }
+ plug->ops_v1_2 = ops_v1_2;
+ return rc;
+}
+
void *lsm_private_data_get(lsm_plugin_ptr plug)
{
if (!LSM_IS_PLUGIN(plug)) {
@@ -956,6 +971,50 @@ static int handle_volume_disable(lsm_plugin_ptr p, Value &params, Value &respons
return handle_vol_enable_disable(p, params, response, 0);
}

+static int handle_volume_raid_info(lsm_plugin_ptr p, Value &params,
+ Value &response)
+{
+ int rc = LSM_ERR_NO_SUPPORT;
+ if( p && p->ops_v1_2 && p->ops_v1_2->vol_raid_info) {
+ Value v_vol = params["volume"];
+
+ if(IS_CLASS_VOLUME(v_vol) &&
+ LSM_FLAG_EXPECTED_TYPE(params) ) {
+ lsm_volume *vol = value_to_volume(v_vol);
+ std::vector<Value> result;
+
+ if( vol ) {
+ lsm_volume_raid_type raid_type;
+ uint32_t strip_size;
+ uint32_t disk_count;
+ uint32_t min_io_size;
+ uint32_t opt_io_size;
+
+ rc = p->ops_v1_2->vol_raid_info(
+ p, vol, &raid_type, &strip_size, &disk_count,
+ &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params));
+
+ if( LSM_ERR_OK == rc ) {
+ result.push_back(Value((int32_t)raid_type));
+ result.push_back(Value(strip_size));
+ result.push_back(Value(disk_count));
+ result.push_back(Value(min_io_size));
+ result.push_back(Value(opt_io_size));
+ response = Value(result);
+ }
+
+ lsm_volume_record_free(vol);
+ } else {
+ rc = LSM_ERR_NO_MEMORY;
+ }
+
+ } else {
+ rc = LSM_ERR_TRANSPORT_INVALID_ARG;
+ }
+ }
+ return rc;
+}
+
static int ag_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2153,7 +2212,8 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("volume_replicate_range", handle_volume_replicate_range)
("volume_resize", handle_volume_resize)
("volumes_accessible_by_access_group", vol_accessible_by_ag)
- ("volumes", handle_volumes);
+ ("volumes", handle_volumes)
+ ("volume_raid_info", handle_volume_raid_info);

static int process_request(lsm_plugin_ptr p, const std::string &method, Value &request,
Value &response)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:23 UTC
Permalink
* Introduced full support of volume_raid_info().
* For a sub-pool, use the RAID info from its parent pool.
* For RAID 1 and JBOD, set strip_size, min_io_size, and opt_io_size
to the block size (512).
* For other RAID types, calculate opt_io_size from the data disk count
(see the sketch after this list).
* For RAID_TYPE_MIXED, raise a PLUGIN_BUG LsmError.

* Replaced PoolRAID.RAID_TYPE_RAID_XXX with Volume.RAID_TYPE_RAID_XXX.

* Replaced PoolRAID.RAID_TYPE_NOT_APPLICABLE with Volume.RAID_TYPE_OTHER.

* Bumped simulator data version to 3.1 due to previous Volume.RAID_TYPE_OTHER
change.
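
A condensed sketch of the opt_io_size calculation (constants from
BackStore in the diff below; the 5-disk RAID 5 pool is a hypothetical
example):

    STRIP_SIZE = 131072                  # BackStore.STRIP_SIZE, 128 KiB

    disk_count = 5                       # hypothetical RAID 5 pool
    data_disk_count = disk_count - 1     # per _RAID_PARITY_DISK_COUNT_FUNC

    min_io_size = STRIP_SIZE                    # 131072
    opt_io_size = data_disk_count * STRIP_SIZE  # 524288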

Changes in V4 (no changes in V2 and V3):

* Removed unneeded import 'Volume' in simulator.py.

Changes in V5:

* Sync changes of API changes for return argument name 'disk_count' and
constant name Volume.DISK_COUNT_UNKNOWN.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/sim/simarray.py | 149 +++++++++++++++++++++++++++++++-----------------
plugin/sim/simulator.py | 3 +
2 files changed, 99 insertions(+), 53 deletions(-)

diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index 73f4492..3fa7782 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -67,26 +67,6 @@ def _random_vpd():


class PoolRAID(object):
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
MEMBER_TYPE_UNKNOWN = 0
MEMBER_TYPE_DISK = 1
MEMBER_TYPE_DISK_MIX = 10
@@ -136,37 +116,37 @@ class PoolRAID(object):
return PoolRAID.MEMBER_TYPE_UNKNOWN

_RAID_DISK_CHK = {
- RAID_TYPE_JBOD: lambda x: x > 0,
- RAID_TYPE_RAID0: lambda x: x > 0,
- RAID_TYPE_RAID1: lambda x: x == 2,
- RAID_TYPE_RAID3: lambda x: x >= 3,
- RAID_TYPE_RAID4: lambda x: x >= 3,
- RAID_TYPE_RAID5: lambda x: x >= 3,
- RAID_TYPE_RAID6: lambda x: x >= 4,
- RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
- RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_JBOD: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID0: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID1: lambda x: x == 2,
+ Volume.RAID_TYPE_RAID3: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID4: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID5: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID6: lambda x: x >= 4,
+ Volume.RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
}

_RAID_PARITY_DISK_COUNT_FUNC = {
- RAID_TYPE_JBOD: lambda x: x,
- RAID_TYPE_RAID0: lambda x: x,
- RAID_TYPE_RAID1: lambda x: 1,
- RAID_TYPE_RAID3: lambda x: x - 1,
- RAID_TYPE_RAID4: lambda x: x - 1,
- RAID_TYPE_RAID5: lambda x: x - 1,
- RAID_TYPE_RAID6: lambda x: x - 2,
- RAID_TYPE_RAID10: lambda x: x / 2,
- RAID_TYPE_RAID15: lambda x: x / 2 - 1,
- RAID_TYPE_RAID16: lambda x: x / 2 - 2,
- RAID_TYPE_RAID50: lambda x: x - 2,
- RAID_TYPE_RAID60: lambda x: x - 4,
- RAID_TYPE_RAID51: lambda x: x / 2 - 1,
- RAID_TYPE_RAID61: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_JBOD: lambda x: x,
+ Volume.RAID_TYPE_RAID0: lambda x: x,
+ Volume.RAID_TYPE_RAID1: lambda x: 1,
+ Volume.RAID_TYPE_RAID3: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID4: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID5: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID6: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID10: lambda x: x / 2,
+ Volume.RAID_TYPE_RAID15: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID16: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_RAID50: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID60: lambda x: x - 4,
+ Volume.RAID_TYPE_RAID51: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID61: lambda x: x / 2 - 2,
}

@staticmethod
@@ -191,7 +171,7 @@ class PoolRAID(object):


class BackStore(object):
- VERSION = "3.0"
+ VERSION = "3.1"
VERSION_SIGNATURE = 'LSM_SIMULATOR_DATA_%s_%s' % (VERSION, md5(VERSION))
JOB_DEFAULT_DURATION = 1
JOB_DATA_TYPE_VOL = 1
@@ -201,6 +181,7 @@ class BackStore(object):
SYS_ID = "sim-01"
SYS_NAME = "LSM simulated storage plug-in"
BLK_SIZE = 512
+ STRIP_SIZE = 131072 # 128 KiB

_LIST_SPLITTER = '#'

@@ -724,7 +705,7 @@ class BackStore(object):

pool_1_id = self.sim_pool_create_from_disk(
name='Pool 1',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=pool_1_disks,
element_type=Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
@@ -744,7 +725,7 @@ class BackStore(object):

self.sim_pool_create_from_disk(
name='Pool 3',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=ssd_pool_disks,
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
@@ -755,7 +736,7 @@ class BackStore(object):
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
Pool.ELEMENT_TYPE_DELTA,
- raid_type=PoolRAID.RAID_TYPE_RAID0,
+ raid_type=Volume.RAID_TYPE_RAID0,
sim_disk_ids=test_pool_disks)

self._data_add(
@@ -1009,13 +990,23 @@ class BackStore(object):
'status_info': '',
'element_type': element_type,
'unsupported_actions': unsupported_actions,
- 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': Volume.RAID_TYPE_OTHER,
'member_type': PoolRAID.MEMBER_TYPE_POOL,
'parent_pool_id': parent_pool_id,
'total_space': size,
})
return self.lastrowid

+ def sim_pool_disks_count(self, sim_pool_id):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE owner_pool_id=%s;" %
+ sim_pool_id)[0][0]
+
+ def sim_pool_data_disks_count(self, sim_pool_id=None):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE "
+ "owner_pool_id=%s and role='DATA';" % sim_pool_id)[0][0]
+
def sim_vols(self, sim_ag_id=None):
"""
Return a list of sim_vol dict.
@@ -2231,3 +2222,55 @@ class SimArray(object):
@_handle_errors
def target_ports(self):
return list(SimArray._sim_tgt_2_lsm(t) for t in self.bs_obj.sim_tgts())
+
+ @_handle_errors
+ def volume_raid_info(self, lsm_vol):
+ sim_pool = self.bs_obj.sim_pool_of_id(
+ SimArray._lsm_id_to_sim_id(
+ lsm_vol.pool_id,
+ LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")))
+
+ raid_type = sim_pool['raid_type']
+ strip_size = Volume.STRIP_SIZE_UNKNOWN
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = Volume.OPT_IO_SIZE_UNKNOWN
+ disk_count = Volume.DISK_COUNT_UNKNOWN
+
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
+ parent_sim_pool = self.bs_obj.sim_pool_of_id(
+ sim_pool['parent_pool_id'])
+ raid_type = parent_sim_pool['raid_type']
+
+ disk_count = self.bs_obj.sim_pool_disks_count(
+ parent_sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ parent_sim_pool['id'])
+ else:
+ disk_count = self.bs_obj.sim_pool_disks_count(
+ sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_UNKNOWN or \
+ raid_type == Volume.RAID_TYPE_OTHER:
+ return [
+ raid_type, strip_size, disk_count, min_io_size,
+ opt_io_size]
+
+ if raid_type == Volume.RAID_TYPE_MIXED:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unsupported RAID_TYPE_MIXED pool "
+ "%s" % sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_RAID1 or \
+ raid_type == Volume.RAID_TYPE_JBOD:
+ strip_size = BackStore.BLK_SIZE
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = BackStore.BLK_SIZE
+ else:
+ strip_size = BackStore.STRIP_SIZE
+ min_io_size = BackStore.STRIP_SIZE
+ opt_io_size = int(data_disk_count * BackStore.STRIP_SIZE)
+
+ return [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index 8f7adfc..d562cd6 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -289,3 +289,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
return search_property(
[SimPlugin._sim_data_2_lsm(t) for t in sim_tgts],
search_key, search_value)
+
+ def volume_raid_info(self, volume, flags=0):
+ return self.sim_array.volume_raid_info(volume)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:24 UTC
Permalink
* Simply set XXX_UNKNOWN on the output parameters.

V2:
- Add call to register volume_raid_info
- Add LSM_CAP_VOLUME_RAID_INFO to capabilities

Changes in V3:
* Use lsm_register_plugin_v1_2() to register lsm_volume_raid_info() support.

Changes in V5 (no change in V4):

* Sync API changes:
* argument name 'disk_count'
* argument type 'uint32_t'.
* constant name 'LSM_VOLUME_DISK_COUNT_UNKNOWN'.

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/simc/simc_lsmplugin.c | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index 7c4d287..422a064 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -391,6 +391,7 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_EXPORTS,
LSM_CAP_EXPORT_FS,
LSM_CAP_EXPORT_REMOVE,
+ LSM_CAP_VOLUME_RAID_INFO,
-1
);

@@ -956,6 +957,33 @@ static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
return rc;
}

+static int volume_raid_info(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
+ struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
+
+ if( !av) {
+ rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
+ "volume not found!");
+ }
+
+ *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN;
+ *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN;
+ *disk_count = LSM_VOLUME_DISK_COUNT_UNKNOWN;
+ *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN;
+ *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN;
+ return rc;
+}
+
+static struct lsm_ops_v1_2 ops_v1_2 = {
+ volume_raid_info
+};
+
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume *v,
lsm_flag flags)
{
@@ -1527,7 +1555,6 @@ static struct lsm_san_ops_v1 san_ops = {
list_targets
};

-
static int fs_list(lsm_plugin_ptr c, const char *search_key,
const char *search_value, lsm_fs **fs[], uint32_t *count,
lsm_flag flags)
@@ -2243,8 +2270,8 @@ int load( lsm_plugin_ptr c, const char *uri, const char *password,
_unload(pd);
pd = NULL;
} else {
- rc = lsm_register_plugin_v1( c, pd, &mgm_ops,
- &san_ops, &fs_ops, &nfs_ops);
+ rc = lsm_register_plugin_v1_2(
+ c, pd, &mgm_ops, &san_ops, &fs_ops, &nfs_ops, &ops_v1_2);
}
}
return rc;
--
1.8.3.1
Gris Ge
2015-03-04 09:09:25 UTC
Permalink
* New command:
lsmcli volume-raid-info --vol <VOL_ID>

* New alias:
lsmcli vri == lsmcli volume-raid-info
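
Typical usage (the volume ID is hypothetical; the column set comes from
VOL_RAID_INFO_HEADER in data_display.py below):

    $ lsmcli volume-raid-info --vol <VOL_ID>
    $ lsmcli vri --vol <VOL_ID>     # same command via the alias

Both print one row with the columns Volume ID, RAID Type, Strip Size,
Disk Count, Minimum I/O Size and Optimal I/O Size.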

Changes in V2:
* Fix output format when a volume that is not found is passed to
_get_item() in volume_raid_info().

Changes in V5 (no changes in V3, V4):
* Sync API rename of 'extent_count' to 'disk_count'; column name
changed to 'Disk Count'.

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/cmdline.py | 18 +++++++++++++-
tools/lsmcli/data_display.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index a781314..980b3a0 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,7 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,

from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- vol_provision_str_to_type, vol_rep_type_str_to_type)
+ vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo)


## Wraps the invocation to the command line
@@ -368,6 +368,14 @@ cmds = (
),

dict(
+ name='volume-raid-info',
+ help='Query volume RAID information',
+ args=[
+ dict(vol_id_opt),
+ ],
+ ),
+
+ dict(
name='access-group-create',
help='Create an access group',
args=[
@@ -628,6 +636,7 @@ aliases = (
['aa', 'access-group-add'],
['ar', 'access-group-remove'],
['ad', 'access-group-delete'],
+ ['vri', 'volume-raid-info'],
)


@@ -1318,6 +1327,13 @@ class CmdLine:
self._wait_for_it("volume-dependant-rm",
self.c.volume_child_dependency_rm(v), None)

+ def volume_raid_info(self, args):
+ lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume")
+ self.display_data(
+ [
+ VolumeRAIDInfo(
+ lsm_vol.id, *self.c.volume_raid_info(lsm_vol))])
+
## Displays file system dependants
def fs_dependants(self, args):
fs = _get_item(self.c.fs(), args.fs, "File System")
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 285a14f..e0524c8 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -243,6 +243,41 @@ class PlugData(object):
self.version = plugin_version


+class VolumeRAIDInfo(object):
+ _RAID_TYPE_MAP = {
+ Volume.RAID_TYPE_RAID0: 'RAID0',
+ Volume.RAID_TYPE_RAID1: 'RAID1',
+ Volume.RAID_TYPE_RAID3: 'RAID3',
+ Volume.RAID_TYPE_RAID4: 'RAID4',
+ Volume.RAID_TYPE_RAID5: 'RAID5',
+ Volume.RAID_TYPE_RAID6: 'RAID6',
+ Volume.RAID_TYPE_RAID10: 'RAID10',
+ Volume.RAID_TYPE_RAID15: 'RAID15',
+ Volume.RAID_TYPE_RAID16: 'RAID16',
+ Volume.RAID_TYPE_RAID50: 'RAID50',
+ Volume.RAID_TYPE_RAID60: 'RAID60',
+ Volume.RAID_TYPE_RAID51: 'RAID51',
+ Volume.RAID_TYPE_RAID61: 'RAID61',
+ Volume.RAID_TYPE_JBOD: 'JBOD',
+ Volume.RAID_TYPE_MIXED: 'MIXED',
+ Volume.RAID_TYPE_OTHER: 'OTHER',
+ Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ }
+
+ def __init__(self, vol_id, raid_type, strip_size, disk_count,
+ min_io_size, opt_io_size):
+ self.vol_id = vol_id
+ self.raid_type = raid_type
+ self.strip_size = strip_size
+ self.disk_count = disk_count
+ self.min_io_size = min_io_size
+ self.opt_io_size = opt_io_size
+
+ @staticmethod
+ def raid_type_to_str(raid_type):
+ return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP)
+
+
class DisplayData(object):

def __init__(self):
@@ -498,6 +533,29 @@ class DisplayData(object):
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}

+ VOL_RAID_INFO_HEADER = OrderedDict()
+ VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID'
+ VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type'
+ VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size'
+ VOL_RAID_INFO_HEADER['disk_count'] = 'Disk Count'
+ VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size'
+ VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size'
+
+ VOL_RAID_INFO_COLUMN_SKIP_KEYS = []
+
+ VOL_RAID_INFO_VALUE_CONV_ENUM = {
+ 'raid_type': VolumeRAIDInfo.raid_type_to_str,
+ }
+ VOL_RAID_INFO_VALUE_CONV_HUMAN = [
+ 'strip_size', 'min_io_size', 'opt_io_size']
+
+ VALUE_CONVERT[VolumeRAIDInfo] = {
+ 'headers': VOL_RAID_INFO_HEADER,
+ 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS,
+ 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN,
+ }
+
@staticmethod
def _get_man_pro_value(obj, key, value_conv_enum, value_conv_human,
flag_human, flag_enum):
--
1.8.3.1
Gris Ge
2015-03-04 09:09:26 UTC
Permalink
* Simply run the command and check the volume ID in its output.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/cmdtest.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/test/cmdtest.py b/test/cmdtest.py
index b603601..e80e027 100755
--- a/test/cmdtest.py
+++ b/test/cmdtest.py
@@ -676,6 +676,25 @@ def search_test(cap, system_id):
volume_delete(vol_id)
return

+def volume_raid_info_test(cap, system_id):
+ if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']:
+ test_pool_id = name_to_id(OP_POOL, test_pool_name)
+
+ if test_pool_id is None:
+ print 'Pool %s is not available!' % test_pool_name
+ exit(10)
+
+ vol_id = create_volume(test_pool_id)
+ out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1]
+ r = parse(out)
+ if len(r[0]) != 6:
+ print "volume-raid-info got expected output: %s" % out
+ exit(10)
+ if r[0][0] != vol_id:
+ print "volume-raid-info output volume ID is not requested " \
+ "volume ID %s" % out
+ exit(10)
+ return

def run_all_tests(cap, system_id):
test_display(cap, system_id)
@@ -688,6 +707,8 @@ def run_all_tests(cap, system_id):

search_test(cap, system_id)

+ volume_raid_info_test(cap, system_id)
+
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c", "--command", action="store", type="string",
--
1.8.3.1
Gris Ge
2015-03-04 09:09:27 UTC
Permalink
* Simply invoke lsm_volume_raid_info() with no additional test.

Changes in V5 (no change in V2, V3, V4):

* Sync API changes for argument name ('extent_count' to 'disk_count') and data
type.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)

diff --git a/test/tester.c b/test/tester.c
index 2edd18c..1622a75 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -2858,6 +2858,35 @@ START_TEST(test_volume_vpd_check)
}
END_TEST

+START_TEST(test_volume_raid_info)
+{
+ lsm_volume *volume = NULL;
+ char *job = NULL;
+ lsm_pool *pool = get_test_pool(c);
+
+ int rc = lsm_volume_create(
+ c, pool, "volume_raid_info_test", 20000000,
+ LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD);
+
+ fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED,
+ "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c)));
+
+ if( LSM_ERR_JOB_STARTED == rc ) {
+ volume = wait_for_job_vol(c, &job);
+ }
+
+ lsm_volume_raid_type raid_type;
+ uint32_t strip_size, disk_count, min_io_size, opt_io_size;
+
+ G(
+ rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size,
+ &disk_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD);
+
+ G(rc, lsm_volume_record_free, volume);
+ volume = NULL;
+}
+END_TEST
+
Suite * lsm_suite(void)
{
Suite *s = suite_create("libStorageMgmt");
@@ -2893,6 +2922,7 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_ss);
tcase_add_test(basic, test_nfs_exports);
tcase_add_test(basic, test_invalid_input);
+ tcase_add_test(basic, test_volume_raid_info);

suite_add_tcase(s, basic);
return s;
--
1.8.3.1
Gris Ge
2015-03-04 09:09:28 UTC
Permalink
* Treating each MegaRAID DG(disk group) as LSM pool.
* Based on storcli output of:
storcli /c0/dall show all J

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 99 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 96 insertions(+), 3 deletions(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index e1e7e8d..5e3802b 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin)
+ Disk, VERSION, search_property, IPlugin, Pool)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -115,6 +115,47 @@ def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict):
disk_show_basic_dict['State'], Disk.STATUS_UNKNOWN)


+def _mega_size_to_lsm(mega_size):
+ """
+ LSI uses 'TB', 'GB', 'MB', 'KB', etc.; for LSM they are 'TiB', etc.
+ Return the size as an int count of bytes.
+ """
+ re_regex = re.compile("^([0-9\.]+) ([EPTGMK])B$")
+ re_match = re_regex.match(mega_size)
+ if re_match:
+ return size_human_2_size_bytes(
+ "%s%siB" % (re_match.group(1), re_match.group(2)))
+
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "_mega_size_to_lsm(): Got unexpected LSI size string %s" %
+ mega_size)
+
+
+_POOL_STATUS_MAP = {
+ 'Onln': Pool.STATUS_OK,
+ 'Dgrd': Pool.STATUS_DEGRADED,
+ 'Pdgd': Pool.STATUS_DEGRADED,
+ 'Offln': Pool.STATUS_ERROR,
+ 'Rbld': Pool.STATUS_RECONSTRUCTING,
+ 'Optl': Pool.STATUS_OK,
+ # TODO(Gris Ge): The 'Optl' is undocumented, check with LSI.
+}
+
+
+def _pool_status_of(dg_top):
+ """
+ Return status
+ """
+ if dg_top['State'] in _POOL_STATUS_MAP.keys():
+ return _POOL_STATUS_MAP[dg_top['State']]
+ return Pool.STATUS_UNKNOWN
+
+
+def _pool_id_of(dg_id, sys_id):
+ return "%s:DG%s" % (sys_id, dg_id)
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -217,7 +258,11 @@ class MegaRAID(IPlugin):
ErrorNumber.PLUGIN_BUG,
"MegaRAID storcli failed with error %d: %s" %
(rc_status['Status Code'], rc_status['Description']))
- return ctrl_output[0].get('Response Data')
+ real_data = ctrl_output[0].get('Response Data')
+ if real_data and 'Response Data' in real_data.keys():
+ return real_data['Response Data']
+
+ return real_data
else:
return output

@@ -317,7 +362,55 @@ class MegaRAID(IPlugin):

return search_property(rc_lsm_disks, search_key, search_value)

+ @staticmethod
+ def _dg_free_size(dg_num, free_space_list):
+ """
+ Get information from 'FREE SPACE DETAILS' of /c0/dall show all.
+ """
+ for free_space in free_space_list:
+ if int(free_space['DG']) == int(dg_num):
+ return _mega_size_to_lsm(free_space['Size'])
+
+ return 0
+
+ def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num):
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ pool_id = _pool_id_of(dg_top['DG'], sys_id)
+ name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG'])
+ elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL
+ unsupported_actions = 0
+ # TODO(Gris Ge): contact LSI to get accurate total space and free
+ # space. The size we are using here is not what host
+ # got.
+ total_space = _mega_size_to_lsm(dg_top['Size'])
+ free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list)
+ status = _pool_status_of(dg_top)
+ status_info = ''
+ if status == Pool.STATUS_UNKNOWN:
+ status_info = dg_top['State']
+
+ plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG'])
+
+ return Pool(
+ pool_id, name, elem_type, unsupported_actions,
+ total_space, free_space, status, status_info,
+ sys_id, plugin_data)
+
@_handle_errors
def pools(self, search_key=None, search_value=None,
flags=Client.FLAG_RSVD):
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
+ lsm_pools = []
+ for ctrl_num in range(self._ctrl_count()):
+ dg_show_output = self._storcli_exec(
+ ["/c%d/dall" % ctrl_num, "show", "all"])
+ free_space_list = dg_show_output.get('FREE SPACE DETAILS', [])
+ for dg_top in dg_show_output['TOPOLOGY']:
+ if dg_top['Arr'] != '-':
+ continue
+ if dg_top['DG'] == '-':
+ continue
+ lsm_pools.append(
+ self._dg_top_to_lsm_pool(
+ dg_top, free_space_list, ctrl_num))
+
+ return search_property(lsm_pools, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:30 UTC
Permalink
* In MegaRAID storcli, the disk status 'Rbld' indicates this disk is
in use for reconstructing pool data.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index ae2e953..83abf63 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -93,6 +93,7 @@ _DISK_STATE_MAP = {
'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK,
'UGood': Disk.STATUS_STOPPED | Disk.STATUS_OK,
'UBad': Disk.STATUS_STOPPED | Disk.STATUS_ERROR,
+ 'Rbld': Disk.STATUS_RECONSTRUCT,
}
--
1.8.3.1
Gris Ge
2015-03-04 09:09:29 UTC
Permalink
* Treat each MegaRAID VD (virtual drive) as an LSM Volume.
* Using 'storcli /c0/vall show all' to query all volumes.
* Add new capability: Capabilities.VOLUMES

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 46 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 5e3802b..ae2e953 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin, Pool)
+ Disk, VERSION, search_property, IPlugin, Pool, Volume)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -226,6 +226,7 @@ class MegaRAID(IPlugin):
"System not found")
cap = Capabilities()
cap.set(Capabilities.DISKS)
+ cap.set(Capabilities.VOLUMES)
return cap

def _storcli_exec(self, storcli_cmds, flag_json=True):
@@ -414,3 +415,46 @@ class MegaRAID(IPlugin):
dg_top, free_space_list, ctrl_num))

return search_property(lsm_pools, search_key, search_value)
+
+ @staticmethod
+ def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list,
+ vd_prop_info, vd_path):
+
+ vol_id = "%s:VD%d" % (sys_id, vd_id)
+ name = "VD %d" % vd_id
+ vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information.
+ block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz'])
+ num_of_blocks = vd_prop_info['Number of Blocks']
+ admin_state = Volume.ADMIN_STATE_ENABLED
+ if vd_prop_info['Exposed to OS'] != 'Yes' or \
+ vd_basic_info['Access'] != 'RW':
+ admin_state = Volume.ADMIN_STATE_DISABLED
+ pool_id = _pool_id_of(dg_id, sys_id)
+ plugin_data = vd_path
+ return Volume(
+ vol_id, name, vpd83, block_size, num_of_blocks, admin_state,
+ sys_id, pool_id, plugin_data)
+
+ @_handle_errors
+ def volumes(self, search_key=None, search_value=None, flags=0):
+ lsm_vols = []
+ for ctrl_num in range(self._ctrl_count()):
+ vol_show_output = self._storcli_exec(
+ ["/c%d/vall" % ctrl_num, "show", "all"])
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ for key_name in vol_show_output.keys():
+ if key_name.startswith('/c'):
+ vd_basic_info = vol_show_output[key_name][0]
+ (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/')
+ dg_id = int(dg_id)
+ vd_id = int(vd_id)
+ vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id]
+
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ lsm_vols.append(
+ MegaRAID._vd_to_lsm_vol(
+ vd_id, dg_id, sys_id, vd_basic_info,
+ vd_pd_info_list, vd_prop_info, key_name))
+
+ return search_property(lsm_vols, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-04 09:09:31 UTC
Permalink
* Use 'storcli /c0/v1 show all' command line output to determine the
RAID type, strip size and disk count.

* Calculate the optimal I/O size by multiplying the strip size by the
count of RAID data disks (not mirror, not parity); see the sketch
after the test log below.

* Tested query on RAID 0, 1, 5, 10, 50.

* Tested the optimal I/O size on RAID 5:
[***@storageqe-08 ~]# lsmenv mega lsmcli vri --vol SV03403550:VD1
Device alias: mega
URI: megaraid://
lsmcli vri --vol SV03403550:VD1
Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
--------------------------------------------------------------------------------------------
SV03403550:VD1 | RAID5 | 131072 | 5 | 131072 | 524288

Time: 0:00.29
[***@storageqe-08 ~]# dd if=/dev/urandom of=test.img bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 153.174 s, 6.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 58.9573 s, 17.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 37.7282 s, 27.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 35.3351 s, 29.7 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 70.0779 s, 15.0 MB/s
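
The arithmetic behind those numbers, as a minimal sketch (values taken
from the RAID 5 test above: 5 drives, 128 KiB strip):

    strip_size = 131072                     # 'Strip Size' from storcli
    disk_count = 5                          # drives per span * span depth
    strip_count = disk_count - 1            # RAID 5: one parity strip
    opt_io_size = strip_size * strip_count  # 524288, as reported above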

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 83abf63..e754cd8 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -157,6 +157,33 @@ def _pool_id_of(dg_id, sys_id):
return "%s:DG%s" % (sys_id, dg_id)


+_RAID_TYPE_MAP = {
+ 'RAID0': Volume.RAID_TYPE_RAID0,
+ 'RAID1': Volume.RAID_TYPE_RAID1,
+ 'RAID5': Volume.RAID_TYPE_RAID5,
+ 'RAID6': Volume.RAID_TYPE_RAID6,
+ 'RAID00': Volume.RAID_TYPE_RAID0,
+ # Some MegaRAID controllers support at most 16 disks per span.
+ # To support 16+ disks in one group, MegaRAID has RAID00 or even RAID000.
+ # All of them are treated as RAID0.
+ 'RAID10': Volume.RAID_TYPE_RAID10,
+ 'RAID50': Volume.RAID_TYPE_RAID50,
+ 'RAID60': Volume.RAID_TYPE_RAID60,
+}
+
+
+def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info):
+ raid_type = _RAID_TYPE_MAP.get(
+ vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN)
+
+ # In LSI terms, a RAID1 of four or more disks is actually a RAID10.
+ if raid_type == Volume.RAID_TYPE_RAID1 and \
+ int(vd_prop_info['Number of Drives Per Span']) >= 4:
+ raid_type = Volume.RAID_TYPE_RAID10
+
+ return raid_type
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -459,3 +486,52 @@ class MegaRAID(IPlugin):
vd_pd_info_list, vd_prop_info, key_name))

return search_property(lsm_vols, search_key, search_value)
+
+ @_handle_errors
+ def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
+ if not volume.plugin_data:
+ raise LsmError(
+ ErrorNumber.INVALID_ARGUMENT,
+ "Ilegal input volume argument: missing plugin_data property")
+
+ vd_path = volume.plugin_data
+ vol_show_output = self._storcli_exec([vd_path, "show", "all"])
+ vd_basic_info = vol_show_output[vd_path][0]
+ vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
+ strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size'])
+ disk_count = (
+ int(vd_prop_info['Number of Drives Per Span']) *
+ int(vd_prop_info['Span Depth']))
+ if raid_type == Volume.RAID_TYPE_RAID0:
+ strip_count = disk_count
+ elif raid_type == Volume.RAID_TYPE_RAID1:
+ strip_count = 1
+ elif raid_type == Volume.RAID_TYPE_RAID5:
+ strip_count = disk_count - 1
+ elif raid_type == Volume.RAID_TYPE_RAID6:
+ strip_count = disk_count - 2
+ elif raid_type == Volume.RAID_TYPE_RAID50:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 1) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID60:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 2) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID10:
+ strip_count = (
+ int(vd_prop_info['Number of Drives Per Span']) / 2 *
+ int(vd_prop_info['Span Depth']))
+ else:
+ # MegaRAID does not support RAID 15 or 16 yet.
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unexpected RAID type: %s" %
+ vd_basic_info['TYPE'])
+
+ return [
+ raid_type, strip_size, disk_count, strip_size,
+ strip_size * strip_count]
--
1.8.3.1
Gris Ge
2015-03-04 09:09:32 UTC
Permalink
* NetApp ONTAP strip size (minimum I/O size) is 4 KiB and stripe size
(optimal I/O size) is 64 KiB. Both are unchangeable.

* The extent count (disk count) is taken from the aggregate 'disk-count'
property.

* Changed Filer.aggregates() to accept an optional argument 'aggr_name',
which queries only the named aggregate.

* Uncommented and updated the old code for converting NetApp RAID level to
libstoragemgmt RAID level.

* Tested on ONTAP simulator 8.1.1 7-mode and real ONTAP 8.0.2 7-mode.
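
What the plugin returns, sketched for a raid_dp aggregate (the 16-disk
count is a hypothetical value; the strip and optimal I/O sizes are the
fixed constants from this patch):

    from lsm import Volume

    # [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
    # raid_dp maps to RAID6; only disk_count varies per aggregate.
    print [Volume.RAID_TYPE_RAID6, 4096, 16, 4096, 65536]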

Changes in V5 (no changes in V2, V3, V4):

* Sync API changes for argument name.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/ontap/na.py | 8 ++++++--
plugin/ontap/ontap.py | 55 +++++++++++++++++++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 1e015ba..b68577c 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -231,11 +231,15 @@ class Filer(object):
disks = self._invoke('disk-list-info')
return disks['disk-details']['disk-detail-info']

- def aggregates(self):
+ def aggregates(self, aggr_name=None):
"""
Return a list of aggregates
+ If aggr_name provided, return [na_aggr]
"""
- pools = self._invoke('aggr-list-info')
+ if aggr_name:
+ pools = self._invoke('aggr-list-info', {'aggregate': aggr_name})
+ else:
+ pools = self._invoke('aggr-list-info')
tmp = pools['aggregates']['aggr-info']
return to_list(tmp)

diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index c2a2c58..b6358a8 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -121,6 +121,10 @@ class Ontap(IStorageAreaNetwork, INfs):
'restricted': 'volume is restricted to protocol accesses',
}

+ # strip size: http://www.netapp.com/us/media/tr-3001.pdf
+ _STRIP_SIZE = 4096
+ _OPT_IO_SIZE = 65536
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -310,19 +314,6 @@ class Ontap(IStorageAreaNetwork, INfs):
return search_property(
[self._lun(l) for l in luns], search_key, search_value)

-# @staticmethod
-# def _raid_type_of_na_aggr(na_aggr):
-# na_raid_statuses = na_aggr['raid-status'].split(',')
-# if 'raid0' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID0
-# if 'raid4' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID4
-# if 'raid_dp' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID6
-# if 'mixed_raid_type' in na_raid_statuses:
-# return Pool.RAID_TYPE_MIXED
-# return Pool.RAID_TYPE_UNKNOWN
-
# This is based on NetApp ONTAP Manual pages:
# https://library.netapp.com/ecmdocs/ECMP1196890/html/man1/na_aggr.1.html
_AGGR_RAID_STATUS_CONV = {
@@ -1290,3 +1281,41 @@ class Ontap(IStorageAreaNetwork, INfs):
self.sys_info.id))

return search_property(tp, search_key, search_value)
+
+ @staticmethod
+ def _raid_type_of_na_aggr(na_aggr):
+ na_raid_statuses = na_aggr['raid-status'].split(',')
+ if 'mixed_raid_type' in na_raid_statuses:
+ return Volume.RAID_TYPE_MIXED
+ elif 'raid0' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID0
+ elif 'raid4' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID4
+ elif 'raid_dp' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID6
+ return Volume.RAID_TYPE_UNKNOWN
+
+ @handle_ontap_errors
+ def volume_raid_info(self, volume, flags=0):
+ na_vol_name = Ontap._get_volume_from_path(volume.pool_id)
+ na_vol = self.f.volumes(volume_name=na_vol_name)
+ if len(na_vol) == 0:
+ # If the parent pool is not found, this LSM volume should not exist.
+ raise LsmError(
+ ErrorNumber.NOT_FOUND_VOLUME,
+ "Volume not found")
+ if len(na_vol) != 1:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got 2+ na_vols from self.f.volumes() "
+ "%s" % na_vol)
+
+ na_vol = na_vol[0]
+ na_aggr_name = na_vol['containing-aggregate']
+ na_aggr = self.f.aggregates(aggr_name=na_aggr_name)[0]
+ raid_type = Ontap._raid_type_of_na_aggr(na_aggr)
+ disk_count = int(na_aggr['disk-count'])
+
+ return [
+ raid_type, Ontap._STRIP_SIZE, disk_count, Ontap._STRIP_SIZE,
+ Ontap._OPT_IO_SIZE]
--
1.8.3.1
Tony Asleson
2015-03-04 23:00:46 UTC
Permalink
Hi Gris,

I will review and commit this tomorrow AM.

Thanks,
Tony
Post by Gris Ge
* New method volume_raid_info() to query RAID type, disk count,
minimum I/O size, optimal I/O size.
* These plugins support this new method:
* sim
# Simply return UNKNOWN
* simc
# Simply set UNKNOWN on the output parameter.
* MegaRAID
* The C library part might be buggy considering my C skill set.
* Potential support by other plugins:
* Targetd:
We could use the PE size of LVM for the minimum I/O size and strip
size, and set the RAID type as JBOD and the extent count as 1.
Once LVM RAID is supported, it could provide the real RAID type and
other information.
* SMI-S:
In the SMI-S spec, each StorageVolume has a StorageSetting associated,
but no definition says that ExtentStripeLength is the optimal I/O
size. Instead of guessing or wrangling with SNIA, simply reporting
'no support' works better.
* ONTAP:
The patch for the ONTAP plugin is ready but not included in this patch
set since it was based on my own testing and guessing.
Waiting for NetApp's official answer about their optimal I/O size.
* Nstor:
No document found about strip settings.
* This is the best design and naming scheme I have.
PLEASE let me know if you have a better one.
Thank you very much in advance.
Changes in V2:
* Patch 6/13 and 10/13:
Tony introduced a new way for plugins to register newly added APIs with
full backward compatibility. The simulator C plugin implements this change.
* Patch 10/13:
Add missing capability LSM_CAP_VOLUME_RAID_INFO
Changes in V3:
* Patch 6/13:
Another approach to register newly added APIs with full backward
compatibility:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
Will be frozen once 1.2 is released.
* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1() in
addition to a struct lsm_ops_v1_2 pointer.
* Once version 1.2 is released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().
* Patch 9/13:
Full volume_raid_info() support in the simulator plugin.
Changes in V4:
* Included Tony's bug fix patch for the C API:
[PATCH] lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops
* Included ONTAP plugin support.
* Patch 4/15 and 5/15:
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN
* Patch 5/15:
* Add LSM_FLAG_UNUSED_CHECK in the public lsm_volume_raid_info() function.
* Patch 6/15:
* Removed unneeded import 'Volume' from simulator.py.
Changes in V5:
* Change the output argument name from 'extent_count' to 'disk_count' as
it fits the most common expectation for RAID. Please check patch 3/15
for details.
* Change constant names:
* 'Volume.EXTENT_COUNT_UNKNOWN' -> 'Volume.DISK_COUNT_UNKNOWN'
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN -> LSM_VOLUME_DISK_COUNT_UNKNOWN
* Change the data type of these C API output arguments from 'int32_t' to
'uint32_t':
* strip_size
* disk_count
* min_io_size
* opt_io_size
* Sync plugins for this change.
Gris Ge (13):
Python Library: Fix decorator problem with docstrings
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
Simulator Plugin: Add volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
lsmcli: Add volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
MegaRAID Plugin: Add volume_raid_info() support.
ONTAP Plugin: Add volume_raid_info() support.
Tony Asleson (1):
lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 ++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++
c_binding/lsm_plugin_ipc.cpp | 86 ++++++--
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/ontap/na.py | 8 +-
plugin/ontap/ontap.py | 55 ++++--
plugin/sim/simarray.py | 149 +++++++++-----
plugin/sim/simulator.py | 3 +
plugin/simc/simc_lsmplugin.c | 33 +++-
python_binding/lsm/_client.py | 103 ++++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
20 files changed, 909 insertions(+), 88 deletions(-)
Gris Ge
2015-03-05 08:06:25 UTC
Permalink
Post by Tony Asleson
Hi Gris,
I will review and commit this tomorrow AM.
Thanks,
Tony
Sorry, I missed one patch in the V5 patch set.

Let me resend them.
--
Gris Ge
Gris Ge
2015-03-05 08:28:36 UTC
Permalink
* New method volume_raid_info() to query RAID type, disk count,
minimum I/O size, optimal I/O size.

* These plugins support this new method:
* sim
# Simply return UNKNOWN
* simc
# Simply set UNKNOWN on the output parameter.
* MegaRAID

* The C library part might be buggy considering my C skill set.

* Potential support by other plugins:
* Targetd:
We could use the PE size of LVM for the minimum I/O size and strip
size, and set the RAID type as JBOD and the extent count as 1.
Once LVM RAID is supported, it could provide the real RAID type and
other information.
* SMI-S:
In the SMI-S spec, each StorageVolume has a StorageSetting associated,
but no definition says that ExtentStripeLength is the optimal I/O
size. Instead of guessing or wrangling with SNIA, simply reporting
'no support' works better.
* ONTAP:
The patch for the ONTAP plugin is ready but not included in this patch
set since it was based on my own testing and guessing.
Waiting for NetApp's official answer about their optimal I/O size.
* Nstor:
No document found about strip settings.

* This is the best design and naming scheme I have.
PLEASE let me know if you have a better one.
Thank you very much in advance.

Changes in V2:
* Patch 6/13 and 10/13:
Tony introduced a new way for plugins to register newly added APIs with
full backward compatibility. The simulator C plugin implements this change.
* Patch 10/13:
Add missing capability LSM_CAP_VOLUME_RAID_INFO

Changes in V3:
* Patch 6/13:
Another approach to register newly added APIs with full backward
compatibility:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
Will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1() in
addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 is released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Patch 9/13:
Full volume_raid_info() support in the simulator plugin.

Changes in V4:

* Included Tony's bug fix patch for the C API:
[PATCH] lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

* Included ONTAP plugin support.

* Patch 4/15 and 5/15:
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

* Patch 5/15:
* Add LSM_FLAG_UNUSED_CHECK in the public lsm_volume_raid_info() function.

* Patch 6/15:
* Removed unneeded import 'Volume' from simulator.py.

Changes in V5:

* Change the output argument name from 'extent_count' to 'disk_count' as
it fits the most common expectation for RAID. Please check patch 3/15
for details.

* Change constant name:
* 'Volume.EXTENT_COUNT_UNKNOWN' -> 'Volume.DISK_COUNT_UNKNOWN'
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN -> LSM_VOLUME_DISK_COUNT_UNKNOWN
* Change the data type of these C API output arguments from 'int32_t' to 'uint32_t':
* strip_size
* disk_count
* min_io_size
* opt_io_size

* Sync plugins for this change.

Gris Ge (14):
Python Library: Fix decorator problem with docstrings
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
lsmcli: Add volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
Constant Test: Fix missing constant with number in it.
Simulator Plugin: Add volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
MegaRAID Plugin: Add volume_raid_info() support.
ONTAP Plugin: Add volume_raid_info() support.

Tony Asleson (1):
lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops

c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 ++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++
c_binding/lsm_plugin_ipc.cpp | 86 ++++++--
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/ontap/na.py | 8 +-
plugin/ontap/ontap.py | 55 ++++--
plugin/sim/simarray.py | 149 +++++++++-----
plugin/sim/simulator.py | 3 +
plugin/simc/simc_lsmplugin.c | 33 +++-
python_binding/lsm/_client.py | 103 ++++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
tools/utility/check_const.pl | 6 +-
21 files changed, 912 insertions(+), 91 deletions(-)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:37 UTC
Permalink
From: Tony Asleson <***@redhat.com>

In a number of places in the fs handlers we were checking
whether san_ops was valid before dereferencing a fs_ops pointer.

This is clearly wrong and will result in a seg fault if a
plugin implements fs operations but not block operations.

Signed-off-by: Tony Asleson <***@redhat.com>
Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/lsm_plugin_ipc.cpp | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index 7e0d034..f5374b9 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -1371,7 +1371,7 @@ static int fs(lsm_plugin_ptr p, Value &params, Value &response)
char *key = NULL;
char *val = NULL;

- if( p && p->san_ops && p->fs_ops->fs_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_list ) {
if( LSM_FLAG_EXPECTED_TYPE(params) &&
((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK )) {

@@ -1407,7 +1407,7 @@ static int fs_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_create ) {

Value v_pool = params["pool"];
Value v_name = params["name"];
@@ -1459,7 +1459,7 @@ static int fs_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_delete ) {

Value v_fs = params["fs"];

@@ -1493,7 +1493,7 @@ static int fs_resize(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_resize ) {
+ if( p && p->fs_ops && p->fs_ops->fs_resize ) {

Value v_fs = params["fs"];
Value v_size = params["new_size_bytes"];
@@ -1541,7 +1541,7 @@ static int fs_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;

- if( p && p->san_ops && p->fs_ops->fs_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_clone ) {

Value v_src_fs = params["src_fs"];
Value v_name = params["dest_fs_name"];
@@ -1597,7 +1597,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_OK;

- if( p && p->san_ops && p->fs_ops->fs_file_clone ) {
+ if( p && p->fs_ops && p->fs_ops->fs_file_clone ) {

Value v_fs = params["fs"];
Value v_src_name = params["src_file_name"];
@@ -1648,7 +1648,7 @@ static int fs_file_clone(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1686,7 +1686,7 @@ static int fs_child_dependency(lsm_plugin_ptr p, Value &params, Value &response)
static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_child_dependency_rm ) {
+ if( p && p->fs_ops && p->fs_ops->fs_child_dependency_rm ) {

Value v_fs = params["fs"];
Value v_files = params["files"];
@@ -1725,7 +1725,7 @@ static int fs_child_dependency_rm(lsm_plugin_ptr p, Value &params, Value &respon
static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_list ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_list ) {

Value v_fs = params["fs"];

@@ -1766,7 +1766,7 @@ static int ss_list(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_create ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_create ) {

Value v_fs = params["fs"];
Value v_ss_name = params["snapshot_name"];
@@ -1814,7 +1814,7 @@ static int ss_create(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_delete ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_delete ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
@@ -1851,7 +1851,7 @@ static int ss_delete(lsm_plugin_ptr p, Value &params, Value &response)
static int ss_restore(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->fs_ops->fs_ss_restore ) {
+ if( p && p->fs_ops && p->fs_ops->fs_ss_restore ) {

Value v_fs = params["fs"];
Value v_ss = params["snapshot"];
--
1.8.3.1
Gris Ge
2015-03-05 08:28:38 UTC
Permalink
* With a plain decorator, the docstring of the original method is lost.
* Use functools.wraps() to keep the docstring of the original method.
Check
http://stackoverflow.com/questions/1782843/python-decorator-problem-with-docstrings
for details.

* With this fix, users can check a method's help message in interactive
Python with the command:
help(lsm.Client.volume_create)
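
A toy illustration of the failure and the fix (a minimal sketch of the
mechanism only, not the actual return_requires() decorator):

import functools

def plain(func):
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner

def wrapped(func):
    @functools.wraps(func)  # copies __doc__, __name__, etc. onto inner
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner

@plain
def a():
    """My docstring."""

@wrapped
def b():
    """My docstring."""

print a.__doc__  # None -- help(a) shows nothing useful
print b.__doc__  # 'My docstring.'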

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_common.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index f2fd568..4c87661 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -533,6 +533,7 @@ def return_requires(*types):
is quite important.
"""
def outer(func):
+ @functools.wraps(func)
def inner(*args, **kwargs):
r = func(*args, **kwargs)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:39 UTC
Permalink
* The docstring of lsm.Client.volume_raid_info() contains full details
about this new method. Quick info:
Usage:
volume_raid_info(self, volume, flags=0)
Returns:
[raid_type, strip_size, disk_count, min_io_size, opt_io_size]
# strip_size is the size of the strip on each disk/extent
# disk_count is the disk/extent count.
# min_io_size is the minimum I/O size, also the preferred I/O size
# of random I/O.
# opt_io_size is the optimal I/O size, also the preferred I/O size
# of sequential I/O.
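
A minimal client-side sketch (assuming the 'sim://' simulator URI that
ships with libstoragemgmt and at least one existing volume; error
handling trimmed):

from lsm import Client

c = Client('sim://')
vol = c.volumes()[0]
raid_type, strip_size, disk_count, min_io_size, opt_io_size = \
    c.volume_raid_info(vol)
print "RAID type %d, strip %d B, %d disk(s), min I/O %d B, opt I/O %d B" % (
    raid_type, strip_size, disk_count, min_io_size, opt_io_size)
c.close()

Real code should first confirm the lsm.Capabilities.VOLUME_RAID_INFO
capability via Client.capabilities() before calling.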

* Why not use 'pool_raid_info' instead?
Some RAID systems (EMC VMAX/DMX and LVM RAID) implement RAID not at
the pool level but at the volume level.

* Why use 'extent_count' instead of 'disk_count'?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not use disks
directly to assemble a RAID group.

* Why do we need 'min_io_size' and 'opt_io_size' when we have
'extent_count' and 'strip_size'?
Normally min_io_size is the strip_size, and opt_io_size can be
calculated from raid_type, strip_size, and extent_count. But on NetApp,
an I/O test[1] indicates their optimal I/O size is 64 KiB no matter how
many disks are in the RAID group. It might[2] be because NetApp creates
a WAFL filesystem on the RAID group, which changes the optimal I/O size.

In general, the optimal I/O size or min_io_size of some RAID systems
might not be based on strip size and RAID disk/extent count.
We'd better expose that information directly instead of forcing users
to guess from strip size and disk/extent count.
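
The usual textbook derivation, as a sketch (the data-disk counts below
are the standard values; as the NetApp case in footnote [1] shows, real
systems may deviate, which is exactly why these values are exposed
directly):

def expected_opt_io_size(raid_type, strip_size, disk_count):
    # Sequential I/O prefers one full stripe: strip size multiplied
    # by the number of data-bearing disks.
    data_disks = {
        'RAID0': disk_count,       # no parity
        'RAID5': disk_count - 1,   # one disk's worth of parity
        'RAID6': disk_count - 2,   # two disks' worth of parity
        'RAID10': disk_count / 2,  # half the disks hold mirror copies
    }[raid_type]
    return strip_size * data_disks

# The NetApp footnote: 24-disk RAID6 with 4 KiB strips.
print expected_opt_io_size('RAID6', 4096, 24)  # 90112, yet 65536 measured best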

* New constants:
Volume.RAID_TYPE_UNKNOWN
# The plugin failed to detect the volume's RAID type.
Volume.RAID_TYPE_RAID0
# Stripe
Volume.RAID_TYPE_RAID1
# Mirror for two disks. For 4 disks or more, they are RAID10.
Volume.RAID_TYPE_RAID3
# Byte-level striping with dedicated parity
Volume.RAID_TYPE_RAID4
# Block-level striping with dedicated parity
Volume.RAID_TYPE_RAID5
# Block-level striping with distributed parity
Volume.RAID_TYPE_RAID6
# Block-level striping with two distributed parities, aka, RAID-DP
Volume.RAID_TYPE_RAID10
# Stripe of mirrors
Volume.RAID_TYPE_RAID15
# Parity of mirrors
Volume.RAID_TYPE_RAID16
# Dual parity of mirrors
Volume.RAID_TYPE_RAID50
# Stripe of parities
Volume.RAID_TYPE_RAID60
# Stripe of dual parities
Volume.RAID_TYPE_RAID51
# Mirror of parities
Volume.RAID_TYPE_RAID61
# Mirror of dual parities
Volume.RAID_TYPE_JBOD
# Just bunch of disks, no parity, no striping.
Volume.RAID_TYPE_MIXED
# This volume contains multiple RAID settings.
Volume.RAID_TYPE_OTHER
# Vendor specific RAID type

Volume.STRIP_SIZE_UNKNOWN
Volume.DISK_COUNT_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN

* New Capability:
lsm.Capabilities.VOLUME_RAID_INFO

[1] On a 24-disk RAID6 (RAID-DP) with a 4 KiB strip size (not changeable):
* With I/O size 90112 (4096 * 22), write speed is 73.4 MB/s
* With I/O size 65536, write speed is 86.9 MB/s
# 65536 is the optimal_io_size exposed via sysfs from the SCSI
# BLOCK LIMITS (0xB0) VPD page

[2] No official NetApp document confirms or denies it. Waiting for
NetApp's reply.

Changes in V2:
* Add 'New in 1.2' docstring.

Changes in V4 (no change in V3):
* Change the value of these constants from -1 to 0 to align with
libblkid/sysfs:
Volume.STRIP_SIZE_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN

Changes in V5:
* Rename the return value 'extent_count' to 'disk_count' as it fits the
most common expectation for RAID.
For LVM RAID, EMC VMAX, and other disk-slice based RAID systems,
'disk_count' here means the slice count.
The constant is renamed from EXTENT_COUNT_UNKNOWN to DISK_COUNT_UNKNOWN.

* Updated the docstring to explain the 'disk_count' return value for
RAID systems using disk slices.

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_client.py | 103 ++++++++++++++++++++++++++++++++++++++++++
python_binding/lsm/_data.py | 42 +++++++++++++++++
2 files changed, 145 insertions(+)

diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index e637962..a641b1d 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -971,3 +971,106 @@ class Client(INetworkAttachedStorage):
"""
_check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('target_ports', _del_self(locals()))
+
+    ## Returns the RAID information of a given volume
+ # @param self The this pointer
+ # @param raid_type The RAID type of this volume
+ # @param strip_size The size of strip of disk or other storage
+ # extent.
+    # @param    disk_count  The count of disks in the RAID group(s) where
+    #                       this volume is allocated from.
+ # @param min_io_size The preferred I/O size of random I/O.
+ # @param opt_io_size The preferred I/O size of sequential I/O.
+    # @returns  [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
+ @_return_requires([int, int, int, int, int])
+ def volume_raid_info(self, volume, flags=FLAG_RSVD):
+        """Query the RAID information of a given volume.
+
+ New in version 1.2.
+
+        Query the RAID type, strip size, disk count, minimum I/O size,
+ optimal I/O size of given volume.
+
+ This method requires this capability:
+ lsm.Capabilities.VOLUME_RAID_INFO
+
+ Args:
+ volume (Volume object): Volume to query
+ flags (int): Reserved for future use. Should be set as
+ lsm.Client.FLAG_RSVD
+ Returns:
+ [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
+
+ raid_type (int): RAID Type of requested volume.
+ Could be one of these values:
+ Volume.RAID_TYPE_RAID0
+ Stripe
+ Volume.RAID_TYPE_RAID1
+ Two disks Mirror
+ Volume.RAID_TYPE_RAID3
+ Byte-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID4
+ Block-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID5
+ Block-level striping with distributed parity
+ Volume.RAID_TYPE_RAID6
+ Block-level striping with two distributed parities,
+ aka, RAID-DP
+ Volume.RAID_TYPE_RAID10
+ Stripe of mirrors
+ Volume.RAID_TYPE_RAID15
+ Parity of mirrors
+ Volume.RAID_TYPE_RAID16
+ Dual parity of mirrors
+ Volume.RAID_TYPE_RAID50
+ Stripe of parities
+ Volume.RAID_TYPE_RAID60
+ Stripe of dual parities
+ Volume.RAID_TYPE_RAID51
+ Mirror of parities
+ Volume.RAID_TYPE_RAID61
+ Mirror of dual parities
+ Volume.RAID_TYPE_JBOD
+ Just bunch of disks, no parity, no striping.
+ Volume.RAID_TYPE_UNKNOWN
+ The plugin failed to detect the volume's RAID type.
+ Volume.RAID_TYPE_MIXED
+ This volume contains multiple RAID settings.
+ Volume.RAID_TYPE_OTHER
+ Vendor specific RAID type
+            strip_size(int): The size of the strip on each disk or other
+                storage extent.
+                For RAID1/JBOD, it should be set to the sector size.
+                If the plugin fails to detect the strip size, it should
+                be set to Volume.STRIP_SIZE_UNKNOWN(0).
+            disk_count(int): The count of disks used for assembling the
+                RAID group(s) this volume is allocated from.
+                For any RAID system using slices of disks, this value
+                indicates how many disk slices are used for the RAID.
+                For example, on LVM RAID, 'disk_count' here indicates the
+                count of PVs used for a certain volume.
+                As another example, on EMC VMAX, 'disk_count' here
+                indicates how many hyper volumes are used for this volume.
+                For any RAID system using remote LUNs for data storage,
+                each remote LUN should be counted as a disk.
+                If the plugin fails to detect disk_count, it should be
+                set to Volume.DISK_COUNT_UNKNOWN(0).
+            min_io_size(int): The minimum I/O size, the device's preferred
+                I/O size for random I/O. Any I/O size that is not a
+                multiple of this value may suffer a significant speed penalty.
+                Normally it refers to the strip size of each disk (extent).
+                If the plugin fails to detect min_io_size, it should try
+                these values in sequence:
+                    logical sector size -> physical sector size ->
+                    Volume.MIN_IO_SIZE_UNKNOWN(0).
+            opt_io_size(int): The optimal I/O size, the device's preferred
+                I/O size for sequential I/O. Normally it refers to the
+                RAID group stripe size.
+                If the plugin fails to detect opt_io_size, it should be
+                set to Volume.OPT_IO_SIZE_UNKNOWN(0).
+ Raises:
+ LsmError:
+ ErrorNumber.NO_SUPPORT
+ No support.
+ """
+ return self._tp.rpc('volume_raid_info', _del_self(locals()))
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 067c766..6fb2325 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -258,6 +258,46 @@ class Volume(IData):
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1

+ RAID_TYPE_UNKNOWN = -1
+ # The plugin failed to detect the volume's RAID type.
+ RAID_TYPE_RAID0 = 0
+ # Stripe
+ RAID_TYPE_RAID1 = 1
+ # Mirror for two disks. For 4 disks or more, they are RAID10.
+ RAID_TYPE_RAID3 = 3
+ # Byte-level striping with dedicated parity
+ RAID_TYPE_RAID4 = 4
+ # Block-level striping with dedicated parity
+ RAID_TYPE_RAID5 = 5
+ # Block-level striping with distributed parity
+ RAID_TYPE_RAID6 = 6
+ # Block-level striping with two distributed parities, aka, RAID-DP
+ RAID_TYPE_RAID10 = 10
+ # Stripe of mirrors
+ RAID_TYPE_RAID15 = 15
+ # Parity of mirrors
+ RAID_TYPE_RAID16 = 16
+ # Dual parity of mirrors
+ RAID_TYPE_RAID50 = 50
+ # Stripe of parities
+ RAID_TYPE_RAID60 = 60
+ # Stripe of dual parities
+ RAID_TYPE_RAID51 = 51
+ # Mirror of parities
+ RAID_TYPE_RAID61 = 61
+ # Mirror of dual parities
+ RAID_TYPE_JBOD = 20
+ # Just bunch of disks, no parity, no striping.
+ RAID_TYPE_MIXED = 21
+ # This volume contains multiple RAID settings.
+ RAID_TYPE_OTHER = 22
+ # Vendor specific RAID type
+
+ STRIP_SIZE_UNKNOWN = 0
+ DISK_COUNT_UNKNOWN = 0
+ MIN_IO_SIZE_UNKNOWN = 0
+ OPT_IO_SIZE_UNKNOWN = 0
+
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
@@ -669,6 +709,8 @@ class Capabilities(IData):

VOLUME_ISCSI_CHAP_AUTHENTICATION = 53

+ VOLUME_RAID_INFO = 54
+
VOLUME_THIN = 55

#File system
--
1.8.3.1
Gris Ge
2015-03-05 08:28:40 UTC
Permalink
* Please check the Python API documentation for details about the
lsm_volume_raid_info() method. Quick info:

Retrieves the RAID information of the given volume.
@param[in] c Valid connection
@param[in] v Volume ptr.
@param[out] raid_type Enum of lsm_volume_raid_type
@param[out] strip_size Size of the strip on disk or other storage extent.
@param[out] disk_count Count of disks or other storage extents in this
RAID group.
@param[out] min_io_size Minimum I/O size, also the preferred I/O size
of random I/O.
@param[out] opt_io_size Optimal I/O size, also the preferred I/O size
of sequential I/O.
@param[in] flags Reserved, set to 0
@return LSM_ERR_OK on success else error reason.

* New plugin interface: lsm_plug_volume_raid_info

* New enum type: lsm_volume_raid_type

* New capability:
LSM_CAP_VOLUME_RAID_INFO

* New constants:
LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
/**^ Unknown */
LSM_VOLUME_RAID_TYPE_RAID0 = 0,
/**^ Stripe */
LSM_VOLUME_RAID_TYPE_RAID1 = 1,
/**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
LSM_VOLUME_RAID_TYPE_RAID3 = 3,
/**^ Byte-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID4 = 4,
/**^ Block-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID5 = 5,
/**^ Block-level striping with distributed parity */
LSM_VOLUME_RAID_TYPE_RAID6 = 6,
/**^ Block-level striping with two distributed parities, aka, RAID-DP */
LSM_VOLUME_RAID_TYPE_RAID10 = 10,
/**^ Stripe of mirrors */
LSM_VOLUME_RAID_TYPE_RAID15 = 15,
/**^ Parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID16 = 16,
/**^ Dual parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID50 = 50,
/**^ Stripe of parities */
LSM_VOLUME_RAID_TYPE_RAID60 = 60,
/**^ Stripe of dual parities */
LSM_VOLUME_RAID_TYPE_RAID51 = 51,
/**^ Mirror of parities */
LSM_VOLUME_RAID_TYPE_RAID61 = 61,
/**^ Mirror of dual parities */
LSM_VOLUME_RAID_TYPE_JBOD = 20,
/**^ Just bunch of disks, no parity, no striping. */
LSM_VOLUME_RAID_TYPE_MIXED = 21,
/**^ This volume contains multiple RAID settings. */
LSM_VOLUME_RAID_TYPE_OTHER = 22,
/**^ Vendor specific RAID type */

LSM_VOLUME_STRIP_SIZE_UNKNOWN
LSM_VOLUME_DISK_COUNT_UNKNOWN
LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

V2: Changed callback registration

Changes in V3:

* New implementation for adding new methods:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
Will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by the old lsm_register_plugin_v1() in
addition to a struct lsm_ops_v1_2 pointer.

* Once version 1.2 is released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Add a 'New in version 1.2' comment to the lsm_volume_raid_info() function.

Changes in V4:

* Add LSM_FLAG_UNUSED_CHECK in the public lsm_volume_raid_info() function.
* Changed the value of these constants from -1 to 0 to align with
libblkid/sysfs:
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

Changes in V5:
* Rename output argument 'extent_count' to 'disk_count'.
* Change constant name from LSM_VOLUME_EXTENT_COUNT_UNKNOWN to
LSM_VOLUME_DISK_COUNT_UNKNOWN.
* Change the data type from 'int32_t' to 'uint32_t' for these output arguments:
* strip_size
* disk_count
* min_io_size
* opt_io_size

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 +++++++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 ++
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 +++++++++++++++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 +++++++++++++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++++++++++++++
c_binding/lsm_plugin_ipc.cpp | 62 +++++++++++++++++++++-
7 files changed, 231 insertions(+), 1 deletion(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 879f184..6e03f78 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -844,6 +844,26 @@ extern "C" {
uint32_t *count,
lsm_flag flags);

+/**
+ * Retrieves the RAID information of a volume. New in version 1.2.
+ * @param[in] c Valid connection
+ * @param[in] v Volume ptr.
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on disk or other storage extent.
+ * @param[out] disk_count Count of disks of RAID group(s) where this volume
+ * allocated from.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved, set to 0
+ * @return LSM_ERR_OK on success else error reason.
+ */
+int LSM_DLL_EXPORT lsm_volume_raid_info(
+ lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size, lsm_flag flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 7d6182c..18490f3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -77,6 +77,9 @@ typedef enum {

LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**< If you can configure iSCSI chap authentication */

+ LSM_CAP_VOLUME_RAID_INFO = 54,
+    /**^ If you can query RAID information from a volume */
+
LSM_CAP_VOLUME_THIN = 55, /**< Thin provisioned volumes are supported */

LSM_CAP_FS = 100, /**< List file systems */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index e7874f7..b36586c 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -745,6 +745,8 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
lsm_flag flags);
/** \struct lsm_san_ops_v1
* \brief Block array oriented functions (callback functions)
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
@@ -774,6 +776,8 @@ struct lsm_san_ops_v1 {

/** \struct lsm_fs_ops_v1
* \brief File system oriented functionality
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_fs_ops_v1 {
lsm_plug_fs_list fs_list; /**< list file systems */
@@ -792,6 +796,8 @@ struct lsm_fs_ops_v1 {

/** \struct lsm_nas_ops_v1
* \brief NAS system oriented functionality call back functions
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_nas_ops_v1 {
lsm_plug_nfs_auth_types nfs_auth_types; /**< List nfs authentication types */
@@ -801,6 +807,37 @@ struct lsm_nas_ops_v1 {
};

/**
+ * Query the RAID information of a volume
+ * @param[in] c Valid lsm plug-in pointer
+ * @param[in] volume Volume to query
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on each disk or other
+ * storage extent.
+ * @param[out] disk_count Count of disks of the RAID group(s) where this
+ * volume allocated from.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved
+ * @return LSM_ERR_OK, else error reason
+ */
+typedef int (*lsm_plug_volume_raid_info)(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type, uint32_t *strip_size,
+ uint32_t *disk_count, uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags);
+
+/** \struct lsm_ops_v1_2
+ * \brief Functions added in version 1.2
+ * NOTE: This structure may change during development until version 1.2 is
+ * released.
+ */
+struct lsm_ops_v1_2 {
+ lsm_plug_volume_raid_info vol_raid_info;
+ /**^ Query volume RAID information*/
+};
+
+/**
* Copies the memory pointed to by item with given type t.
* @param t Type of item to copy
* @param item Pointer to src
@@ -839,6 +876,24 @@ int LSM_DLL_EXPORT lsm_register_plugin_v1( lsm_plugin_ptr plug,
struct lsm_nas_ops_v1 *nas_ops );

/**
+ * Used to register version 1.2 APIs plug-in operation.
+ * @param plug Pointer provided by the framework
+ * @param private_data Private data to be used for whatever the plug-in
+ * needs
+ * @param mgm_ops Function pointers for struct lsm_mgmt_ops_v1
+ * @param san_ops Function pointers for struct lsm_san_ops_v1
+ * @param fs_ops Function pointers for struct lsm_fs_ops_v1
+ * @param nas_ops Function pointers for struct lsm_nas_ops_v1
+ * @param ops_v1_2 Function pointers for struct lsm_ops_v1_2
+ * @return LSM_ERR_OK on success, else error reason.
+ */
+int LSM_DLL_EXPORT lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug,
+ void * private_data, struct lsm_mgmt_ops_v1 *mgm_ops,
+ struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops,
+ struct lsm_nas_ops_v1 *nas_ops, struct lsm_ops_v1_2 *ops_v1_2);
+
+/**
* Used to retrieve private data for plug-in operation.
* @param plug Opaque plug-in pointer.
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 309a5e8..562fcff 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -131,6 +131,49 @@ typedef enum {
LSM_VOLUME_PROVISION_DEFAULT = 3 /**< Default provisioning */
} lsm_volume_provision_type;

+/**< \enum lsm_volume_raid_type Different types of RAID */
+typedef enum {
+ LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
+ /**^ Unknown */
+ LSM_VOLUME_RAID_TYPE_RAID0 = 0,
+ /**^ Stripe */
+ LSM_VOLUME_RAID_TYPE_RAID1 = 1,
+ /**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
+ LSM_VOLUME_RAID_TYPE_RAID3 = 3,
+ /**^ Byte-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID4 = 4,
+ /**^ Block-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID5 = 5,
+ /**^ Block-level striping with distributed parity */
+ LSM_VOLUME_RAID_TYPE_RAID6 = 6,
+ /**^ Block-level striping with two distributed parities, aka, RAID-DP */
+ LSM_VOLUME_RAID_TYPE_RAID10 = 10,
+ /**^ Stripe of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID15 = 15,
+ /**^ Parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID16 = 16,
+ /**^ Dual parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID50 = 50,
+ /**^ Stripe of parities */
+ LSM_VOLUME_RAID_TYPE_RAID60 = 60,
+ /**^ Stripe of dual parities */
+ LSM_VOLUME_RAID_TYPE_RAID51 = 51,
+ /**^ Mirror of parities */
+ LSM_VOLUME_RAID_TYPE_RAID61 = 61,
+ /**^ Mirror of dual parities */
+ LSM_VOLUME_RAID_TYPE_JBOD = 20,
+ /**^ Just bunch of disks, no parity, no striping. */
+ LSM_VOLUME_RAID_TYPE_MIXED = 21,
+ /**^ This volume contains multiple RAID settings. */
+ LSM_VOLUME_RAID_TYPE_OTHER = 22,
+ /**^ Vendor specific RAID type */
+} lsm_volume_raid_type;
+
+#define LSM_VOLUME_STRIP_SIZE_UNKNOWN 0
+#define LSM_VOLUME_DISK_COUNT_UNKNOWN 0
+#define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN 0
+#define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN 0
+
/**
* Admin state for volume, enabled or disabled
*/
diff --git a/c_binding/lsm_datatypes.hpp b/c_binding/lsm_datatypes.hpp
index aed6891..6a6271f 100644
--- a/c_binding/lsm_datatypes.hpp
+++ b/c_binding/lsm_datatypes.hpp
@@ -193,6 +193,7 @@ struct LSM_DLL_LOCAL _lsm_plugin {
struct lsm_san_ops_v1 *san_ops; /**< Callbacks for SAN ops */
struct lsm_nas_ops_v1 *nas_ops; /**< Callbacks for NAS ops */
struct lsm_fs_ops_v1 *fs_ops; /**< Callbacks for fs ops */
+ struct lsm_ops_v1_2 *ops_v1_2; /**< Callbacks for v1.2 ops */
};


diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 37faed4..cb2665a 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -1171,6 +1171,54 @@ int lsm_volume_delete(lsm_connect *c, lsm_volume *volume, char **job,

}

+int lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume,
+ lsm_volume_raid_type * raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags)
+{
+ if( LSM_FLAG_UNUSED_CHECK(flags) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ int rc = LSM_ERR_OK;
+ CONN_SETUP(c);
+
+ if( !LSM_IS_VOL(volume) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ if( !raid_type || !strip_size || !disk_count || !min_io_size ||
+ !opt_io_size) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ try {
+ std::map<std::string, Value> p;
+ p["volume"] = volume_to_value(volume);
+ p["flags"] = Value(flags);
+
+ Value parameters(p);
+ Value response;
+
+ rc = rpc(c, "volume_raid_info", parameters, response);
+ if( LSM_ERR_OK == rc ) {
+            // The response is an array of the five RAID info values.
+ std::vector<Value> j = response.asArray();
+ *raid_type = (lsm_volume_raid_type) j[0].asInt32_t();
+ *strip_size = j[1].asUint32_t();
+ *disk_count = j[2].asUint32_t();
+ *min_io_size = j[3].asUint32_t();
+ *opt_io_size = j[4].asUint32_t();
+ }
+ } catch( const ValueException &ve ) {
+ rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
+ ve.what());
+ }
+ return rc;
+
+}
+
int lsm_iscsi_chap_auth(lsm_connect *c, const char *init_id,
const char *username, const char *password,
const char *out_user, const char *out_password,
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index f5374b9..d2a43d4 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -123,6 +123,21 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
return rc;
}

+int lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op,
+ struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op,
+ struct lsm_nas_ops_v1 *nas_op, struct lsm_ops_v1_2 *ops_v1_2)
+{
+ int rc = lsm_register_plugin_v1(
+ plug, private_data, mgm_op, san_op, fs_op, nas_op);
+
+ if (rc != LSM_ERR_OK){
+ return rc;
+ }
+ plug->ops_v1_2 = ops_v1_2;
+ return rc;
+}
+
void *lsm_private_data_get(lsm_plugin_ptr plug)
{
if (!LSM_IS_PLUGIN(plug)) {
@@ -956,6 +971,50 @@ static int handle_volume_disable(lsm_plugin_ptr p, Value &params, Value &respons
return handle_vol_enable_disable(p, params, response, 0);
}

+static int handle_volume_raid_info(lsm_plugin_ptr p, Value &params,
+ Value &response)
+{
+ int rc = LSM_ERR_NO_SUPPORT;
+ if( p && p->ops_v1_2 && p->ops_v1_2->vol_raid_info) {
+ Value v_vol = params["volume"];
+
+ if(IS_CLASS_VOLUME(v_vol) &&
+ LSM_FLAG_EXPECTED_TYPE(params) ) {
+ lsm_volume *vol = value_to_volume(v_vol);
+ std::vector<Value> result;
+
+ if( vol ) {
+ lsm_volume_raid_type raid_type;
+ uint32_t strip_size;
+ uint32_t disk_count;
+ uint32_t min_io_size;
+ uint32_t opt_io_size;
+
+ rc = p->ops_v1_2->vol_raid_info(
+ p, vol, &raid_type, &strip_size, &disk_count,
+ &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params));
+
+ if( LSM_ERR_OK == rc ) {
+ result.push_back(Value((int32_t)raid_type));
+ result.push_back(Value(strip_size));
+ result.push_back(Value(disk_count));
+ result.push_back(Value(min_io_size));
+ result.push_back(Value(opt_io_size));
+ response = Value(result);
+ }
+
+ lsm_volume_record_free(vol);
+ } else {
+ rc = LSM_ERR_NO_MEMORY;
+ }
+
+ } else {
+ rc = LSM_ERR_TRANSPORT_INVALID_ARG;
+ }
+ }
+ return rc;
+}
+
static int ag_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2153,7 +2212,8 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("volume_replicate_range", handle_volume_replicate_range)
("volume_resize", handle_volume_resize)
("volumes_accessible_by_access_group", vol_accessible_by_ag)
- ("volumes", handle_volumes);
+ ("volumes", handle_volumes)
+ ("volume_raid_info", handle_volume_raid_info);

static int process_request(lsm_plugin_ptr p, const std::string &method, Value &request,
Value &response)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:41 UTC
Permalink
* New command:
lsmcli volume-raid-info --vol <VOL_ID>

* New alias:
lsmcli vri == lsmcli volume-raid-info
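
Example invocations (equivalent forms; the column names come from
data_display.py in this patch, and the exact table layout is up to the
formatter):

lsmcli volume-raid-info --vol <VOL_ID>
lsmcli vri --vol <VOL_ID>

Either prints one row with: Volume ID, RAID Type, Strip Size,
Disk Count, Minimum I/O Size, Optimal I/O Size.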

Changes in V2:
* Fix the output format when a not-found volume is passed to _get_item()
in volume_raid_info()

Changes in V5 (no changes in V3, V4):
* Sync the API change of the name 'extent_count' to 'disk_count'; the
column name changed to 'Disk Count'.

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/cmdline.py | 18 +++++++++++++-
tools/lsmcli/data_display.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index a781314..980b3a0 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,7 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,

from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- vol_provision_str_to_type, vol_rep_type_str_to_type)
+ vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo)


## Wraps the invocation to the command line
@@ -368,6 +368,14 @@ cmds = (
),

dict(
+ name='volume-raid-info',
+        help='Query volume RAID information',
+ args=[
+ dict(vol_id_opt),
+ ],
+ ),
+
+ dict(
name='access-group-create',
help='Create an access group',
args=[
@@ -628,6 +636,7 @@ aliases = (
['aa', 'access-group-add'],
['ar', 'access-group-remove'],
['ad', 'access-group-delete'],
+ ['vri', 'volume-raid-info'],
)


@@ -1318,6 +1327,13 @@ class CmdLine:
self._wait_for_it("volume-dependant-rm",
self.c.volume_child_dependency_rm(v), None)

+ def volume_raid_info(self, args):
+ lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume")
+ self.display_data(
+ [
+ VolumeRAIDInfo(
+ lsm_vol.id, *self.c.volume_raid_info(lsm_vol))])
+
## Displays file system dependants
def fs_dependants(self, args):
fs = _get_item(self.c.fs(), args.fs, "File System")
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 285a14f..e0524c8 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -243,6 +243,41 @@ class PlugData(object):
self.version = plugin_version


+class VolumeRAIDInfo(object):
+ _RAID_TYPE_MAP = {
+ Volume.RAID_TYPE_RAID0: 'RAID0',
+ Volume.RAID_TYPE_RAID1: 'RAID1',
+ Volume.RAID_TYPE_RAID3: 'RAID3',
+ Volume.RAID_TYPE_RAID4: 'RAID4',
+ Volume.RAID_TYPE_RAID5: 'RAID5',
+ Volume.RAID_TYPE_RAID6: 'RAID6',
+ Volume.RAID_TYPE_RAID10: 'RAID10',
+ Volume.RAID_TYPE_RAID15: 'RAID15',
+ Volume.RAID_TYPE_RAID16: 'RAID16',
+ Volume.RAID_TYPE_RAID50: 'RAID50',
+ Volume.RAID_TYPE_RAID60: 'RAID60',
+ Volume.RAID_TYPE_RAID51: 'RAID51',
+ Volume.RAID_TYPE_RAID61: 'RAID61',
+ Volume.RAID_TYPE_JBOD: 'JBOD',
+ Volume.RAID_TYPE_MIXED: 'MIXED',
+ Volume.RAID_TYPE_OTHER: 'OTHER',
+ Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ }
+
+ def __init__(self, vol_id, raid_type, strip_size, disk_count,
+ min_io_size, opt_io_size):
+ self.vol_id = vol_id
+ self.raid_type = raid_type
+ self.strip_size = strip_size
+ self.disk_count = disk_count
+ self.min_io_size = min_io_size
+ self.opt_io_size = opt_io_size
+
+ @staticmethod
+ def raid_type_to_str(raid_type):
+ return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP)
+
+
class DisplayData(object):

def __init__(self):
@@ -498,6 +533,29 @@ class DisplayData(object):
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}

+ VOL_RAID_INFO_HEADER = OrderedDict()
+ VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID'
+ VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type'
+ VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size'
+ VOL_RAID_INFO_HEADER['disk_count'] = 'Disk Count'
+ VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size'
+ VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size'
+
+ VOL_RAID_INFO_COLUMN_SKIP_KEYS = []
+
+ VOL_RAID_INFO_VALUE_CONV_ENUM = {
+ 'raid_type': VolumeRAIDInfo.raid_type_to_str,
+ }
+ VOL_RAID_INFO_VALUE_CONV_HUMAN = [
+ 'strip_size', 'min_io_size', 'opt_io_size']
+
+ VALUE_CONVERT[VolumeRAIDInfo] = {
+ 'headers': VOL_RAID_INFO_HEADER,
+ 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS,
+ 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN,
+ }
+
@staticmethod
def _get_man_pro_value(obj, key, value_conv_enum, value_conv_human,
flag_human, flag_enum):
--
1.8.3.1
Gris Ge
2015-03-05 08:28:42 UTC
Permalink
* Simply run the command and check the volume ID in the output.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/cmdtest.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/test/cmdtest.py b/test/cmdtest.py
index b603601..e80e027 100755
--- a/test/cmdtest.py
+++ b/test/cmdtest.py
@@ -676,6 +676,25 @@ def search_test(cap, system_id):
volume_delete(vol_id)
return

+def volume_raid_info_test(cap, system_id):
+ if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']:
+ test_pool_id = name_to_id(OP_POOL, test_pool_name)
+
+ if test_pool_id is None:
+ print 'Pool %s is not available!' % test_pool_name
+ exit(10)
+
+ vol_id = create_volume(test_pool_id)
+ out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1]
+ r = parse(out)
+ if len(r[0]) != 6:
+        print "volume-raid-info did not get expected output: %s" % out
+ exit(10)
+ if r[0][0] != vol_id:
+ print "volume-raid-info output volume ID is not requested " \
+ "volume ID %s" % out
+ exit(10)
+ return

def run_all_tests(cap, system_id):
test_display(cap, system_id)
@@ -688,6 +707,8 @@ def run_all_tests(cap, system_id):

search_test(cap, system_id)

+ volume_raid_info_test(cap, system_id)
+
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c", "--command", action="store", type="string",
--
1.8.3.1
Gris Ge
2015-03-05 08:28:43 UTC
Permalink
* Simply invoke lsm_volume_raid_info(); no additional checks.

Changes in V5 (no change in V2, V3, V4):

* Sync API changes for the argument name ('extent_count' to 'disk_count')
and data type.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)

diff --git a/test/tester.c b/test/tester.c
index 2edd18c..1622a75 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -2858,6 +2858,35 @@ START_TEST(test_volume_vpd_check)
}
END_TEST

+START_TEST(test_volume_raid_info)
+{
+ lsm_volume *volume = NULL;
+ char *job = NULL;
+ lsm_pool *pool = get_test_pool(c);
+
+ int rc = lsm_volume_create(
+ c, pool, "volume_raid_info_test", 20000000,
+ LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD);
+
+ fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED,
+ "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c)));
+
+ if( LSM_ERR_JOB_STARTED == rc ) {
+ volume = wait_for_job_vol(c, &job);
+ }
+
+ lsm_volume_raid_type raid_type;
+ uint32_t strip_size, disk_count, min_io_size, opt_io_size;
+
+ G(
+ rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size,
+ &disk_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD);
+
+ G(rc, lsm_volume_record_free, volume);
+ volume = NULL;
+}
+END_TEST
+
Suite * lsm_suite(void)
{
Suite *s = suite_create("libStorageMgmt");
@@ -2893,6 +2922,7 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_ss);
tcase_add_test(basic, test_nfs_exports);
tcase_add_test(basic, test_invalid_input);
+ tcase_add_test(basic, test_volume_raid_info);

suite_add_tcase(s, basic);
return s;
--
1.8.3.1
Gris Ge
2015-03-05 08:28:44 UTC
Permalink
* Allow check_const.pl to check constants with numbers in them.
Example:
LSM_VOLUME_RAID_TYPE_RAID1
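
The same check in Python terms (a minimal sketch mirroring the Perl
character-class change; the patterns are anchored here only for
illustration):

import re

old_pat = re.compile(r'^[A-Z][A-Z_]+$')     # before: digits not allowed
new_pat = re.compile(r'^[A-Z][A-Z_0-9]+$')  # after: digits allowed

name = 'LSM_VOLUME_RAID_TYPE_RAID1'
print old_pat.match(name) is not None  # False: the trailing '1' breaks it
print new_pat.match(name) is not None  # True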

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/utility/check_const.pl | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/utility/check_const.pl b/tools/utility/check_const.pl
index 9e5a700..e41c1e9 100644
--- a/tools/utility/check_const.pl
+++ b/tools/utility/check_const.pl
@@ -101,7 +101,7 @@ my $REGEX_C_CONST_FORMAT = qr/
(?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT)
)
(?<CNAME_PAT>
- [A-Z][A-Z_]+
+ [A-Z][A-Z_0-9]+
)
(?<HEADER1>
[\ \t]*
@@ -179,7 +179,7 @@ sub py_name_2_c_name($) {
# 2. Convert System to SYSTEM
# 3. Convert Capabilities to CAP and etc using %PY_CLASS_NAME_CONV;
my $py_name = shift;
- if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_]+)$/ ) {
+ if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_][A-Z_0-9]+)$/ ) {
my $py_class_name = $1;
my $py_var_name = $2;

@@ -308,7 +308,7 @@ sub _get_py_class_consts($$){
}
if ($line =~ /^$current_idention
[\ ]+
- ([A-Z][A-Z\_]+)
+ ([A-Z][A-Z\_0-9]+)
[\ ]*=[\ ]*
($REGEX_VALUE_FORMAT)/x){
my $var_name = $1;
--
1.8.3.1
Gris Ge
2015-03-05 08:28:45 UTC
Permalink
* Introduced full support for volume_raid_info():
* For sub-pools, use the RAID info of the parent pool.
* For RAID 1 and JBOD, set strip_size, min_io_size, and opt_io_size
to the block size (512).
* For other RAID types, calculate opt_io_size from the data disk count.
* For RAID_TYPE_MIXED, raise a PLUGIN_BUG LsmError.

* Replaced PoolRAID.RAID_TYPE_RAID_XXX with Volume.RAID_TYPE_RAID_XXX.

* Replaced PoolRAID.RAID_TYPE_NOT_APPLICABLE with Volume.RAID_TYPE_OTHER.

* Bumped the simulator data version to 3.1 due to the previous
Volume.RAID_TYPE_OTHER change.

Changes in V4 (no changes in V2 and V3):

* Removed unneeded import 'Volume' in simulator.py.

Changes in V5:

* Sync API changes for the return argument name 'disk_count' and the
constant name Volume.DISK_COUNT_UNKNOWN.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/sim/simarray.py | 149 +++++++++++++++++++++++++++++++-----------------
plugin/sim/simulator.py | 3 +
2 files changed, 99 insertions(+), 53 deletions(-)

diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index 73f4492..3fa7782 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -67,26 +67,6 @@ def _random_vpd():


class PoolRAID(object):
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
MEMBER_TYPE_UNKNOWN = 0
MEMBER_TYPE_DISK = 1
MEMBER_TYPE_DISK_MIX = 10
@@ -136,37 +116,37 @@ class PoolRAID(object):
return PoolRAID.MEMBER_TYPE_UNKNOWN

_RAID_DISK_CHK = {
- RAID_TYPE_JBOD: lambda x: x > 0,
- RAID_TYPE_RAID0: lambda x: x > 0,
- RAID_TYPE_RAID1: lambda x: x == 2,
- RAID_TYPE_RAID3: lambda x: x >= 3,
- RAID_TYPE_RAID4: lambda x: x >= 3,
- RAID_TYPE_RAID5: lambda x: x >= 3,
- RAID_TYPE_RAID6: lambda x: x >= 4,
- RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
- RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_JBOD: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID0: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID1: lambda x: x == 2,
+ Volume.RAID_TYPE_RAID3: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID4: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID5: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID6: lambda x: x >= 4,
+ Volume.RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
}

_RAID_PARITY_DISK_COUNT_FUNC = {
- RAID_TYPE_JBOD: lambda x: x,
- RAID_TYPE_RAID0: lambda x: x,
- RAID_TYPE_RAID1: lambda x: 1,
- RAID_TYPE_RAID3: lambda x: x - 1,
- RAID_TYPE_RAID4: lambda x: x - 1,
- RAID_TYPE_RAID5: lambda x: x - 1,
- RAID_TYPE_RAID6: lambda x: x - 2,
- RAID_TYPE_RAID10: lambda x: x / 2,
- RAID_TYPE_RAID15: lambda x: x / 2 - 1,
- RAID_TYPE_RAID16: lambda x: x / 2 - 2,
- RAID_TYPE_RAID50: lambda x: x - 2,
- RAID_TYPE_RAID60: lambda x: x - 4,
- RAID_TYPE_RAID51: lambda x: x / 2 - 1,
- RAID_TYPE_RAID61: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_JBOD: lambda x: x,
+ Volume.RAID_TYPE_RAID0: lambda x: x,
+ Volume.RAID_TYPE_RAID1: lambda x: 1,
+ Volume.RAID_TYPE_RAID3: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID4: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID5: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID6: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID10: lambda x: x / 2,
+ Volume.RAID_TYPE_RAID15: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID16: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_RAID50: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID60: lambda x: x - 4,
+ Volume.RAID_TYPE_RAID51: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID61: lambda x: x / 2 - 2,
}

@staticmethod
@@ -191,7 +171,7 @@ class PoolRAID(object):


class BackStore(object):
- VERSION = "3.0"
+ VERSION = "3.1"
VERSION_SIGNATURE = 'LSM_SIMULATOR_DATA_%s_%s' % (VERSION, md5(VERSION))
JOB_DEFAULT_DURATION = 1
JOB_DATA_TYPE_VOL = 1
@@ -201,6 +181,7 @@ class BackStore(object):
SYS_ID = "sim-01"
SYS_NAME = "LSM simulated storage plug-in"
BLK_SIZE = 512
+ STRIP_SIZE = 131072 # 128 KiB

_LIST_SPLITTER = '#'

@@ -724,7 +705,7 @@ class BackStore(object):

pool_1_id = self.sim_pool_create_from_disk(
name='Pool 1',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=pool_1_disks,
element_type=Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
@@ -744,7 +725,7 @@ class BackStore(object):

self.sim_pool_create_from_disk(
name='Pool 3',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=ssd_pool_disks,
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
@@ -755,7 +736,7 @@ class BackStore(object):
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
Pool.ELEMENT_TYPE_DELTA,
- raid_type=PoolRAID.RAID_TYPE_RAID0,
+ raid_type=Volume.RAID_TYPE_RAID0,
sim_disk_ids=test_pool_disks)

self._data_add(
@@ -1009,13 +990,23 @@ class BackStore(object):
'status_info': '',
'element_type': element_type,
'unsupported_actions': unsupported_actions,
- 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': Volume.RAID_TYPE_OTHER,
'member_type': PoolRAID.MEMBER_TYPE_POOL,
'parent_pool_id': parent_pool_id,
'total_space': size,
})
return self.lastrowid

+ def sim_pool_disks_count(self, sim_pool_id):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE owner_pool_id=%s;" %
+ sim_pool_id)[0][0]
+
+ def sim_pool_data_disks_count(self, sim_pool_id=None):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE "
+ "owner_pool_id=%s and role='DATA';" % sim_pool_id)[0][0]
+
def sim_vols(self, sim_ag_id=None):
"""
Return a list of sim_vol dict.
@@ -2231,3 +2222,55 @@ class SimArray(object):
@_handle_errors
def target_ports(self):
return list(SimArray._sim_tgt_2_lsm(t) for t in self.bs_obj.sim_tgts())
+
+ @_handle_errors
+ def volume_raid_info(self, lsm_vol):
+ sim_pool = self.bs_obj.sim_pool_of_id(
+ SimArray._lsm_id_to_sim_id(
+ lsm_vol.pool_id,
+ LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")))
+
+ raid_type = sim_pool['raid_type']
+ strip_size = Volume.STRIP_SIZE_UNKNOWN
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = Volume.OPT_IO_SIZE_UNKNOWN
+ disk_count = Volume.DISK_COUNT_UNKNOWN
+
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
+ parent_sim_pool = self.bs_obj.sim_pool_of_id(
+ sim_pool['parent_pool_id'])
+ raid_type = parent_sim_pool['raid_type']
+
+ disk_count = self.bs_obj.sim_pool_disks_count(
+ parent_sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ parent_sim_pool['id'])
+ else:
+ disk_count = self.bs_obj.sim_pool_disks_count(
+ sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_UNKNOWN or \
+ raid_type == Volume.RAID_TYPE_OTHER:
+ return [
+ raid_type, strip_size, disk_count, min_io_size,
+ opt_io_size]
+
+ if raid_type == Volume.RAID_TYPE_MIXED:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unsupported RAID_TYPE_MIXED pool "
+ "%s" % sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_RAID1 or \
+ raid_type == Volume.RAID_TYPE_JBOD:
+ strip_size = BackStore.BLK_SIZE
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = BackStore.BLK_SIZE
+ else:
+ strip_size = BackStore.STRIP_SIZE
+ min_io_size = BackStore.STRIP_SIZE
+ opt_io_size = int(data_disk_count * BackStore.STRIP_SIZE)
+
+ return [raid_type, strip_size, disk_count, min_io_size, opt_io_size]
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index 8f7adfc..d562cd6 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -289,3 +289,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
return search_property(
[SimPlugin._sim_data_2_lsm(t) for t in sim_tgts],
search_key, search_value)
+
+ def volume_raid_info(self, volume, flags=0):
+ return self.sim_array.volume_raid_info(volume)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:46 UTC
Permalink
* Simply set XXX_UNKNOWN on the output parameters.

V2:
- Add call to register volume_raid_info
- Add LSM_CAP_VOLUME_RAID_INFO to capabilities

Changes in V3:
* Use lsm_register_plugin_v1_2() to register lsm_volume_raid_info() support.

Changes in V5 (no change in V4):

* Sync API changes:
* argument name 'disk_count'
* argument type 'uint32_t'.
* constant name 'LSM_VOLUME_DISK_COUNT_UNKNOWN'.

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/simc/simc_lsmplugin.c | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index 7c4d287..422a064 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -391,6 +391,7 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_EXPORTS,
LSM_CAP_EXPORT_FS,
LSM_CAP_EXPORT_REMOVE,
+ LSM_CAP_VOLUME_RAID_INFO,
-1
);

@@ -956,6 +957,33 @@ static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
return rc;
}

+static int volume_raid_info(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type,
+ uint32_t *strip_size, uint32_t *disk_count,
+ uint32_t *min_io_size, uint32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
+ struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
+
+ if( !av) {
+ rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
+ "volume not found!");
+ }
+
+ *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN;
+ *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN;
+ *disk_count = LSM_VOLUME_DISK_COUNT_UNKNOWN;
+ *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN;
+ *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN;
+ return rc;
+}
+
+static struct lsm_ops_v1_2 ops_v1_2 = {
+ volume_raid_info
+};
+
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume *v,
lsm_flag flags)
{
@@ -1527,7 +1555,6 @@ static struct lsm_san_ops_v1 san_ops = {
list_targets
};

-
static int fs_list(lsm_plugin_ptr c, const char *search_key,
const char *search_value, lsm_fs **fs[], uint32_t *count,
lsm_flag flags)
@@ -2243,8 +2270,8 @@ int load( lsm_plugin_ptr c, const char *uri, const char *password,
_unload(pd);
pd = NULL;
} else {
- rc = lsm_register_plugin_v1( c, pd, &mgm_ops,
- &san_ops, &fs_ops, &nfs_ops);
+ rc = lsm_register_plugin_v1_2(
+ c, pd, &mgm_ops, &san_ops, &fs_ops, &nfs_ops, &ops_v1_2);
}
}
return rc;
--
1.8.3.1
Gris Ge
2015-03-05 08:28:47 UTC
Permalink
* Treating each MegaRAID DG(disk group) as LSM pool.
* Based on storcli output of:
storcli /c0/dall show all J
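
A hedged usage sketch (assuming a local 'megaraid://' connection with
storcli installed); each DG then shows up as one LSM pool:

import lsm

c = lsm.Client('megaraid://')
for p in c.pools():
    # Pool id is '<sys_id>:DG<number>', e.g. 'SV03403550:DG0'
    print(p.id, p.name, p.total_space, p.free_space)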

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 99 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 96 insertions(+), 3 deletions(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index e1e7e8d..5e3802b 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin)
+ Disk, VERSION, search_property, IPlugin, Pool)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -115,6 +115,47 @@ def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict):
disk_show_basic_dict['State'], Disk.STATUS_UNKNOWN)


+def _mega_size_to_lsm(mega_size):
+ """
+ LSI uses 'TB, GB, MB, KB' etc.; LSM uses 'TiB' etc.
+ Return the size in bytes as an int.
+ """
+ re_regex = re.compile("^([0-9\.]+) ([EPTGMK])B$")
+ re_match = re_regex.match(mega_size)
+ if re_match:
+ return size_human_2_size_bytes(
+ "%s%siB" % (re_match.group(1), re_match.group(2)))
+
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "_mega_size_to_lsm(): Got unexpected LSI size string %s" %
+ mega_size)
+
+
+_POOL_STATUS_MAP = {
+ 'Onln': Pool.STATUS_OK,
+ 'Dgrd': Pool.STATUS_DEGRADED,
+ 'Pdgd': Pool.STATUS_DEGRADED,
+ 'Offln': Pool.STATUS_ERROR,
+ 'Rbld': Pool.STATUS_RECONSTRUCTING,
+ 'Optl': Pool.STATUS_OK,
+ # TODO(Gris Ge): The 'Optl' is undocumented, check with LSI.
+}
+
+
+def _pool_status_of(dg_top):
+ """
+ Return status
+ """
+ if dg_top['State'] in _POOL_STATUS_MAP.keys():
+ return _POOL_STATUS_MAP[dg_top['State']]
+ return Pool.STATUS_UNKNOWN
+
+
+def _pool_id_of(dg_id, sys_id):
+ return "%s:DG%s" % (sys_id, dg_id)
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -217,7 +258,11 @@ class MegaRAID(IPlugin):
ErrorNumber.PLUGIN_BUG,
"MegaRAID storcli failed with error %d: %s" %
(rc_status['Status Code'], rc_status['Description']))
- return ctrl_output[0].get('Response Data')
+ real_data = ctrl_output[0].get('Response Data')
+ if real_data and 'Response Data' in real_data.keys():
+ return real_data['Response Data']
+
+ return real_data
else:
return output

@@ -317,7 +362,55 @@ class MegaRAID(IPlugin):

return search_property(rc_lsm_disks, search_key, search_value)

+ @staticmethod
+ def _dg_free_size(dg_num, free_space_list):
+ """
+ Get information from 'FREE SPACE DETAILS' of /c0/dall show all.
+ """
+ for free_space in free_space_list:
+ if int(free_space['DG']) == int(dg_num):
+ return _mega_size_to_lsm(free_space['Size'])
+
+ return 0
+
+ def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num):
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ pool_id = _pool_id_of(dg_top['DG'], sys_id)
+ name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG'])
+ elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL
+ unsupported_actions = 0
+ # TODO(Gris Ge): contact LSI to get accurate total space and free
+ # space. The size we are using here is not what host
+ # got.
+ total_space = _mega_size_to_lsm(dg_top['Size'])
+ free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list)
+ status = _pool_status_of(dg_top)
+ status_info = ''
+ if status == Pool.STATUS_UNKNOWN:
+ status_info = dg_top['State']
+
+ plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG'])
+
+ return Pool(
+ pool_id, name, elem_type, unsupported_actions,
+ total_space, free_space, status, status_info,
+ sys_id, plugin_data)
+
@_handle_errors
def pools(self, search_key=None, search_value=None,
flags=Client.FLAG_RSVD):
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet")
+ lsm_pools = []
+ for ctrl_num in range(self._ctrl_count()):
+ dg_show_output = self._storcli_exec(
+ ["/c%d/dall" % ctrl_num, "show", "all"])
+ free_space_list = dg_show_output.get('FREE SPACE DETAILS', [])
+ for dg_top in dg_show_output['TOPOLOGY']:
+ if dg_top['Arr'] != '-':
+ continue
+ if dg_top['DG'] == '-':
+ continue
+ lsm_pools.append(
+ self._dg_top_to_lsm_pool(
+ dg_top, free_space_list, ctrl_num))
+
+ return search_property(lsm_pools, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:48 UTC
Permalink
* Treating each MegaRAID VD as an LSM Volume.
* Using 'storcli /c0/vall show all' to query all volumes.
* Add new capability: Capabilities.VOLUMES
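
A hedged usage sketch (connection as in the pools() patch; the pool id
value is hypothetical):

import lsm

c = lsm.Client('megaraid://')
# search_property() allows filtering, e.g. all volumes of one disk group:
vols = c.volumes(search_key='pool_id', search_value='SV03403550:DG0')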

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 46 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 5e3802b..ae2e953 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin, Pool)
+ Disk, VERSION, search_property, IPlugin, Pool, Volume)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -226,6 +226,7 @@ class MegaRAID(IPlugin):
"System not found")
cap = Capabilities()
cap.set(Capabilities.DISKS)
+ cap.set(Capabilities.VOLUMES)
return cap

def _storcli_exec(self, storcli_cmds, flag_json=True):
@@ -414,3 +415,46 @@ class MegaRAID(IPlugin):
dg_top, free_space_list, ctrl_num))

return search_property(lsm_pools, search_key, search_value)
+
+ @staticmethod
+ def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list,
+ vd_prop_info, vd_path):
+
+ vol_id = "%s:VD%d" % (sys_id, vd_id)
+ name = "VD %d" % vd_id
+ vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information.
+ block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz'])
+ num_of_blocks = vd_prop_info['Number of Blocks']
+ admin_state = Volume.ADMIN_STATE_ENABLED
+ if vd_prop_info['Exposed to OS'] != 'Yes' or \
+ vd_basic_info['Access'] != 'RW':
+ admin_state = Volume.ADMIN_STATE_DISABLED
+ pool_id = _pool_id_of(dg_id, sys_id)
+ plugin_data = vd_path
+ return Volume(
+ vol_id, name, vpd83, block_size, num_of_blocks, admin_state,
+ sys_id, pool_id, plugin_data)
+
+ @_handle_errors
+ def volumes(self, search_key=None, search_value=None, flags=0):
+ lsm_vols = []
+ for ctrl_num in range(self._ctrl_count()):
+ vol_show_output = self._storcli_exec(
+ ["/c%d/vall" % ctrl_num, "show", "all"])
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ for key_name in vol_show_output.keys():
+ if key_name.startswith('/c'):
+ vd_basic_info = vol_show_output[key_name][0]
+ (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/')
+ dg_id = int(dg_id)
+ vd_id = int(vd_id)
+ vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id]
+
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ lsm_vols.append(
+ MegaRAID._vd_to_lsm_vol(
+ vd_id, dg_id, sys_id, vd_basic_info,
+ vd_pd_info_list, vd_prop_info, key_name))
+
+ return search_property(lsm_vols, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-03-05 08:28:49 UTC
Permalink
* In MegaRAID storcli, the 'Rbld' disk status indicates the disk is
in use for reconstructing pool data.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index ae2e953..83abf63 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -93,6 +93,7 @@ _DISK_STATE_MAP = {
'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK,
'UGood': Disk.STATUS_STOPPED | Disk.STATUS_OK,
'UBad': Disk.STATUS_STOPPED | Disk.STATUS_ERROR,
+ 'Rbld': Disk.STATUS_RECONSTRUCT,
}
--
1.8.3.1
Gris Ge
2015-03-05 08:28:50 UTC
Permalink
* Use 'storcli /c0/v1 show all' command line output to determine
RAID type, strip size and disk count.

* Calculate optimal I/O size as strip size multiplied by the count of
RAID data disks (not mirror, not parity); see the worked example after
the dd output below.

* Tested query on RAID 0, 1, 5, 10, 50.

* Tested the optimal I/O size on RAID 5:
[***@storageqe-08 ~]# lsmenv mega lsmcli vri --vol SV03403550:VD1
Device alias: mega
URI: megaraid://
lsmcli vri --vol SV03403550:VD1
Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
--------------------------------------------------------------------------------------------
SV03403550:VD1 | RAID5 | 131072 | 5 | 131072 | 524288

Time: 0:00.29
[***@storageqe-08 ~]# dd if=/dev/urandom of=test.img bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 153.174 s, 6.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 58.9573 s, 17.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 37.7282 s, 27.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 35.3351 s, 29.7 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 70.0779 s, 15.0 MB/s
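
The arithmetic behind those numbers, as a small sketch using the values
from the lsmcli output above:

# RAID 5 over 5 disks with a 128 KiB strip:
strip_size = 131072
disk_count = 5
strip_count = disk_count - 1            # one disk's worth of parity
opt_io_size = strip_size * strip_count
assert opt_io_size == 524288            # matches 'Optimal I/O Size' above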

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 83abf63..e754cd8 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -157,6 +157,33 @@ def _pool_id_of(dg_id, sys_id):
return "%s:DG%s" % (sys_id, dg_id)


+_RAID_TYPE_MAP = {
+ 'RAID0': Volume.RAID_TYPE_RAID0,
+ 'RAID1': Volume.RAID_TYPE_RAID1,
+ 'RAID5': Volume.RAID_TYPE_RAID5,
+ 'RAID6': Volume.RAID_TYPE_RAID6,
+ 'RAID00': Volume.RAID_TYPE_RAID0,
+ # Some MegaRAID cards only support up to 16 disks in each span.
+ # To support 16+ disks in one group, MegaRAID has RAID00 or even RAID000.
+ # All of them are treated as RAID0.
+ 'RAID10': Volume.RAID_TYPE_RAID10,
+ 'RAID50': Volume.RAID_TYPE_RAID50,
+ 'RAID60': Volume.RAID_TYPE_RAID60,
+}
+
+
+def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info):
+ raid_type = _RAID_TYPE_MAP.get(
+ vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN)
+
+ # In LSI, a RAID1 of four disks or more is actually a RAID10.
+ if raid_type == Volume.RAID_TYPE_RAID1 and \
+ int(vd_prop_info['Number of Drives Per Span']) >= 4:
+ raid_type = Volume.RAID_TYPE_RAID10
+
+ return raid_type
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -459,3 +486,52 @@ class MegaRAID(IPlugin):
vd_pd_info_list, vd_prop_info, key_name))

return search_property(lsm_vols, search_key, search_value)
+
+ @_handle_errors
+ def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
+ if not volume.plugin_data:
+ raise LsmError(
+ ErrorNumber.INVALID_ARGUMENT,
+ "Ilegal input volume argument: missing plugin_data property")
+
+ vd_path = volume.plugin_data
+ vol_show_output = self._storcli_exec([vd_path, "show", "all"])
+ vd_basic_info = vol_show_output[vd_path][0]
+ vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
+ strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size'])
+ disk_count = (
+ int(vd_prop_info['Number of Drives Per Span']) *
+ int(vd_prop_info['Span Depth']))
+ if raid_type == Volume.RAID_TYPE_RAID0:
+ strip_count = disk_count
+ elif raid_type == Volume.RAID_TYPE_RAID1:
+ strip_count = 1
+ elif raid_type == Volume.RAID_TYPE_RAID5:
+ strip_count = disk_count - 1
+ elif raid_type == Volume.RAID_TYPE_RAID6:
+ strip_count = disk_count - 2
+ elif raid_type == Volume.RAID_TYPE_RAID50:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 1) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID60:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 2) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID10:
+ strip_count = (
+ int(vd_prop_info['Number of Drives Per Span']) / 2 *
+ int(vd_prop_info['Span Depth']))
+ else:
+ # MegaRAID does not support RAID 15 or RAID 16 yet.
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unexpected RAID type: %s" %
+ vd_basic_info['TYPE'])
+
+ return [
+ raid_type, strip_size, disk_count, strip_size,
+ strip_size * strip_count]
--
1.8.3.1
Gris Ge
2015-03-05 08:28:51 UTC
Permalink
* NetApp ONTAP strip size (minimum I/O size) is 4 KiB, stripe size
(optimal I/O size) is 64 KiB. Both are unchangeable.

* The extent count(disk count) is taken from aggregate 'disk-count'
property.

* Changed Filer.aggregates() to accept an optional argument 'aggr_name'
which queries only the named aggregate.

* Uncommented and updated the old code for converting NetApp RAID level to
libstoragemgmt RAID level.

* Tested on ONTAP simulator 8.1.1 7-mode and real ONTAP 8.0.2 7-mode.
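
So for any ONTAP volume the call reduces to the fixed sizes plus the
aggregate's disk count; a hedged sketch (URI and credentials are
hypothetical):

import lsm

c = lsm.Client('ontap://root@filer')
vol = c.volumes()[0]
raid_type, strip, disks, min_io, opt_io = c.volume_raid_info(vol)
# strip == 4096 and opt_io == 65536 regardless of the disk count;
# disks comes from the aggregate's 'disk-count' property.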

Changes in V5(No changes in V2, V3, V4):

* Sync API changes for argument name.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/ontap/na.py | 8 ++++++--
plugin/ontap/ontap.py | 55 +++++++++++++++++++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 15 deletions(-)

diff --git a/plugin/ontap/na.py b/plugin/ontap/na.py
index 1e015ba..b68577c 100644
--- a/plugin/ontap/na.py
+++ b/plugin/ontap/na.py
@@ -231,11 +231,15 @@ class Filer(object):
disks = self._invoke('disk-list-info')
return disks['disk-details']['disk-detail-info']

- def aggregates(self):
+ def aggregates(self, aggr_name=None):
"""
Return a list of aggregates
+ If aggr_name provided, return [na_aggr]
"""
- pools = self._invoke('aggr-list-info')
+ if aggr_name:
+ pools = self._invoke('aggr-list-info', {'aggregate': aggr_name})
+ else:
+ pools = self._invoke('aggr-list-info')
tmp = pools['aggregates']['aggr-info']
return to_list(tmp)

diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index c2a2c58..b6358a8 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -121,6 +121,10 @@ class Ontap(IStorageAreaNetwork, INfs):
'restricted': 'volume is restricted to protocol accesses',
}

+ # strip size: http://www.netapp.com/us/media/tr-3001.pdf
+ _STRIP_SIZE = 4096
+ _OPT_IO_SIZE = 65536
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -310,19 +314,6 @@ class Ontap(IStorageAreaNetwork, INfs):
return search_property(
[self._lun(l) for l in luns], search_key, search_value)

-# @staticmethod
-# def _raid_type_of_na_aggr(na_aggr):
-# na_raid_statuses = na_aggr['raid-status'].split(',')
-# if 'raid0' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID0
-# if 'raid4' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID4
-# if 'raid_dp' in na_raid_statuses:
-# return Pool.RAID_TYPE_RAID6
-# if 'mixed_raid_type' in na_raid_statuses:
-# return Pool.RAID_TYPE_MIXED
-# return Pool.RAID_TYPE_UNKNOWN
-
# This is based on NetApp ONTAP Manual pages:
# https://library.netapp.com/ecmdocs/ECMP1196890/html/man1/na_aggr.1.html
_AGGR_RAID_STATUS_CONV = {
@@ -1290,3 +1281,41 @@ class Ontap(IStorageAreaNetwork, INfs):
self.sys_info.id))

return search_property(tp, search_key, search_value)
+
+ @staticmethod
+ def _raid_type_of_na_aggr(na_aggr):
+ na_raid_statuses = na_aggr['raid-status'].split(',')
+ if 'mixed_raid_type' in na_raid_statuses:
+ return Volume.RAID_TYPE_MIXED
+ elif 'raid0' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID0
+ elif 'raid4' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID4
+ elif 'raid_dp' in na_raid_statuses:
+ return Volume.RAID_TYPE_RAID6
+ return Volume.RAID_TYPE_UNKNOWN
+
+ @handle_ontap_errors
+ def volume_raid_info(self, volume, flags=0):
+ na_vol_name = Ontap._get_volume_from_path(volume.pool_id)
+ na_vol = self.f.volumes(volume_name=na_vol_name)
+ if len(na_vol) == 0:
+ # If parent pool not found, then this LSM volume should not exist.
+ raise LsmError(
+ ErrorNumber.NOT_FOUND_VOLUME,
+ "Volume not found")
+ if len(na_vol) != 1:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got 2+ na_vols from self.f.volumes() "
+ "%s" % na_vol)
+
+ na_vol = na_vol[0]
+ na_aggr_name = na_vol['containing-aggregate']
+ na_aggr = self.f.aggregates(aggr_name=na_aggr_name)[0]
+ raid_type = Ontap._raid_type_of_na_aggr(na_aggr)
+ disk_count = int(na_aggr['disk-count'])
+
+ return [
+ raid_type, Ontap._STRIP_SIZE, disk_count, Ontap._STRIP_SIZE,
+ Ontap._OPT_IO_SIZE]
--
1.8.3.1
Tony Asleson
2015-03-05 20:43:24 UTC
Permalink
Patch set committed, great work!

Thanks,
Tony
Post by Gris Ge
* New method volume_raid_info() to query RAID type, disk count,
minimum I/O size, optimal I/O size.
* sim
# Simple return UNKNOWN
* simc
# Simple set UNKNOWN on output parameter.
* MegaRAID
* The C library part might be buggy considering my C skill set.
We could use PE size of LVM for minimum I/O size and strip size.
And set RAID type as JBOD and extent count as 1.
Once LVM RAID supported, it could provide real RAID type and other
information.
In SMI-S spec, each StorageVolume has StorageSetting associated,
but no definition mentioned ExtentStripeLength is the optimal I/O
size. In stead of guess or mess with SNIA, simply 'no support' would
works better.
Patch for ONTAP plugin is ready but not included in this patch set
since that was based on my test and guess.
Waiting NetApp's official answer about their optimal I/O size.
No document found about strip settings.
* This is the best design and naming scheme I got.
PLEASE let me know if you got better.
Thank you very much in advance.
Tony introduced a new way for plugin to register newly added API with
full backward compatibility. Simulator C plugin implemented this change.
Add missing capability LSM_CAP_VOLUME_RAID_INFO
Another approach to register newly added API with full backward
Free to change during version 1.2 development phase.
Will frozen it once 1.2 released.
* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by old lsm_register_plugin_v1()
addition to struct lsm_ops_v1_2 pointer.
* Once version 1.2 released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().
Full volume_raid_info() support in simulator plugin.
[PATCH] lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops
* Included ONTAP plugin support.
* Changed the value of these constants from -1 to 0 to align with
* Volume.STRIP_SIZE_UNKNOWN
* Volume.MIN_IO_SIZE_UNKNOWN
* Volume.OPT_IO_SIZE_UNKNOWN
* Volume.EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_STRIP_SIZE_UNKNOWN
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN
* LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
* LSM_VOLUME_OPT_IO_SIZE_UNKNOWN
* Add LSM_FLAG_UNUSED_CHECK in public lsm_volume_raid_info() function.
* Removed unneeded import 'Volume' from simulator.py.
* Change output argument name from 'extent_count' to 'disk_count' as
it fit the most common expectation on RAID. Please check patch 3/15
for detail.
* 'Volume.EXTENT_COUNT_UNKNOWN' -> 'Volume.DISK_COUNT_UNKNOWN'
* LSM_VOLUME_EXTENT_COUNT_UNKNOWN -> LSM_VOLUME_DISK_COUNT_UNKNOWN
* strip_size
* disk_count
* min_io_size
* opt_io_size
* Sync plugins for this change.
Python Library: Fix decorator problem with docstrings
Python Library: New method volume_raid_info()
C Library: New method lsm_volume_raid_info()
lsmcli: Add volume_raid_info() support.
lsmcli Test: Add test for volume-raid-info command.
C Unit Test: Add test for lsm_volume_raid_info() method
Constant Test: Fix missing constant with number in it.
Simulator Plugin: Add volume_raid_info() support
Simulator C Plugin: Add lsm_volume_raid_info() support.
MegaRAID plugin: Add pools() method support.
MegaRAID Plugin: Add volumes() support.
MegaRAID Plugin: Add Disk.STATUS_RECONSTRUCT support.
MegaRAID Plugin: Add volume_raid_info() support.
ONTAP Plugin: Add volume_raid_info() support.
lsm_plugin_ipc.cpp: Bug Fix, using san_ops for fs_ops
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 ++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 +
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 ++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 ++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 48 +++++
c_binding/lsm_plugin_ipc.cpp | 86 ++++++--
plugin/megaraid/megaraid.py | 220 ++++++++++++++++++++-
plugin/ontap/na.py | 8 +-
plugin/ontap/ontap.py | 55 ++++--
plugin/sim/simarray.py | 149 +++++++++-----
plugin/sim/simulator.py | 3 +
plugin/simc/simc_lsmplugin.c | 33 +++-
python_binding/lsm/_client.py | 103 ++++++++++
python_binding/lsm/_common.py | 1 +
python_binding/lsm/_data.py | 42 ++++
test/cmdtest.py | 21 ++
test/tester.c | 30 +++
tools/lsmcli/cmdline.py | 18 +-
tools/lsmcli/data_display.py | 58 ++++++
tools/utility/check_const.pl | 6 +-
21 files changed, 912 insertions(+), 91 deletions(-)
Gris Ge
2015-02-26 12:35:37 UTC
Permalink
* Treating each MegaRAID VD as an LSM Volume.
* Using 'storcli /c0/vall show all' to query all volumes.
* Add new capability: Capabilities.VOLUMES

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 46 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 5e3802b..ae2e953 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -23,7 +23,7 @@ import errno

from lsm import (uri_parse, search_property, size_human_2_size_bytes,
Capabilities, LsmError, ErrorNumber, System, Client,
- Disk, VERSION, search_property, IPlugin, Pool)
+ Disk, VERSION, search_property, IPlugin, Pool, Volume)

from lsm.plugin.megaraid.utils import cmd_exec, ExecError

@@ -226,6 +226,7 @@ class MegaRAID(IPlugin):
"System not found")
cap = Capabilities()
cap.set(Capabilities.DISKS)
+ cap.set(Capabilities.VOLUMES)
return cap

def _storcli_exec(self, storcli_cmds, flag_json=True):
@@ -414,3 +415,46 @@ class MegaRAID(IPlugin):
dg_top, free_space_list, ctrl_num))

return search_property(lsm_pools, search_key, search_value)
+
+ @staticmethod
+ def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list,
+ vd_prop_info, vd_path):
+
+ vol_id = "%s:VD%d" % (sys_id, vd_id)
+ name = "VD %d" % vd_id
+ vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information.
+ block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz'])
+ num_of_blocks = vd_prop_info['Number of Blocks']
+ admin_state = Volume.ADMIN_STATE_ENABLED
+ if vd_prop_info['Exposed to OS'] != 'Yes' or \
+ vd_basic_info['Access'] != 'RW':
+ admin_state = Volume.ADMIN_STATE_DISABLED
+ pool_id = _pool_id_of(dg_id, sys_id)
+ plugin_data = vd_path
+ return Volume(
+ vol_id, name, vpd83, block_size, num_of_blocks, admin_state,
+ sys_id, pool_id, plugin_data)
+
+ @_handle_errors
+ def volumes(self, search_key=None, search_value=None, flags=0):
+ lsm_vols = []
+ for ctrl_num in range(self._ctrl_count()):
+ vol_show_output = self._storcli_exec(
+ ["/c%d/vall" % ctrl_num, "show", "all"])
+ sys_id = self._sys_id_of_ctrl_num(ctrl_num)
+ for key_name in vol_show_output.keys():
+ if key_name.startswith('/c'):
+ vd_basic_info = vol_show_output[key_name][0]
+ (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/')
+ dg_id = int(dg_id)
+ vd_id = int(vd_id)
+ vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id]
+
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ lsm_vols.append(
+ MegaRAID._vd_to_lsm_vol(
+ vd_id, dg_id, sys_id, vd_basic_info,
+ vd_pd_info_list, vd_prop_info, key_name))
+
+ return search_property(lsm_vols, search_key, search_value)
--
1.8.3.1
Gris Ge
2015-02-26 12:35:38 UTC
Permalink
* In MegaRAID storcli, the 'Rbld' disk status indicates the disk is
in use for reconstructing pool data.

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index ae2e953..83abf63 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -93,6 +93,7 @@ _DISK_STATE_MAP = {
'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK,
'UGood': Disk.STATUS_STOPPED | Disk.STATUS_OK,
'UBad': Disk.STATUS_STOPPED | Disk.STATUS_ERROR,
+ 'Rbld': Disk.STATUS_RECONSTRUCT,
}
--
1.8.3.1
Gris Ge
2015-02-26 12:35:39 UTC
Permalink
* With a plain decorator, the docstring of the original method is lost.
* Use functools.wraps() to keep the docstring of the original method.
Check
http://stackoverflow.com/questions/1782843/python-decorator-problem-with-docstrings
for detail.

* With this fix user can check method help message in interactive python with
command:
help(lsm.Client.volume_create)
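
A minimal sketch of the pattern (type checking elided):

import functools

def return_requires(*types):
    def outer(func):
        @functools.wraps(func)   # keeps func.__doc__ and func.__name__
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
    return outer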

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_common.py | 1 +
1 file changed, 1 insertion(+)

diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index f2fd568..4c87661 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -533,6 +533,7 @@ def return_requires(*types):
is quite important.
"""
def outer(func):
+ @functools.wraps(func)
def inner(*args, **kwargs):
r = func(*args, **kwargs)
--
1.8.3.1
Gris Ge
2015-02-26 12:35:40 UTC
Permalink
* The docstring of lsm.Client.volume_raid_info() contains full detail
about this new method. Quick info:
Usage:
volume_raid_info(self, volume, flags=0)
Returns:
[raid_type, strip_size, extent_count, min_io_size, opt_io_size]
# strip_size is the size of strip on each disk/extent
# extent_count is the disk/extent count.
# min_io_size is minimum I/O size. Also the preferred I/O size
# of random I/O.
# opt_io_size is optimal I/O size. Also the preferred I/O size
# of sequential I/O.

* Why not use 'pool_raid_info' instead?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not implement RAID
at the pool level but at the volume level.

* Why use 'extent_count' instead of 'disk_count'?
Some RAID systems (EMC VMAX/DMX and LVM RAID) do not use disks
directly to assemble a RAID group.

* Why do we need 'min_io_size' and 'opt_io_size' when we have
'extent_count' and 'strip_size'?
Normally, min_io_size is strip_size, and opt_io_size could be calculated
from raid_type, strip_size and extent_count. But on NetApp, an I/O
test[1] indicates their optimal I/O size is 64KiB no matter how many
disks are in the RAID group. It might[2] be because NetApp creates a
WAFL filesystem on the RAID group, which changes the optimal I/O size.

In general, the optimal I/O size or min_io_size of some RAID systems
might not be based on strip size and RAID disk/extent count.
We'd better expose that information directly instead of forcing users
to guess from strip size and disk/extent count.
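
Put together, a hedged usage sketch (assuming a connected lsm.Client 'c'
and one of its volumes 'vol'):

import lsm

(raid_type, strip_size, extent_count,
 min_io_size, opt_io_size) = c.volume_raid_info(vol)
if opt_io_size != lsm.Volume.OPT_IO_SIZE_UNKNOWN:
    # Align sequential I/O to the full stripe for best throughput.
    print("preferred sequential I/O size: %d bytes" % opt_io_size)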

* New constants:
Volume.RAID_TYPE_UNKNOWN
# The plugin failed to detect the volume's RAID type.
Volume.RAID_TYPE_RAID0
# Stripe
Volume.RAID_TYPE_RAID1
# Mirror for two disks. For 4 disks or more, they are RAID10.
Volume.RAID_TYPE_RAID3
# Byte-level striping with dedicated parity
Volume.RAID_TYPE_RAID4
# Block-level striping with dedicated parity
Volume.RAID_TYPE_RAID5
# Block-level striping with distributed parity
Volume.RAID_TYPE_RAID6
# Block-level striping with two distributed parities, aka, RAID-DP
Volume.RAID_TYPE_RAID10
# Stripe of mirrors
Volume.RAID_TYPE_RAID15
# Parity of mirrors
Volume.RAID_TYPE_RAID16
# Dual parity of mirrors
Volume.RAID_TYPE_RAID50
# Stripe of parities
Volume.RAID_TYPE_RAID60
# Stripe of dual parities
Volume.RAID_TYPE_RAID51
# Mirror of parities
Volume.RAID_TYPE_RAID61
# Mirror of dual parities
Volume.RAID_TYPE_JBOD
# Just a bunch of disks, no parity, no striping.
Volume.RAID_TYPE_MIXED
# This volume contains multiple RAID settings.
Volume.RAID_TYPE_OTHER
# Vendor specific RAID type

Volume.STRIP_SIZE_UNKNOWN
Volume.EXTENT_COUNT_UNKNOWN
Volume.MIN_IO_SIZE_UNKNOWN
Volume.OPT_IO_SIZE_UNKNOWN

* New Capability:
lsm.Volume.VOLUME_RAID_INFO

[1] On a 24 disks RAID6(RAID-DP), 4KiB strip size(not changeable):
* With I/O size 90112(4096 * 22), write speed is 73.4 MB/s
* With I/O size 65536, write speed is 86.9 MB/s
# the optimal_io_size exposed via sysfs from SCSI BLOCK LIMITS(0xB0) VPD

[2] No NetApp official document confirm or deny it. Waiting NetApp's reply.

Changes in V2:
* Add 'New in 1.2' docstring.

Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_client.py | 94 +++++++++++++++++++++++++++++++++++++++++++
python_binding/lsm/_data.py | 42 +++++++++++++++++++
2 files changed, 136 insertions(+)

diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index e637962..0fdedd3 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -971,3 +971,97 @@ class Client(INetworkAttachedStorage):
"""
_check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('target_ports', _del_self(locals()))
+
+ ## Returns the RAID information of certain volume
+ # @param self The this pointer
+ # @param raid_type The RAID type of this volume
+ # @param strip_size The size of the strip on each disk or other
+ # storage extent.
+ # @param extent_count The count of disks or other storage extents
+ # in this RAID group.
+ # @param min_io_size The preferred I/O size of random I/O.
+ # @param opt_io_size The preferred I/O size of sequential I/O.
+ # @returns [raid_type, strip_size, extent_count, min_io_size,
+ # opt_io_size], else raises LsmError
+ @_return_requires([int, int, int, int, int])
+ def volume_raid_info(self, volume, flags=FLAG_RSVD):
+ """Query the RAID information of certain volume.
+
+ New in version 1.2.
+
+ Query the RAID type, strip size, extents count, minimum I/O size,
+ optimal I/O size of given volume.
+ This method requires this capability:
+ lsm.Capabilities.VOLUME_RAID_INFO
+
+ Args:
+ volume (Volume object): Volume to query
+ flags (int): Reserved for future use. Should be set as
+ lsm.Client.FLAG_RSVD
+ Returns:
+ [raid_type, strip_size, extent_count, min_io_size, opt_io_size]
+
+ raid_type (int): RAID Type of requested volume.
+ Could be one of these values:
+ Volume.RAID_TYPE_RAID0
+ Stripe
+ Volume.RAID_TYPE_RAID1
+ Two disks Mirror
+ Volume.RAID_TYPE_RAID3
+ Byte-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID4
+ Block-level striping with dedicated parity
+ Volume.RAID_TYPE_RAID5
+ Block-level striping with distributed parity
+ Volume.RAID_TYPE_RAID6
+ Block-level striping with two distributed parities,
+ aka, RAID-DP
+ Volume.RAID_TYPE_RAID10
+ Stripe of mirrors
+ Volume.RAID_TYPE_RAID15
+ Parity of mirrors
+ Volume.RAID_TYPE_RAID16
+ Dual parity of mirrors
+ Volume.RAID_TYPE_RAID50
+ Stripe of parities
+ Volume.RAID_TYPE_RAID60
+ Stripe of dual parities
+ Volume.RAID_TYPE_RAID51
+ Mirror of parities
+ Volume.RAID_TYPE_RAID61
+ Mirror of dual parities
+ Volume.RAID_TYPE_JBOD
+ Just a bunch of disks, no parity, no striping.
+ Volume.RAID_TYPE_UNKNOWN
+ The plugin failed to detect the volume's RAID type.
+ Volume.RAID_TYPE_MIXED
+ This volume contains multiple RAID settings.
+ Volume.RAID_TYPE_OTHER
+ Vendor specific RAID type
+ strip_size(int): The size of strip on each disk or other storage
+ extent.
+ For RAID1/JBOD, it should be set as sector size.
+ If plugin failed to detect strip size, it should be set
+ as Volume.STRIP_SIZE_UNKNOWN(-1).
+ extent_count(int): The count of disks or other storage extents
+ assembled in the RAID group.
+ If plugin failed to detect extent_count, it should be set
+ as Volume.EXTENT_COUNT_UNKNOWN(-1).
+ min_io_size(int): The minimum I/O size, device preferred I/O
+ size for random I/O. Any I/O size not equal to a multiple
+ of this value may get significant speed penalty.
+ Normally it refers to strip size of each disk(extent).
+ If plugin failed to detect min_io_size, it should try these
+ values in the sequence of:
+ logical sector size -> physical sector size ->
+ Volume.MIN_IO_SIZE_UNKNOWN(-1).
+ opt_io_size(int): The optimal I/O size, device preferred I/O
+ size for sequential I/O. Normally it refers to RAID group
+ stripe size.
+ If plugin failed to detect opt_io_size, it should be set
+ to Volume.OPT_IO_SIZE_UNKNOWN
+ Raises:
+ LsmError:
+ ErrorNumber.NO_SUPPORT
+ No support.
+ """
+ return self._tp.rpc('volume_raid_info', _del_self(locals()))
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 067c766..8606f61 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -258,6 +258,46 @@ class Volume(IData):
ADMIN_STATE_DISABLED = 0
ADMIN_STATE_ENABLED = 1

+ RAID_TYPE_UNKNOWN = -1
+ # The plugin failed to detect the volume's RAID type.
+ RAID_TYPE_RAID0 = 0
+ # Stripe
+ RAID_TYPE_RAID1 = 1
+ # Mirror for two disks. For 4 disks or more, they are RAID10.
+ RAID_TYPE_RAID3 = 3
+ # Byte-level striping with dedicated parity
+ RAID_TYPE_RAID4 = 4
+ # Block-level striping with dedicated parity
+ RAID_TYPE_RAID5 = 5
+ # Block-level striping with distributed parity
+ RAID_TYPE_RAID6 = 6
+ # Block-level striping with two distributed parities, aka, RAID-DP
+ RAID_TYPE_RAID10 = 10
+ # Stripe of mirrors
+ RAID_TYPE_RAID15 = 15
+ # Parity of mirrors
+ RAID_TYPE_RAID16 = 16
+ # Dual parity of mirrors
+ RAID_TYPE_RAID50 = 50
+ # Stripe of parities
+ RAID_TYPE_RAID60 = 60
+ # Stripe of dual parities
+ RAID_TYPE_RAID51 = 51
+ # Mirror of parities
+ RAID_TYPE_RAID61 = 61
+ # Mirror of dual parities
+ RAID_TYPE_JBOD = 20
+ # Just a bunch of disks, no parity, no striping.
+ RAID_TYPE_MIXED = 21
+ # This volume contains multiple RAID settings.
+ RAID_TYPE_OTHER = 22
+ # Vendor specific RAID type
+
+ STRIP_SIZE_UNKNOWN = -1
+ EXTENT_COUNT_UNKNOWN = -1
+ MIN_IO_SIZE_UNKNOWN = -1
+ OPT_IO_SIZE_UNKNOWN = -1
+
def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
_admin_state, _system_id, _pool_id, _plugin_data=None):
self._id = _id # Identifier
@@ -669,6 +709,8 @@ class Capabilities(IData):

VOLUME_ISCSI_CHAP_AUTHENTICATION = 53

+ VOLUME_RAID_INFO = 54
+
VOLUME_THIN = 55

#File system
--
1.8.3.1
Gris Ge
2015-02-26 12:35:41 UTC
Permalink
* Please check python API document for detail about lsm_volume_raid_info()
method. Quick info:

Retrieves the RAID information of a volume.
@param[in] c Valid connection
@param[in] v Volume ptr.
@param[out] raid_type Enum of lsm_volume_raid_type
@param[out] strip_size Size of the strip on disk or other storage extent.
@param[out] extent_count Count of disks or other storage extents in this
RAID group.
@param[out] min_io_size Minimum I/O size, also the preferred I/O size
of random I/O.
@param[out] opt_io_size Optimal I/O size, also the preferred I/O size
of sequential I/O.
@param[in] flags Reserved, set to 0
@return LSM_ERR_OK on success else error reason.

* New plugin interface: lsm_plug_volume_raid_info

* New enum type: lsm_volume_raid_type

* New capability:
LSM_CAP_VOLUME_RAID_INFO

* New constants:
LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
/**^ Unknown */
LSM_VOLUME_RAID_TYPE_RAID0 = 0,
/**^ Stripe */
LSM_VOLUME_RAID_TYPE_RAID1 = 1,
/**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
LSM_VOLUME_RAID_TYPE_RAID3 = 3,
/**^ Byte-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID4 = 4,
/**^ Block-level striping with dedicated parity */
LSM_VOLUME_RAID_TYPE_RAID5 = 5,
/**^ Block-level striping with distributed parity */
LSM_VOLUME_RAID_TYPE_RAID6 = 6,
/**^ Block-level striping with two distributed parities, aka, RAID-DP */
LSM_VOLUME_RAID_TYPE_RAID10 = 10,
/**^ Stripe of mirrors */
LSM_VOLUME_RAID_TYPE_RAID15 = 15,
/**^ Parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID16 = 16,
/**^ Dual parity of mirrors */
LSM_VOLUME_RAID_TYPE_RAID50 = 50,
/**^ Stripe of parities */
LSM_VOLUME_RAID_TYPE_RAID60 = 60,
/**^ Stripe of dual parities */
LSM_VOLUME_RAID_TYPE_RAID51 = 51,
/**^ Mirror of parities */
LSM_VOLUME_RAID_TYPE_RAID61 = 61,
/**^ Mirror of dual parities */
LSM_VOLUME_RAID_TYPE_JBOD = 20,
/**^ Just a bunch of disks, no parity, no striping. */
LSM_VOLUME_RAID_TYPE_MIXED = 21,
/**^ This volume contains multiple RAID settings. */
LSM_VOLUME_RAID_TYPE_OTHER = 22,
/**^ Vendor specific RAID type */

LSM_VOLUME_STRIP_SIZE_UNKNOWN
LSM_VOLUME_EXTENT_COUNT_UNKNOWN
LSM_VOLUME_MIN_IO_SIZE_UNKNOWN
LSM_VOLUME_OPT_IO_SIZE_UNKNOWN

V2: Change callback registration

Changes in V3:

* New implementation for adding new methods:
* New struct lsm_ops_v1_2:
Free to change during the version 1.2 development phase.
Will be frozen once 1.2 is released.

* New plugin register method: lsm_register_plugin_v1_2()
It takes all arguments required by old lsm_register_plugin_v1()
addition to struct lsm_ops_v1_2 pointer.

* Once version 1.2 released, we could work on struct lsm_ops_v1_3 and
lsm_register_plugin_v1_3().

* Add 'New in version 1.2' comment to the lsm_volume_raid_info() function.
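
For comparison, the Python binding needs no registration call; a plugin
simply implements the method on its IPlugin subclass (a hypothetical
sketch, values made up):

from lsm import IPlugin, Volume

class MyPlugin(IPlugin):
    def volume_raid_info(self, volume, flags=0):
        # RAID5, 64 KiB strip, 4 extents, opt I/O = 3 data disks * strip
        return [Volume.RAID_TYPE_RAID5, 65536, 4, 65536, 65536 * 3]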

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 20 +++++++
.../libstoragemgmt/libstoragemgmt_capabilities.h | 3 ++
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 55 +++++++++++++++++++
.../include/libstoragemgmt/libstoragemgmt_types.h | 43 +++++++++++++++
c_binding/lsm_datatypes.hpp | 1 +
c_binding/lsm_mgmt.cpp | 44 +++++++++++++++
c_binding/lsm_plugin_ipc.cpp | 62 +++++++++++++++++++++-
7 files changed, 227 insertions(+), 1 deletion(-)

diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 879f184..b7e7e5b 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -844,6 +844,26 @@ extern "C" {
uint32_t *count,
lsm_flag flags);

+/**
+ * Retrieves the RAID information of the volume. New in version 1.2.
+ * @param[in] c Valid connection
+ * @param[in] v Volume ptr.
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on disk or other storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in this
+ * RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved, set to 0
+ * @return LSM_ERR_OK on success else error reason.
+ */
+int LSM_DLL_EXPORT lsm_volume_raid_info(
+ lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size, lsm_flag flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index 7d6182c..18490f3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -77,6 +77,9 @@ typedef enum {

LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**< If you can configure iSCSI chap authentication */

+ LSM_CAP_VOLUME_RAID_INFO = 54,
+ /** ^ If you can query RAID information from volume */
+
LSM_CAP_VOLUME_THIN = 55, /**< Thin provisioned volumes are supported */

LSM_CAP_FS = 100, /**< List file systems */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index e7874f7..11c6653 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -745,6 +745,8 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
lsm_flag flags);
/** \struct lsm_san_ops_v1
* \brief Block array oriented functions (callback functions)
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
@@ -774,6 +776,8 @@ struct lsm_san_ops_v1 {

/** \struct lsm_fs_ops_v1
* \brief File system oriented functionality
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_fs_ops_v1 {
lsm_plug_fs_list fs_list; /**< list file systems */
@@ -792,6 +796,8 @@ struct lsm_fs_ops_v1 {

/** \struct lsm_nas_ops_v1
* \brief NAS system oriented functionality call back functions
+ * NOTE: This structure cannot change as we need to maintain backwards
+ * compatibility
*/
struct lsm_nas_ops_v1 {
lsm_plug_nfs_auth_types nfs_auth_types; /**< List nfs authentication types */
@@ -801,6 +807,37 @@ struct lsm_nas_ops_v1 {
};

/**
+ * Query the RAID information of a volume
+ * @param[in] c Valid lsm plug-in pointer
+ * @param[in] volume Volume to query
+ * @param[out] raid_type Enum of lsm_volume_raid_type
+ * @param[out] strip_size Size of the strip on each disk or other
+ * storage extent.
+ * @param[out] extent_count Count of disks or other storage extents in
+ * this RAID group.
+ * @param[out] min_io_size Minimum I/O size, also the preferred I/O size
+ * of random I/O.
+ * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size
+ * of sequential I/O.
+ * @param[in] flags Reserved
+ * @return LSM_ERR_OK, else error reason
+ */
+typedef int (*lsm_plug_volume_raid_info)(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type, int32_t *strip_size,
+ int32_t *extent_count, int32_t *min_io_size,
+ int32_t *opt_io_size, lsm_flag flags);
+
+/** \struct lsm_ops_v1_2
+ * \brief Functions added in version 1.2
+ * NOTE: This structure may change during development until version 1.2
+ * is released.
+ */
+struct lsm_ops_v1_2 {
+ lsm_plug_volume_raid_info vol_raid_info;
+ /**^ Query volume RAID information*/
+};
+
+/**
* Copies the memory pointed to by item with given type t.
* @param t Type of item to copy
* @param item Pointer to src
@@ -839,6 +876,24 @@ int LSM_DLL_EXPORT lsm_register_plugin_v1( lsm_plugin_ptr plug,
struct lsm_nas_ops_v1 *nas_ops );

/**
+ * Used to register version 1.2 APIs plug-in operation.
+ * @param plug Pointer provided by the framework
+ * @param private_data Private data to be used for whatever the plug-in
+ * needs
+ * @param mgm_ops Function pointers for struct lsm_mgmt_ops_v1
+ * @param san_ops Function pointers for struct lsm_san_ops_v1
+ * @param fs_ops Function pointers for struct lsm_fs_ops_v1
+ * @param nas_ops Function pointers for struct lsm_nas_ops_v1
+ * @param ops_v1_2 Function pointers for struct lsm_ops_v1_2
+ * @return LSM_ERR_OK on success, else error reason.
+ */
+int LSM_DLL_EXPORT lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug,
+ void * private_data, struct lsm_mgmt_ops_v1 *mgm_ops,
+ struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops,
+ struct lsm_nas_ops_v1 *nas_ops, struct lsm_ops_v1_2 *ops_v1_2);
+
+/**
* Used to retrieve private data for plug-in operation.
* @param plug Opaque plug-in pointer.
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 309a5e8..5465dad 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -131,6 +131,49 @@ typedef enum {
LSM_VOLUME_PROVISION_DEFAULT = 3 /**< Default provisioning */
} lsm_volume_provision_type;

+/**< \enum lsm_volume_raid_type Different types of RAID */
+typedef enum {
+ LSM_VOLUME_RAID_TYPE_UNKNOWN = -1,
+ /**^ Unknown */
+ LSM_VOLUME_RAID_TYPE_RAID0 = 0,
+ /**^ Stripe */
+ LSM_VOLUME_RAID_TYPE_RAID1 = 1,
+ /**^ Mirror between two disks. For 4 disks or more, they are RAID10.*/
+ LSM_VOLUME_RAID_TYPE_RAID3 = 3,
+ /**^ Byte-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID4 = 4,
+ /**^ Block-level striping with dedicated parity */
+ LSM_VOLUME_RAID_TYPE_RAID5 = 5,
+ /**^ Block-level striping with distributed parity */
+ LSM_VOLUME_RAID_TYPE_RAID6 = 6,
+ /**^ Block-level striping with two distributed parities, aka, RAID-DP */
+ LSM_VOLUME_RAID_TYPE_RAID10 = 10,
+ /**^ Stripe of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID15 = 15,
+ /**^ Parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID16 = 16,
+ /**^ Dual parity of mirrors */
+ LSM_VOLUME_RAID_TYPE_RAID50 = 50,
+ /**^ Stripe of parities */
+ LSM_VOLUME_RAID_TYPE_RAID60 = 60,
+ /**^ Stripe of dual parities */
+ LSM_VOLUME_RAID_TYPE_RAID51 = 51,
+ /**^ Mirror of parities */
+ LSM_VOLUME_RAID_TYPE_RAID61 = 61,
+ /**^ Mirror of dual parities */
+ LSM_VOLUME_RAID_TYPE_JBOD = 20,
+ /**^ Just a bunch of disks, no parity, no striping. */
+ LSM_VOLUME_RAID_TYPE_MIXED = 21,
+ /**^ This volume contains multiple RAID settings. */
+ LSM_VOLUME_RAID_TYPE_OTHER = 22,
+ /**^ Vendor specific RAID type */
+} lsm_volume_raid_type;
+
+#define LSM_VOLUME_STRIP_SIZE_UNKNOWN -1
+#define LSM_VOLUME_EXTENT_COUNT_UNKNOWN -1
+#define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN -1
+#define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN -1
+
/**
* Admin state for volume, enabled or disabled
*/
diff --git a/c_binding/lsm_datatypes.hpp b/c_binding/lsm_datatypes.hpp
index aed6891..6a6271f 100644
--- a/c_binding/lsm_datatypes.hpp
+++ b/c_binding/lsm_datatypes.hpp
@@ -193,6 +193,7 @@ struct LSM_DLL_LOCAL _lsm_plugin {
struct lsm_san_ops_v1 *san_ops; /**< Callbacks for SAN ops */
struct lsm_nas_ops_v1 *nas_ops; /**< Callbacks for NAS ops */
struct lsm_fs_ops_v1 *fs_ops; /**< Callbacks for fs ops */
+ struct lsm_ops_v1_2 *ops_v1_2; /**< Callbacks for v1.2 ops */
};


diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 37faed4..b8734db 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -1171,6 +1171,50 @@ int lsm_volume_delete(lsm_connect *c, lsm_volume *volume, char **job,

}

+int lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume,
+ lsm_volume_raid_type * raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ CONN_SETUP(c);
+
+ if( !LSM_IS_VOL(volume) ) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ if( !raid_type || !strip_size || !extent_count || !min_io_size ||
+ !opt_io_size) {
+ return LSM_ERR_INVALID_ARGUMENT;
+ }
+
+ try {
+ std::map<std::string, Value> p;
+ p["volume"] = volume_to_value(volume);
+ p["flags"] = Value(flags);
+
+ Value parameters(p);
+ Value response;
+
+ rc = rpc(c, "volume_raid_info", parameters, response);
+ if( LSM_ERR_OK == rc ) {
+ // We get an array back: [raid_type, strip_size, extent_count,
+ // min_io_size, opt_io_size].
+ std::vector<Value> j = response.asArray();
+ *raid_type = (lsm_volume_raid_type) j[0].asInt32_t();
+ *strip_size = j[1].asInt32_t();
+ *extent_count = j[2].asInt32_t();
+ *min_io_size = j[3].asInt32_t();
+ *opt_io_size = j[4].asInt32_t();
+ }
+ } catch( const ValueException &ve ) {
+ rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
+ ve.what());
+ }
+ return rc;
+
+}
+
int lsm_iscsi_chap_auth(lsm_connect *c, const char *init_id,
const char *username, const char *password,
const char *out_user, const char *out_password,
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index 7e0d034..0c27269 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -123,6 +123,21 @@ int lsm_register_plugin_v1(lsm_plugin_ptr plug,
return rc;
}

+int lsm_register_plugin_v1_2(
+ lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op,
+ struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op,
+ struct lsm_nas_ops_v1 *nas_op, struct lsm_ops_v1_2 *ops_v1_2)
+{
+ int rc = lsm_register_plugin_v1(
+ plug, private_data, mgm_op, san_op, fs_op, nas_op);
+
+ if (rc != LSM_ERR_OK){
+ return rc;
+ }
+ plug->ops_v1_2 = ops_v1_2;
+ return rc;
+}
+
void *lsm_private_data_get(lsm_plugin_ptr plug)
{
if (!LSM_IS_PLUGIN(plug)) {
@@ -956,6 +971,50 @@ static int handle_volume_disable(lsm_plugin_ptr p, Value &params, Value &respons
return handle_vol_enable_disable(p, params, response, 0);
}

+static int handle_volume_raid_info(lsm_plugin_ptr p, Value &params,
+ Value &response)
+{
+ int rc = LSM_ERR_NO_SUPPORT;
+ if( p && p->ops_v1_2 && p->ops_v1_2->vol_raid_info) {
+ Value v_vol = params["volume"];
+
+ if(IS_CLASS_VOLUME(v_vol) &&
+ LSM_FLAG_EXPECTED_TYPE(params) ) {
+ lsm_volume *vol = value_to_volume(v_vol);
+ std::vector<Value> result;
+
+ if( vol ) {
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size;
+ int32_t extent_count;
+ int32_t min_io_size;
+ int32_t opt_io_size;
+
+ rc = p->ops_v1_2->vol_raid_info(
+ p, vol, &raid_type, &strip_size, &extent_count,
+ &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params));
+
+ if( LSM_ERR_OK == rc ) {
+ result.push_back(Value((int32_t)raid_type));
+ result.push_back(Value(strip_size));
+ result.push_back(Value(extent_count));
+ result.push_back(Value(min_io_size));
+ result.push_back(Value(opt_io_size));
+ response = Value(result);
+ }
+
+ lsm_volume_record_free(vol);
+ } else {
+ rc = LSM_ERR_NO_MEMORY;
+ }
+
+ } else {
+ rc = LSM_ERR_TRANSPORT_INVALID_ARG;
+ }
+ }
+ return rc;
+}
+
static int ag_list(lsm_plugin_ptr p, Value &params, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2153,7 +2212,8 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("volume_replicate_range", handle_volume_replicate_range)
("volume_resize", handle_volume_resize)
("volumes_accessible_by_access_group", vol_accessible_by_ag)
- ("volumes", handle_volumes);
+ ("volumes", handle_volumes)
+ ("volume_raid_info", handle_volume_raid_info);

static int process_request(lsm_plugin_ptr p, const std::string &method, Value &request,
Value &response)
--
1.8.3.1
Gris Ge
2015-02-26 12:35:42 UTC
Permalink
* Allow check_const.pl to check constants with numbers in them.
Example:
LSM_VOLUME_RAID_TYPE_RAID1
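
The effect of the character-class change, as a quick sketch (Python used
only to illustrate the pattern):

import re

old = re.compile(r'^[A-Z][A-Z_]+$')
new = re.compile(r'^[A-Z][A-Z_0-9]+$')
name = 'LSM_VOLUME_RAID_TYPE_RAID1'
print(bool(old.match(name)), bool(new.match(name)))   # False True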

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/utility/check_const.pl | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/utility/check_const.pl b/tools/utility/check_const.pl
index 9e5a700..e41c1e9 100644
--- a/tools/utility/check_const.pl
+++ b/tools/utility/check_const.pl
@@ -101,7 +101,7 @@ my $REGEX_C_CONST_FORMAT = qr/
(?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT)
)
(?<CNAME_PAT>
- [A-Z][A-Z_]+
+ [A-Z][A-Z_0-9]+
)
(?<HEADER1>
[\ \t]*
@@ -179,7 +179,7 @@ sub py_name_2_c_name($) {
# 2. Convert System to SYSTEM
# 3. Convert Capabilities to CAP and etc using %PY_CLASS_NAME_CONV;
my $py_name = shift;
- if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_]+)$/ ) {
+ if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_][A-Z_0-9]+)$/ ) {
my $py_class_name = $1;
my $py_var_name = $2;

@@ -308,7 +308,7 @@ sub _get_py_class_consts($$){
}
if ($line =~ /^$current_idention
[\ ]+
- ([A-Z][A-Z\_]+)
+ ([A-Z][A-Z\_0-9]+)
[\ ]*=[\ ]*
($REGEX_VALUE_FORMAT)/x){
my $var_name = $1;
--
1.8.3.1
Gris Ge
2015-02-26 12:35:44 UTC
Permalink
* Introduced full support of volume_raid_info().
* For a sub-pool, use the RAID info from the parent pool.
* For RAID 1 and JBOD, set strip_size, min_io_size, and opt_io_size
to the block size (512).
* For other RAID types, calculate opt_io_size from the data disk count.
* For RAID_TYPE_MIXED, raise PLUGIN_BUG LsmError.

* Replaced PoolRAID.RAID_TYPE_RAID_XXX with Volume.RAID_TYPE_RAID_XXX.

* Replaced PoolRAID.RAID_TYPE_NOT_APPLICABLE with Volume.RAID_TYPE_OTHER.

* Bumped simulator data version to 3.1 due to previous Volume.RAID_TYPE_OTHER
change.
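
The resulting arithmetic, as a small sketch using the BackStore
constants from the diff below (the disk count is hypothetical):

BLK_SIZE = 512
STRIP_SIZE = 131072

# RAID 1 / JBOD: strip, min and opt I/O all collapse to BLK_SIZE.
# Other RAID levels, e.g. a RAID 5 pool with 4 disks of role 'DATA':
data_disk_count = 4
min_io_size = STRIP_SIZE
opt_io_size = data_disk_count * STRIP_SIZE   # 524288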

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/sim/simarray.py | 151 +++++++++++++++++++++++++++++++-----------------
plugin/sim/simulator.py | 5 +-
2 files changed, 102 insertions(+), 54 deletions(-)

diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index 73f4492..d4feb7f 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -67,26 +67,6 @@ def _random_vpd():


class PoolRAID(object):
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
MEMBER_TYPE_UNKNOWN = 0
MEMBER_TYPE_DISK = 1
MEMBER_TYPE_DISK_MIX = 10
@@ -136,37 +116,37 @@ class PoolRAID(object):
return PoolRAID.MEMBER_TYPE_UNKNOWN

_RAID_DISK_CHK = {
- RAID_TYPE_JBOD: lambda x: x > 0,
- RAID_TYPE_RAID0: lambda x: x > 0,
- RAID_TYPE_RAID1: lambda x: x == 2,
- RAID_TYPE_RAID3: lambda x: x >= 3,
- RAID_TYPE_RAID4: lambda x: x >= 3,
- RAID_TYPE_RAID5: lambda x: x >= 3,
- RAID_TYPE_RAID6: lambda x: x >= 4,
- RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
- RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
- RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
- RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_JBOD: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID0: lambda x: x > 0,
+ Volume.RAID_TYPE_RAID1: lambda x: x == 2,
+ Volume.RAID_TYPE_RAID3: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID4: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID5: lambda x: x >= 3,
+ Volume.RAID_TYPE_RAID6: lambda x: x >= 4,
+ Volume.RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0,
+ Volume.RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0,
}

_RAID_PARITY_DISK_COUNT_FUNC = {
- RAID_TYPE_JBOD: lambda x: x,
- RAID_TYPE_RAID0: lambda x: x,
- RAID_TYPE_RAID1: lambda x: 1,
- RAID_TYPE_RAID3: lambda x: x - 1,
- RAID_TYPE_RAID4: lambda x: x - 1,
- RAID_TYPE_RAID5: lambda x: x - 1,
- RAID_TYPE_RAID6: lambda x: x - 2,
- RAID_TYPE_RAID10: lambda x: x / 2,
- RAID_TYPE_RAID15: lambda x: x / 2 - 1,
- RAID_TYPE_RAID16: lambda x: x / 2 - 2,
- RAID_TYPE_RAID50: lambda x: x - 2,
- RAID_TYPE_RAID60: lambda x: x - 4,
- RAID_TYPE_RAID51: lambda x: x / 2 - 1,
- RAID_TYPE_RAID61: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_JBOD: lambda x: x,
+ Volume.RAID_TYPE_RAID0: lambda x: x,
+ Volume.RAID_TYPE_RAID1: lambda x: 1,
+ Volume.RAID_TYPE_RAID3: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID4: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID5: lambda x: x - 1,
+ Volume.RAID_TYPE_RAID6: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID10: lambda x: x / 2,
+ Volume.RAID_TYPE_RAID15: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID16: lambda x: x / 2 - 2,
+ Volume.RAID_TYPE_RAID50: lambda x: x - 2,
+ Volume.RAID_TYPE_RAID60: lambda x: x - 4,
+ Volume.RAID_TYPE_RAID51: lambda x: x / 2 - 1,
+ Volume.RAID_TYPE_RAID61: lambda x: x / 2 - 2,
}

@staticmethod
@@ -191,7 +171,7 @@ class PoolRAID(object):


class BackStore(object):
- VERSION = "3.0"
+ VERSION = "3.1"
VERSION_SIGNATURE = 'LSM_SIMULATOR_DATA_%s_%s' % (VERSION, md5(VERSION))
JOB_DEFAULT_DURATION = 1
JOB_DATA_TYPE_VOL = 1
@@ -201,6 +181,7 @@ class BackStore(object):
SYS_ID = "sim-01"
SYS_NAME = "LSM simulated storage plug-in"
BLK_SIZE = 512
+ STRIP_SIZE = 131072 # 128 KiB

_LIST_SPLITTER = '#'

@@ -724,7 +705,7 @@ class BackStore(object):

pool_1_id = self.sim_pool_create_from_disk(
name='Pool 1',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=pool_1_disks,
element_type=Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
@@ -744,7 +725,7 @@ class BackStore(object):

self.sim_pool_create_from_disk(
name='Pool 3',
- raid_type=PoolRAID.RAID_TYPE_RAID1,
+ raid_type=Volume.RAID_TYPE_RAID1,
sim_disk_ids=ssd_pool_disks,
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
@@ -755,7 +736,7 @@ class BackStore(object):
element_type=Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME |
Pool.ELEMENT_TYPE_DELTA,
- raid_type=PoolRAID.RAID_TYPE_RAID0,
+ raid_type=Volume.RAID_TYPE_RAID0,
sim_disk_ids=test_pool_disks)

self._data_add(
@@ -1009,13 +990,23 @@ class BackStore(object):
'status_info': '',
'element_type': element_type,
'unsupported_actions': unsupported_actions,
- 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': Volume.RAID_TYPE_OTHER,
'member_type': PoolRAID.MEMBER_TYPE_POOL,
'parent_pool_id': parent_pool_id,
'total_space': size,
})
return self.lastrowid

+ def sim_pool_disks_count(self, sim_pool_id):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE owner_pool_id=%s;" %
+ sim_pool_id)[0][0]
+
+ def sim_pool_data_disks_count(self, sim_pool_id):
+ return self._sql_exec(
+ "SELECT COUNT(id) FROM disks WHERE "
+ "owner_pool_id=%s and role='DATA';" % sim_pool_id)[0][0]
+
def sim_vols(self, sim_ag_id=None):
"""
Return a list of sim_vol dict.
@@ -2231,3 +2222,57 @@ class SimArray(object):
@_handle_errors
def target_ports(self):
return list(SimArray._sim_tgt_2_lsm(t) for t in self.bs_obj.sim_tgts())
+
+ @_handle_errors
+ def volume_raid_info(self, lsm_vol):
+ sim_pool = self.bs_obj.sim_pool_of_id(
+ SimArray._lsm_id_to_sim_id(
+ lsm_vol.pool_id,
+ LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found")))
+
+ raid_type = sim_pool['raid_type']
+ strip_size = Volume.STRIP_SIZE_UNKNOWN
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = Volume.OPT_IO_SIZE_UNKNOWN
+ extent_count = Volume.EXTENT_COUNT_UNKNOWN
+
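+ # A pool with MEMBER_TYPE_POOL is allocated from a parent pool, so
+ # its RAID properties are those of the parent.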
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
+ parent_sim_pool = self.bs_obj.sim_pool_of_id(
+ sim_pool['parent_pool_id'])
+ raid_type = parent_sim_pool['raid_type']
+
+ extent_count = self.bs_obj.sim_pool_disks_count(
+ parent_sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ parent_sim_pool['id'])
+ else:
+ extent_count = self.bs_obj.sim_pool_disks_count(
+ sim_pool['id'])
+ data_disk_count = self.bs_obj.sim_pool_data_disks_count(
+ sim_pool['id'])
+
+ if raid_type == Volume.RAID_TYPE_UNKNOWN or \
+ raid_type == Volume.RAID_TYPE_OTHER:
+ return [
+ raid_type, strip_size, extent_count, min_io_size,
+ opt_io_size]
+
+ if raid_type == Volume.RAID_TYPE_MIXED:
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unsupported RAID_TYPE_MIXED pool "
+ "%s" % sim_pool['id'])
+
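+ # RAID1 and JBOD involve no striping, so every I/O size equals the
+ # block size. For striped RAID types the optimal I/O size is one full
+ # stripe: strip size multiplied by the data disk count.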
+ if raid_type == Volume.RAID_TYPE_RAID1 or \
+ raid_type == Volume.RAID_TYPE_JBOD:
+ strip_size = BackStore.BLK_SIZE
+ min_io_size = BackStore.BLK_SIZE
+ opt_io_size = BackStore.BLK_SIZE
+ else:
+ strip_size = BackStore.STRIP_SIZE
+ min_io_size = BackStore.STRIP_SIZE
+ opt_io_size = int(data_disk_count * BackStore.STRIP_SIZE)
+
+ return [
+ raid_type, strip_size, extent_count, min_io_size,
+ opt_io_size]
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index 8f7adfc..1c81cf3 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -17,7 +17,7 @@
# Gris Ge <***@redhat.com>

from lsm import (uri_parse, VERSION, Capabilities, INfs,
- IStorageAreaNetwork, search_property)
+ IStorageAreaNetwork, search_property, Volume)

from simarray import SimArray

@@ -289,3 +289,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
return search_property(
[SimPlugin._sim_data_2_lsm(t) for t in sim_tgts],
search_key, search_value)
+
+ def volume_raid_info(self, volume, flags=0):
+ return self.sim_array.volume_raid_info(volume)
--
1.8.3.1
Gris Ge
2015-02-26 12:35:43 UTC
Permalink
* New command:
lsmcli volume-raid-info --vol <VOL_ID>

* New alias:
lsmcli vri == lsmcli volume-raid-info
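
Illustrative run against the simulator (the -u sim:// URI and the
values shown are examples, not authoritative output; a sim RAID1 pool
reports 512-byte strip, minimum and optimal I/O sizes):

  $ lsmcli volume-raid-info --vol <VOL_ID> -u sim://
  Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
  -----------------------------------------------------------------------------------------
  <VOL_ID>  | RAID1     | 512        | 2            | 512              | 512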

Changes in V2:
* Fix output format when a volume that cannot be found is passed to
  _get_item() in volume_raid_info()

Signed-off-by: Gris Ge <***@redhat.com>
---
tools/lsmcli/cmdline.py | 18 +++++++++++++-
tools/lsmcli/data_display.py | 58 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index a781314..980b3a0 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,7 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,

from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- vol_provision_str_to_type, vol_rep_type_str_to_type)
+ vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo)


## Wraps the invocation to the command line
@@ -368,6 +368,14 @@ cmds = (
),

dict(
+ name='volume-raid-info',
+ help='Query volume RAID infomation',
+ args=[
+ dict(vol_id_opt),
+ ],
+ ),
+
+ dict(
name='access-group-create',
help='Create an access group',
args=[
@@ -628,6 +636,7 @@ aliases = (
['aa', 'access-group-add'],
['ar', 'access-group-remove'],
['ad', 'access-group-delete'],
+ ['vri', 'volume-raid-info'],
)


@@ -1318,6 +1327,13 @@ class CmdLine:
self._wait_for_it("volume-dependant-rm",
self.c.volume_child_dependency_rm(v), None)

+ def volume_raid_info(self, args):
+ lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume")
+ self.display_data(
+ [
+ VolumeRAIDInfo(
+ lsm_vol.id, *self.c.volume_raid_info(lsm_vol))])
+
## Displays file system dependants
def fs_dependants(self, args):
fs = _get_item(self.c.fs(), args.fs, "File System")
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 285a14f..6dd5ffa 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -243,6 +243,41 @@ class PlugData(object):
self.version = plugin_version


+class VolumeRAIDInfo(object):
+ _RAID_TYPE_MAP = {
+ Volume.RAID_TYPE_RAID0: 'RAID0',
+ Volume.RAID_TYPE_RAID1: 'RAID1',
+ Volume.RAID_TYPE_RAID3: 'RAID3',
+ Volume.RAID_TYPE_RAID4: 'RAID4',
+ Volume.RAID_TYPE_RAID5: 'RAID5',
+ Volume.RAID_TYPE_RAID6: 'RAID6',
+ Volume.RAID_TYPE_RAID10: 'RAID10',
+ Volume.RAID_TYPE_RAID15: 'RAID15',
+ Volume.RAID_TYPE_RAID16: 'RAID16',
+ Volume.RAID_TYPE_RAID50: 'RAID50',
+ Volume.RAID_TYPE_RAID60: 'RAID60',
+ Volume.RAID_TYPE_RAID51: 'RAID51',
+ Volume.RAID_TYPE_RAID61: 'RAID61',
+ Volume.RAID_TYPE_JBOD: 'JBOD',
+ Volume.RAID_TYPE_MIXED: 'MIXED',
+ Volume.RAID_TYPE_OTHER: 'OTHER',
+ Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ }
+
+ def __init__(self, vol_id, raid_type, strip_size, extent_count,
+ min_io_size, opt_io_size):
+ self.vol_id = vol_id
+ self.raid_type = raid_type
+ self.strip_size = strip_size
+ self.extent_count = extent_count
+ self.min_io_size = min_io_size
+ self.opt_io_size = opt_io_size
+
+ @staticmethod
+ def raid_type_to_str(raid_type):
+ return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP)
+
+
class DisplayData(object):

def __init__(self):
@@ -498,6 +533,29 @@ class DisplayData(object):
'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN,
}

+ VOL_RAID_INFO_HEADER = OrderedDict()
+ VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID'
+ VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type'
+ VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size'
+ VOL_RAID_INFO_HEADER['extent_count'] = 'Extent Count'
+ VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size'
+ VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size'
+
+ VOL_RAID_INFO_COLUMN_SKIP_KEYS = []
+
+ VOL_RAID_INFO_VALUE_CONV_ENUM = {
+ 'raid_type': VolumeRAIDInfo.raid_type_to_str,
+ }
+ VOL_RAID_INFO_VALUE_CONV_HUMAN = [
+ 'strip_size', 'min_io_size', 'opt_io_size']
+
+ VALUE_CONVERT[VolumeRAIDInfo] = {
+ 'headers': VOL_RAID_INFO_HEADER,
+ 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS,
+ 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN,
+ }
+
@staticmethod
def _get_man_pro_value(obj, key, value_conv_enum, value_conv_human,
flag_human, flag_enum):
--
1.8.3.1
Gris Ge
2015-02-26 12:35:45 UTC
Permalink
* Simply set XXX_UNKNOWN on each output parameter.

Changes in V2:
* Add call to register volume_raid_info
* Add LSM_CAP_VOLUME_RAID_INFO to capabilities

Changes in V3:
* Use lsm_register_plugin_v1_2() to register lsm_volume_raid_info() support.
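
For reference, a minimal Python-binding sketch of what a caller sees
from this plugin (the simc:// URI is assumed; the unpack order matches
volume_raid_info() elsewhere in this series):

  from lsm import Client, Volume

  c = Client('simc://')
  vol = c.volumes()[0]
  (raid_type, strip_size, extent_count,
   min_io_size, opt_io_size) = c.volume_raid_info(vol)
  # With this patch every value is the matching *_UNKNOWN constant,
  # e.g. raid_type == Volume.RAID_TYPE_UNKNOWN.
  c.close()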

Signed-off-by: Gris Ge <***@redhat.com>
Signed-off-by: Tony Asleson <***@redhat.com>
---
plugin/simc/simc_lsmplugin.c | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)

diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index 7c4d287..987d096 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -391,6 +391,7 @@ static int cap(lsm_plugin_ptr c, lsm_system *system,
LSM_CAP_EXPORTS,
LSM_CAP_EXPORT_FS,
LSM_CAP_EXPORT_REMOVE,
+ LSM_CAP_VOLUME_RAID_INFO,
-1
);

@@ -956,6 +957,33 @@ static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
return rc;
}

+static int volume_raid_info(lsm_plugin_ptr c, lsm_volume *volume,
+ lsm_volume_raid_type *raid_type,
+ int32_t *strip_size, int32_t *extent_count,
+ int32_t *min_io_size, int32_t *opt_io_size,
+ lsm_flag flags)
+{
+ int rc = LSM_ERR_OK;
+ struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
+ struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
+
+ if( !av) {
+ rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
+ "volume not found!");
+ }
+
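+ /* The outputs are populated with the UNKNOWN constants even when the
+ * volume lookup fails, so callers never read uninitialized values. */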
+ *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN;
+ *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN;
+ *extent_count = LSM_VOLUME_EXTENT_COUNT_UNKNOWN;
+ *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN;
+ *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN;
+ return rc;
+}
+
+static struct lsm_ops_v1_2 ops_v1_2 = {
+ volume_raid_info
+};
+
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume *v,
lsm_flag flags)
{
@@ -1527,7 +1555,6 @@ static struct lsm_san_ops_v1 san_ops = {
list_targets
};

-
static int fs_list(lsm_plugin_ptr c, const char *search_key,
const char *search_value, lsm_fs **fs[], uint32_t *count,
lsm_flag flags)
@@ -2243,8 +2270,8 @@ int load( lsm_plugin_ptr c, const char *uri, const char *password,
_unload(pd);
pd = NULL;
} else {
- rc = lsm_register_plugin_v1( c, pd, &mgm_ops,
- &san_ops, &fs_ops, &nfs_ops);
+ rc = lsm_register_plugin_v1_2(
+ c, pd, &mgm_ops, &san_ops, &fs_ops, &nfs_ops, &ops_v1_2);
}
}
return rc;
--
1.8.3.1
Gris Ge
2015-02-26 12:35:46 UTC
Permalink
* Simply run the volume-raid-info command and check the volume ID in
  its output; see the note below on the expected fields.
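
The parsed row is expected to carry six fields: volume ID, RAID type,
strip size, extent count, minimum I/O size and optimal I/O size.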

Signed-off-by: Gris Ge <***@redhat.com>
---
test/cmdtest.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)

diff --git a/test/cmdtest.py b/test/cmdtest.py
index b603601..e80e027 100755
--- a/test/cmdtest.py
+++ b/test/cmdtest.py
@@ -676,6 +676,25 @@ def search_test(cap, system_id):
volume_delete(vol_id)
return

+def volume_raid_info_test(cap, system_id):
+ if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']:
+ test_pool_id = name_to_id(OP_POOL, test_pool_name)
+
+ if test_pool_id is None:
+ print 'Pool %s is not available!' % test_pool_name
+ exit(10)
+
+ vol_id = create_volume(test_pool_id)
+ out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1]
+ r = parse(out)
+ if len(r[0]) != 6:
+ print "volume-raid-info got expected output: %s" % out
+ exit(10)
+ if r[0][0] != vol_id:
+ print "volume-raid-info output volume ID is not requested " \
+ "volume ID %s" % out
+ exit(10)
+ return

def run_all_tests(cap, system_id):
test_display(cap, system_id)
@@ -688,6 +707,8 @@ def run_all_tests(cap, system_id):

search_test(cap, system_id)

+ volume_raid_info_test(cap, system_id)
+
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c", "--command", action="store", type="string",
--
1.8.3.1
Gris Ge
2015-02-26 12:35:47 UTC
Permalink
* Simply invoke lsm_volume_raid_info() with no additional checking of
  the returned values.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)

diff --git a/test/tester.c b/test/tester.c
index 2edd18c..6cae568 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -2858,6 +2858,35 @@ START_TEST(test_volume_vpd_check)
}
END_TEST

+START_TEST(test_volume_raid_info)
+{
+ lsm_volume *volume = NULL;
+ char *job = NULL;
+ lsm_pool *pool = get_test_pool(c);
+
+ int rc = lsm_volume_create(
+ c, pool, "volume_raid_info_test", 20000000,
+ LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD);
+
+ fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED,
+ "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c)));
+
+ if( LSM_ERR_JOB_STARTED == rc ) {
+ volume = wait_for_job_vol(c, &job);
+ }
+
+ lsm_volume_raid_type raid_type;
+ int32_t strip_size, extent_count, min_io_size, opt_io_size;
+
+ G(
+ rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size,
+ &extent_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD);
+
+ G(rc, lsm_volume_record_free, volume);
+ volume = NULL;
+}
+END_TEST
+
Suite * lsm_suite(void)
{
Suite *s = suite_create("libStorageMgmt");
@@ -2893,6 +2922,7 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_ss);
tcase_add_test(basic, test_nfs_exports);
tcase_add_test(basic, test_invalid_input);
+ tcase_add_test(basic, test_volume_raid_info);

suite_add_tcase(s, basic);
return s;
--
1.8.3.1
Gris Ge
2015-02-26 12:35:48 UTC
Permalink
* Use 'storcli /c0/v1 show all' command line output to determine
RAID type, strip size and disk count.

* Calculate optimal I/O size as strip size multiplied by the count of
  RAID data (not mirror, not parity) disks; a worked example follows
  the test transcript below.

* Tested query on RAID 0, 1, 5, 10, 50.

* Tested the optimal I/O size on RAID 5:
[***@storageqe-08 ~]# lsmenv mega lsmcli vri --vol SV03403550:VD1
Device alias: mega
URI: megaraid://
lsmcli vri --vol SV03403550:VD1
Volume ID | RAID Type | Strip Size | Extent Count | Minimum I/O Size | Optimal I/O Size
--------------------------------------------------------------------------------------------
SV03403550:VD1 | RAID5 | 131072 | 5 | 131072 | 524288

Time: 0:00.29
[***@storageqe-08 ~]# dd if=/dev/urandom of=test.img bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 153.174 s, 6.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 58.9573 s, 17.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 37.7282 s, 27.8 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=524288 oflag=direct
2000+0 records in
2000+0 records out
1048576000 bytes (1.0 GB) copied, 35.3351 s, 29.7 MB/s
[***@storageqe-08 ~]# dd if=./test.img of=/dev/sdb bs=131072 oflag=direct
8000+0 records in
8000+0 records out
1048576000 bytes (1.0 GB) copied, 70.0779 s, 15.0 MB/s
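
As a worked example of the calculation above: the RAID 5 volume spans
5 disks with one parity strip per stripe, so the optimal I/O size is
4 data strips * 131072 = 524288 bytes, matching the 'Optimal I/O Size'
column in the transcript.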

Signed-off-by: Gris Ge <***@redhat.com>
---
plugin/megaraid/megaraid.py | 76 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)

diff --git a/plugin/megaraid/megaraid.py b/plugin/megaraid/megaraid.py
index 83abf63..e754cd8 100644
--- a/plugin/megaraid/megaraid.py
+++ b/plugin/megaraid/megaraid.py
@@ -157,6 +157,33 @@ def _pool_id_of(dg_id, sys_id):
return "%s:DG%s" % (sys_id, dg_id)


+_RAID_TYPE_MAP = {
+ 'RAID0': Volume.RAID_TYPE_RAID0,
+ 'RAID1': Volume.RAID_TYPE_RAID1,
+ 'RAID5': Volume.RAID_TYPE_RAID5,
+ 'RAID6': Volume.RAID_TYPE_RAID6,
+ 'RAID00': Volume.RAID_TYPE_RAID0,
+ # Some MegaRAID controllers support at most 16 disks per span.
+ # To support more than 16 disks in one group, MegaRAID uses RAID00 or
+ # even RAID000. All of them are treated as RAID0.
+ 'RAID10': Volume.RAID_TYPE_RAID10,
+ 'RAID50': Volume.RAID_TYPE_RAID50,
+ 'RAID60': Volume.RAID_TYPE_RAID60,
+}
+
+
+def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info):
+ raid_type = _RAID_TYPE_MAP.get(
+ vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN)
+
+ # On LSI controllers, a RAID1 with four or more disks is actually
+ # a RAID10.
+ if raid_type == Volume.RAID_TYPE_RAID1 and \
+ int(vd_prop_info['Number of Drives Per Span']) >= 4:
+ raid_type = Volume.RAID_TYPE_RAID10
+
+ return raid_type
+
+
class MegaRAID(IPlugin):
_DEFAULT_MDADM_BIN_PATHS = [
"/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli"]
@@ -459,3 +486,52 @@ class MegaRAID(IPlugin):
vd_pd_info_list, vd_prop_info, key_name))

return search_property(lsm_vols, search_key, search_value)
+
+ @_handle_errors
+ def volume_raid_info(self, volume, flags=Client.FLAG_RSVD):
+ if not volume.plugin_data:
+ raise LsmError(
+ ErrorNumber.INVALID_ARGUMENT,
+ "Ilegal input volume argument: missing plugin_data property")
+
+ vd_path = volume.plugin_data
+ vol_show_output = self._storcli_exec([vd_path, "show", "all"])
+ vd_basic_info = vol_show_output[vd_path][0]
+ vd_id = int(vd_basic_info['DG/VD'].split('/')[-1])
+ vd_prop_info = vol_show_output['VD%d Properties' % vd_id]
+
+ raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info)
+ strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size'])
+ disk_count = (
+ int(vd_prop_info['Number of Drives Per Span']) *
+ int(vd_prop_info['Span Depth']))
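+ # strip_count is the number of data-bearing strips in one full
+ # stripe; the optimal I/O size returned below is
+ # strip_size * strip_count.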
+ if raid_type == Volume.RAID_TYPE_RAID0:
+ strip_count = disk_count
+ elif raid_type == Volume.RAID_TYPE_RAID1:
+ strip_count = 1
+ elif raid_type == Volume.RAID_TYPE_RAID5:
+ strip_count = disk_count - 1
+ elif raid_type == Volume.RAID_TYPE_RAID6:
+ strip_count = disk_count - 2
+ elif raid_type == Volume.RAID_TYPE_RAID50:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 1) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID60:
+ strip_count = (
+ (int(vd_prop_info['Number of Drives Per Span']) - 2) *
+ int(vd_prop_info['Span Depth']))
+ elif raid_type == Volume.RAID_TYPE_RAID10:
+ strip_count = (
+ int(vd_prop_info['Number of Drives Per Span']) / 2 *
+ int(vd_prop_info['Span Depth']))
+ else:
+ # MegaRAID does not support RAID 15 or 16 yet.
+ raise LsmError(
+ ErrorNumber.PLUGIN_BUG,
+ "volume_raid_info(): Got unexpected RAID type: %s" %
+ vd_basic_info['TYPE'])
+
+ return [
+ raid_type, strip_size, disk_count, strip_size,
+ strip_size * strip_count]
--
1.8.3.1