Gris Ge
2014-05-16 14:20:11 UTC
* Removed the abstract methods in IData for value conversion.
* Moved all value conversion methods to data_display.py.
* Used file-level methods instead of @staticmethod, as Tony suggested.
* Moved lsm.PlugData to data_display.py as it is only for lsmcli.
* Volume.STATUS_ERR is kept as old one. We will change it in another patch.
* Instead of using an enumerated number for unknown values, we now raise an
error when an illegal string is passed to a string_to_type conversion:
LsmError(ErrorNumber.INVALID_ARGUMENT, "balalala")
* As the git summary shows, we have more deletions than insertions, so I
consider this new display method an improvement.
There is still some duplicate code around VALUE_CONVERT, but I have no idea
how to eliminate it (I don't want to use a variable as a variable name to
remove the duplication; that is exactly what hashes/dictionaries were
invented for).
* Tested with all 'lsmcli list' types on 'sim://'.
* Introduced the 'IData.OPT_PROPERTIES' list; it can be used to check
whether the provided OptionalData is supported or not. Check
lsm.Disk.__init__() for details.
Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_cmdline.py | 26 +-
python_binding/lsm/_data.py | 532 +---------------------------------------
tools/lsmcli/data_display.py | 544 ++++++++++++++++++++++++++++++++++-------
3 files changed, 478 insertions(+), 624 deletions(-)
diff --git a/python_binding/lsm/_cmdline.py b/python_binding/lsm/_cmdline.py
index 1845e82..22f1a9a 100644
--- a/python_binding/lsm/_cmdline.py
+++ b/python_binding/lsm/_cmdline.py
@@ -26,9 +26,13 @@ from lsm import (Client, Pool, VERSION, LsmError, Capabilities, Disk,
Initiator, Volume, JobStatus, ErrorNumber, BlockRange,
uri_parse)
-from _data import PlugData
from _common import getch, size_human_2_size_bytes, Proxy
-from lsm.lsmcli.data_display import DisplayData
+from lsm.lsmcli.data_display import (
+ DisplayData, PlugData,
+ pool_raid_type_str_to_type, pool_member_type_str_to_type,
+ vol_provision_str_to_type, vol_rep_type_str_to_type,
+ vol_access_type_str_to_type)
+
##@package lsm.cmdline
@@ -1390,7 +1394,7 @@ class CmdLine:
p,
args.name,
self._size(args.size),
- Volume._prov_string_to_type(args.provisioning)))
+ vol_provision_str_to_type(args.provisioning)))
self.display_data([vol])
## Creates a snapshot
@@ -1493,7 +1497,7 @@ class CmdLine:
v = _get_item(self.c.volumes(), args.vol, "volume id")
- rep_type = Volume._rep_string_to_type(args.rep_type)
+ rep_type = vol_rep_type_str_to_type(args.rep_type)
if rep_type == Volume.REPLICATE_UNKNOWN:
raise ArgError("invalid replication type= %s" % rep_type)
@@ -1508,7 +1512,7 @@ class CmdLine:
dst = _get_item(self.c.volumes(), args.dst_vol,
"destination volume id")
- rep_type = Volume._rep_string_to_type(args.rep_type)
+ rep_type = vol_rep_type_str_to_type(args.rep_type)
if rep_type == Volume.REPLICATE_UNKNOWN:
raise ArgError("invalid replication type= %s" % rep_type)
@@ -1545,7 +1549,7 @@ class CmdLine:
i_type = CmdLine._init_type_to_enum(args.init_type)
access = 'DEFAULT'
if args.access is not None:
- access = Volume._access_string_to_type(args.access)
+ access = vol_access_type_str_to_type(args.access)
self.c.initiator_grant(initiator_id, i_type, v, access)
else:
@@ -1571,7 +1575,7 @@ class CmdLine:
access = 'RW'
if args.access is not None:
access = args.access
- access = Volume._access_string_to_type(args.access)
+ access = vol_access_type_str_to_type(args.access)
self.c.access_group_grant(group, v, access)
else:
self.c.access_group_revoke(group, v)
@@ -1661,14 +1665,14 @@ class CmdLine:
size_bytes = self._size(self.args.size)
if args.raid_type:
- raid_type = Pool._raid_type_str_to_type(
+ raid_type = pool_raid_type_str_to_type(
self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
args.raid_type)
if args.member_type:
- member_type = Pool._member_type_str_to_type(
+ member_type = pool_member_type_str_to_type(
args.member_type)
if member_type == Pool.MEMBER_TYPE_UNKNOWN:
raise ArgError("Unkonwn member type specified: %s" %
@@ -1699,7 +1703,7 @@ class CmdLine:
else:
disks_to_use.append(disk_ids[member_id])
- raid_type = Pool._raid_type_str_to_type(self.args.raid_type)
+ raid_type = pool_raid_type_str_to_type(self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
self.args.raid_type)
@@ -1724,7 +1728,7 @@ class CmdLine:
raise ArgError("Invalid volumes ID specified in " +
"--member-id %s " % member_id)
- raid_type = Pool._raid_type_str_to_type(
+ raid_type = pool_raid_type_str_to_type(
self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 0853b6c..db63d38 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -114,6 +114,8 @@ class IData(object):
"""
__metaclass__ = _ABCMeta
+ OPT_PROPERTIES = []
+
def _to_dict(self):
"""
Represent the class as a dictionary
@@ -157,111 +159,6 @@ class IData(object):
"""
return str(self._to_dict())
- _MAN_PROPERTIES_2_HEADER = dict()
- _OPT_PROPERTIES_2_HEADER = dict()
- _MAN_PROPERTIES_SEQUENCE = []
- _OPT_PROPERTIES_SEQUENCE = []
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- return value
-
- def _str_of_key(self, key_name=None):
- """
- If key_name == None or not provided:
- Return a dictionary providing the mandatory properties key name to
- human friendly string mapping:
- {
- 'id': 'ID',
- 'member_type': 'Member Type',
- .
- .
- .
- }
- else provide the human friendly string of certain key.
- """
- if key_name is None:
- return dict(list(self._MAN_PROPERTIES_2_HEADER.items()) +
- list(self._OPT_PROPERTIES_2_HEADER.items()))
-
- man_pros_header = self._MAN_PROPERTIES_2_HEADER
- opt_pros_header = self._OPT_PROPERTIES_2_HEADER
- if key_name in man_pros_header.keys():
- return man_pros_header[key_name]
- elif key_name in opt_pros_header.keys():
- return opt_pros_header[key_name]
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "%s class does not provide %s property" %
- (self.__name__, key_name))
-
- def _value_of_key(self, key_name=None, human=False, enum_as_number=False,
- list_convert=False):
- """
- Return the value of certain key, allowing do humanize converting,
- list converting, or enumerate as number.
- For optional properties, if requesting key is not valid for current
- instance(but is valid for class definition), return None
- If key_name == None, we return a dictionary like this:
- {
- # key_name: converted_value
- id: 1232424abcef,
- raid_type: 'RAID6',
- .
- .
- .
- }
- """
- man_pros_header = self._MAN_PROPERTIES_2_HEADER
- opt_pros_header = self._OPT_PROPERTIES_2_HEADER
- if key_name is None:
- all_value = {}
- for cur_key_name in man_pros_header.keys():
- all_value[cur_key_name] = self._value_of_key(
- key_name=cur_key_name,
- human=human,
- enum_as_number=enum_as_number,
- list_convert=list_convert)
- for cur_key_name in opt_pros_header.keys():
- cur_value = self._value_of_key(
- key_name=cur_key_name,
- human=human,
- enum_as_number=enum_as_number,
- list_convert=list_convert)
- if cur_value is None:
- continue
- else:
- all_value[cur_key_name] = cur_value
- return all_value
-
- if key_name in man_pros_header.keys():
- value = getattr(self, key_name)
-
- return self._value_convert(key_name, value, human, enum_as_number,
- list_convert)
-
- elif (hasattr(self, '_optional_data') and
- key_name in opt_pros_header.keys()):
- if key_name not in self._optional_data.list():
- return None
-
- value = self._optional_data.get(key_name)
- return self._value_convert(key_name, value, human, enum_as_number,
- list_convert)
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "%s class does not provide %s property" %
- (self.__name__, key_name))
-
- def _key_display_sequence(self):
- """
- Return a List with suggested data displaying order of properties.
- """
- key = self._MAN_PROPERTIES_SEQUENCE
- key.extend(self._OPT_PROPERTIES_SEQUENCE)
- return key
-
-
@default_property('id', doc="Unique identifier")
@default_property('type', doc="Enumerated initiator type")
@default_property('name', doc="User supplied name")
@@ -272,28 +169,6 @@ class Initiator(IData):
(TYPE_OTHER, TYPE_PORT_WWN, TYPE_NODE_WWN, TYPE_HOSTNAME, TYPE_ISCSI,
TYPE_SAS) = (1, 2, 3, 4, 5, 7)
- _type_map = {1: 'Other', 2: 'Port WWN', 3: 'Node WWN', 4: 'Hostname',
- 5: 'iSCSI', 7: "SAS"}
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'type': 'Type',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'type']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if not enum_as_number:
- if key_name == 'type':
- value = Initiator._type_to_str(value)
- return value
-
- @staticmethod
- def _type_to_str(init_type):
- return Initiator._type_map[init_type]
-
def __init__(self, _id, _type, _name):
if not _name or not len(_name):
@@ -345,21 +220,6 @@ class Disk(IData):
DISK_TYPE_SSD = 53 # Solid State Drive
DISK_TYPE_HYBRID = 54 # uses a combination of HDD and SSD
- _DISK_TYPE = {
- DISK_TYPE_UNKNOWN: 'UNKNOWN',
- DISK_TYPE_OTHER: 'OTHER',
- DISK_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- DISK_TYPE_ATA: 'ATA',
- DISK_TYPE_SATA: 'SATA',
- DISK_TYPE_SAS: 'SAS',
- DISK_TYPE_FC: 'FC',
- DISK_TYPE_SOP: 'SOP',
- DISK_TYPE_NL_SAS: 'NL_SAS',
- DISK_TYPE_HDD: 'HDD',
- DISK_TYPE_SSD: 'SSD',
- DISK_TYPE_HYBRID: 'HYBRID',
- }
-
MAX_DISK_STATUS_BITS = 64
# Disk status could be any combination of these status.
STATUS_UNKNOWN = 1 << 0
@@ -399,61 +259,8 @@ class Disk(IData):
# Disk is in reconstructing date from other RAID member.
# Should explain progress in Disk.status_info
- _STATUS = {
- STATUS_UNKNOWN: 'UNKNOWN',
- STATUS_OK: 'OK',
- STATUS_OTHER: 'OTHER',
- STATUS_PREDICTIVE_FAILURE: 'PREDICTIVE_FAILURE',
- STATUS_ERROR: 'ERROR',
- STATUS_OFFLINE: 'OFFLINE',
- STATUS_STARTING: 'STARTING',
- STATUS_STOPPING: 'STOPPING',
- STATUS_STOPPED: 'STOPPED',
- STATUS_INITIALIZING: 'INITIALIZING',
- STATUS_RECONSTRUCTING: 'RECONSTRUCTING',
- }
-
- @staticmethod
- def status_to_str(status):
- """
- Convert status to a string
- When having multiple status, will use a comma between them
- """
- status_str = ''
- for x in Disk._STATUS.keys():
- if x & status:
- status_str = txt_a(status_str, Disk._STATUS[x])
- if status_str:
- return status_str
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Invalid Disk.status: %d" % status)
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'disk_type': 'Disk Type',
- 'block_size': 'Block Size',
- 'num_of_blocks': '#blocks',
- 'size_bytes': 'Size',
- 'status': 'Status',
- 'system_id': 'System ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'disk_type', 'block_size',
- 'num_of_blocks', 'size_bytes', 'status',
- 'system_id']
-
- _OPT_PROPERTIES_2_HEADER = {
- 'sn': 'SN',
- 'part_num': 'Part Number',
- 'vendor': 'Vendor',
- 'model': 'Model',
- 'status_info': 'Status Info',
- 'owner_ctrler_id': 'Controller Owner',
- }
-
- _OPT_PROPERTIES_SEQUENCE = ['sn', 'part_num', 'vendor', 'model',
- 'status_info', 'owner_ctrler_id']
+ OPT_PROPERTIES = ['sn', 'part_num', 'vendor', 'model', 'status_info',
+ 'owner_ctrler_id']
def _value_convert(self, key_name, value, human, enum_as_number,
list_convert):
@@ -483,7 +290,7 @@ class Disk(IData):
self._optional_data = OptionalData()
else:
#Make sure the properties only contain ones we permit
- allowed = set(Disk._OPT_PROPERTIES_2_HEADER.keys())
+ allowed = set(Disk.OPT_PROPERTIES)
actual = set(_optional_data.list())
if actual <= allowed:
@@ -500,41 +307,9 @@ class Disk(IData):
"""
return self.block_size * self.num_of_blocks
- @staticmethod
- def disk_type_to_str(disk_type):
- if disk_type in Disk._DISK_TYPE.keys():
- return Disk._DISK_TYPE[disk_type]
- return Disk._DISK_TYPE[Disk.DISK_TYPE_UNKNOWN]
-
- @staticmethod
- def disk_type_str_to_type(disk_type_str):
- key = get_key(Disk._DISK_TYPE, disk_type_str)
- if key or key == 0:
- return key
- return Disk.DISK_TYPE_UNKNOWN
-
def __str__(self):
return self.name
- def _opt_column_headers(self):
- opt_headers = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_headers.extend([Disk._OPT_PROPERTIES_2_HEADER[opt_pro]])
- return opt_headers
-
- def _opt_column_data(self, human=False, enum_as_number=False):
- opt_data_values = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_pro_value = self._optional_data.get(opt_pro)
- if enum_as_number is False:
- pass
-
- opt_data_values.extend([opt_pro_value])
- return opt_data_values
-
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('vpd83', doc="Vital product page 0x83 identifier")
@@ -547,7 +322,6 @@ class Volume(IData):
"""
Represents a volume.
"""
-
# Volume status Note: Volumes can have multiple status bits set at same
# time.
(STATUS_UNKNOWN, STATUS_OK, STATUS_DEGRADED, STATUS_ERR, STATUS_STARTING,
@@ -562,56 +336,10 @@ class Volume(IData):
(PROVISION_UNKNOWN, PROVISION_THIN, PROVISION_FULL, PROVISION_DEFAULT) = \
(-1, 1, 2, 3)
- @staticmethod
- def _prov_string_to_type(prov_type):
- if prov_type == 'DEFAULT':
- return Volume.PROVISION_DEFAULT
- elif prov_type == "FULL":
- return Volume.PROVISION_FULL
- elif prov_type == "THIN":
- return Volume.PROVISION_THIN
- else:
- return Volume.PROVISION_UNKNOWN
-
- @staticmethod
- def _rep_string_to_type(rt):
- if rt == "SNAPSHOT":
- return Volume.REPLICATE_SNAPSHOT
- elif rt == "CLONE":
- return Volume.REPLICATE_CLONE
- elif rt == "COPY":
- return Volume.REPLICATE_COPY
- elif rt == "MIRROR_SYNC":
- return Volume.REPLICATE_MIRROR_SYNC
- elif rt == "MIRROR_ASYNC":
- return Volume.REPLICATE_MIRROR_ASYNC
- else:
- return Volume.REPLICATE_UNKNOWN
-
#Initiator access
(ACCESS_READ_ONLY, ACCESS_READ_WRITE, ACCESS_NONE) = (1, 2, 3)
@staticmethod
- def _status_to_str(status):
- if status == 1:
- return "OK"
- elif status == 0:
- return "Unknown"
- else:
- rc = ""
- if status & Volume.STATUS_OK:
- rc = txt_a(rc, "OK")
- if status & Volume.STATUS_DEGRADED:
- rc = txt_a(rc, "Degraded")
- if status & Volume.STATUS_DORMANT:
- rc = txt_a(rc, "Dormant")
- if status & Volume.STATUS_ERR:
- rc = txt_a(rc, "Error")
- if status & Volume.STATUS_STARTING:
- rc = txt_a(rc, "Starting")
- return rc
-
- @staticmethod
def _access_string_to_type(access):
if access == "RW":
return Volume.ACCESS_READ_WRITE
@@ -639,35 +367,6 @@ class Volume(IData):
def __str__(self):
return self.name
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'vpd83': 'VPD83',
- 'block_size': 'Block Size',
- 'num_of_blocks': '#blocks',
- 'size_bytes': 'Size',
- 'status': 'Status',
- 'system_id': 'System ID',
- 'pool_id': 'Pool ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'vpd83', 'block_size',
- 'num_of_blocks', 'size_bytes', 'status',
- 'system_id', 'pool_id']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
-
- if enum_as_number is False:
- if key_name == 'status':
- value = self._status_to_str(value)
- if human:
- if key_name == 'size_bytes':
- value = sh(value, human)
- elif key_name == 'block_size':
- value = sh(value, human)
- return value
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User defined system name")
@@ -753,33 +452,13 @@ The lsm.System class does not have class methods.
STATUS_STOPPED = 1 << 8
STATUS_OTHER = 1 << 9
- @staticmethod
- def _status_to_str(status):
- if status == 0:
- return "Unknown"
- elif status == 1:
- return "OK"
- else:
- rc = ""
- if status & System.STATUS_OK:
- rc = txt_a(rc, "OK")
- if status & System.STATUS_DEGRADED:
- rc = txt_a(rc, "Degraded")
- if status & System.STATUS_ERROR:
- rc = txt_a(rc, "Error")
- if status & System.STATUS_PREDICTIVE_FAILURE:
- rc = txt_a(rc, "Predictive failure")
- if status & System.STATUS_VENDOR_SPECIFIC:
- rc = txt_a(rc, "Vendor specific status")
-
- return rc
-
def __init__(self, _id, _name, _status, _status_info):
self._id = _id
self._name = _name
self._status = _status
self._status_info = _status_info
+
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User supplied name")
@default_property('total_space', doc="Total space in bytes")
@@ -976,82 +655,9 @@ class Pool(IData):
# DESTROYING:
# Array is removing current pool.
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- # id: Identifier of Pool.
- 'name': 'Name',
- # name: Human readable name of Pool.
- 'total_space': 'Total Space',
- # total_space: All spaces in bytes could be allocated to user.
- 'free_space': 'Free Space',
- # free_space: Free spaces in bytes could be allocated to user.
- 'status': 'Status',
- # status: Indicate the status of Pool.
- 'status_info': 'Status Info',
- # status_info: A string explaining the detail of current status.
- # Check comments above about Pool.STATUS_XXX for
- # what info you should save in it.
- 'system_id': 'System ID',
- # system_id: Identifier of belonging system.
- }
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'total_space', 'free_space',
- 'status', 'status_info', 'system_id']
-
- _OPT_PROPERTIES_2_HEADER = {
- 'raid_type': 'RAID Type',
- # raid_type: RAID Type of this pool's RAID Group(s):
- # RAID_TYPE_XXXX, check constants above.
- 'member_type': 'Member Type',
- # member_type: What kind of items assembled this pool:
- # MEMBER_TYPE_DISK/MEMBER_TYPE_POOL/MEMBER_TYPE_VOLUME
- 'member_ids': 'Member IDs',
- # member_ids: The list of items' ID assembled this pool:
- # [Pool.id, ] or [Disk.id, ] or [Volume.id, ]
- 'thinp_type': 'Thin Provision Type',
- # thinp_type: Can this pool support Thin Provisioning or not:
- # THINP_TYPE_THIN vs THINP_TYPE_THICK
- # THINP_TYPE_NOT_APPLICABLE for those pool can create
- # THICK sub_pool or THIN sub_pool. That means, ThinP is
- # not implemented at current pool level.
- # If we really need to identify the under algorithm some
- # day, we will expand to THINP_TYPE_THIN_ALLOCATED and etc
- 'element_type': 'Element Type',
- # element_type: That kind of items can this pool create:
- # ELEMENT_TYPE_VOLUME
- # ELEMENT_TYPE_POOL
- # ELEMENT_TYPE_FS
- # For those system reserved pool, use
- # ELEMENT_TYPE_SYS_RESERVED
- # For example, pools for replication or spare.
- # We will split them out once support spare and
- # replication. Those system pool should be neither
- # filtered or mark as ELEMENT_TYPE_SYS_RESERVED.
- }
-
- _OPT_PROPERTIES_SEQUENCE = ['raid_type', 'member_type', 'member_ids',
- 'element_type', 'thinp_type']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if human:
- if key_name == 'total_space' or key_name == 'free_space':
- value = sh(value, human)
- if list_convert:
- if key_name == 'member_ids':
- value = self._member_ids_to_str(value)
- if enum_as_number is False:
- if key_name == 'raid_type':
- value = self.raid_type_to_str(value)
- elif key_name == 'member_type':
- value = self._member_type_to_str(value)
- elif key_name == 'thinp_type':
- value = self.thinp_type_to_str(value)
- elif key_name == 'status':
- value = self._status_to_str(value)
- elif key_name == 'element_type':
- value = self._element_type_to_str(value)
- return value
+ OPT_PROPERTIES = ['raid_type', 'member_type', 'member_ids',
+ 'element_type', 'thinp_type']
def __init__(self, _id, _name, _total_space, _free_space, _status,
_status_info, _system_id, _optional_data=None):
@@ -1067,7 +673,7 @@ class Pool(IData):
self._optional_data = OptionalData()
else:
#Make sure the properties only contain ones we permit
- allowed = set(Pool._OPT_PROPERTIES_2_HEADER.keys())
+ allowed = set(Pool.OPT_PROPERTIES)
actual = set(_optional_data.list())
if actual <= allowed:
@@ -1077,35 +683,6 @@ class Pool(IData):
"Property keys are invalid: %s" %
"".join(actual - allowed))
- def _opt_column_headers(self):
- opt_headers = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_headers.extend([Pool._OPT_PROPERTIES_2_HEADER[opt_pro]])
- return opt_headers
-
- def _opt_column_data(self, human=False, enum_as_number=False):
- opt_data_values = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_pro_value = self._optional_data.get(opt_pro)
- if enum_as_number:
- pass # no byte size needed to humanize
- else:
- if opt_pro == 'member_ids':
- opt_pro_value = Pool._member_ids_to_str(opt_pro_value)
- elif opt_pro == 'raid_type':
- opt_pro_value = Pool.raid_type_to_str(opt_pro_value)
- elif opt_pro == 'member_type':
- opt_pro_value = Pool._member_type_to_str(opt_pro_value)
- elif opt_pro == 'thinp_type':
- opt_pro_value = Pool.thinp_type_to_str(opt_pro_value)
- elif opt_pro == 'element_type':
- opt_pro_value = Pool._element_type_to_str(opt_pro_value)
-
- opt_data_values.extend([opt_pro_value])
- return opt_data_values
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="File system name")
@@ -1123,26 +700,6 @@ class FileSystem(IData):
self._pool_id = _pool_id
self._system_id = _system_id
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'total_space': 'Total Space',
- 'free_space': 'Free Space',
- 'pool_id': 'Pool ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'total_space', 'free_space',
- 'pool_id']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if human:
- if key_name == 'total_space':
- value = sh(value, human)
- elif key_name == 'free_space':
- value = sh(value, human)
- return value
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Snapshot name")
@@ -1153,20 +710,6 @@ class FsSnapshot(IData):
self._name = _name
self._ts = int(_ts)
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if key_name == 'ts':
- value = datetime.fromtimestamp(value)
- return value
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'ts': 'Created',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'ts']
-
@default_property('id', doc="Unique identifier")
@default_property('fs_id', doc="Filesystem that is exported")
@@ -1198,22 +741,6 @@ class NfsExport(IData):
self._anongid = _anongid # gid for anonymous group id
self._options = _options # NFS options
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'fs_id': 'File system ID',
- 'export_path': 'Export Path',
- 'auth': 'Authentication',
- 'root': 'Root',
- 'rw': 'Read/Write',
- 'ro': 'Read Only',
- 'anonuid': 'Anon UID',
- 'anongid': 'Anon GID',
- 'options': 'Options'
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'fs_id', 'export_path', 'auth', 'root',
- 'rw', 'ro', 'anonuid', 'anongid', 'options']
-
@default_property('src_block', doc="Source logical block address")
@default_property('dest_block', doc="Destination logical block address")
@@ -1224,9 +751,6 @@ class BlockRange(IData):
self._dest_block = _dest_block
self._block_count = _block_count
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
@default_property('id', doc="Unique instance identifier")
@default_property('name', doc="Access group name")
@@ -1239,32 +763,12 @@ class AccessGroup(IData):
self._initiators = _initiators # List of initiators
self._system_id = _system_id # System id this group belongs
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'initiators': 'Initiator IDs',
- 'system_id': 'System ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'initiators', 'system_id']
- _OPT_PROPERTIES_SEQUENCE = []
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if list_convert:
- if key_name == 'initiators':
- value = ','.join(str(x) for x in value)
- return value
-
class OptionalData(IData):
def _column_data(self, human=False, enum_as_number=False):
return [sorted(self._values.iterkeys(),
key=lambda k: self._values[k][1])]
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
def __init__(self, _values=None):
if _values is not None:
self._values = _values
@@ -1430,24 +934,6 @@ class Capabilities(IData):
for i in range(len(self._cap)):
self._cap[i] = Capabilities.SUPPORTED
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
-
-# This data is actually never serialized across the RPC, but is used only
-# for displaying the data.
-class PlugData(IData):
- _MAN_PROPERTIES_2_HEADER = {
- "desc": "Description",
- "version": "Version",
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['desc', 'version']
-
- def __init__(self, description, plugin_version):
- self.desc = description
- self.version = plugin_version
-
if __name__ == '__main__':
#TODO Need some unit tests that encode/decode all the types with nested
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index c4f410e..82f84b8 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -17,39 +17,56 @@
# Author: Gris Ge <***@redhat.com>
import sys
from collections import OrderedDict
+from datetime import datetime
from lsm import (size_bytes_2_size_human, LsmError, ErrorNumber,
- System, Pool, Disk, Volume)
+ System, Pool, Disk, Volume, AccessGroup, Initiator,
+ FileSystem, FsSnapshot, NfsExport)
BIT_MAP_STRING_SPLITTER = ','
+
+def _print_out(msg):
+ try:
+ sys.stdout.write(str(msg))
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+ except IOError:
+ sys.exit(1)
+
+
def _txt_a(txt, append):
if len(txt):
return txt + BIT_MAP_STRING_SPLITTER + append
else:
return append
-def _enum_conv_to_str(enum, conv_dict):
+
+def _bit_map_to_str(bit_map, conv_dict):
rc = ''
for cur_enum in conv_dict.keys():
- if cur_enum & enum:
+ if cur_enum & bit_map:
rc = _txt_a(rc, conv_dict[cur_enum])
if rc == '':
return 'Unknown(%s)' % hex(enum)
+ return rc
-def _int_type_conv_to_str(int_type, conv_dict):
+
+def _enum_type_to_str(int_type, conv_dict):
rc = ''
if int_type in conv_dict.keys():
return conv_dict[int_type]
return 'Unknown(%d)' % int_type
-def _str_to_int_type_conv(type_str, conv_dict):
+
+def _str_to_enum(type_str, conv_dict):
keys = [k for k, v in conv_dict.items() if v.lower() == type_str.lower()]
if len(keys) > 0:
return keys[0]
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Failed to convert %s to lsm type" % type_str)
+
_SYSTEM_STATUS_CONV = {
System.STATUS_UNKNOWN: 'Unknown',
System.STATUS_OK: 'OK',
@@ -63,8 +80,10 @@ _SYSTEM_STATUS_CONV = {
System.STATUS_OTHER: 'Other',
}
+
def system_status_to_str(system_status):
- return _enum_conv_to_str(system_status, _SYSTEM_STATUS_CONV)
+ return _bit_map_to_str(system_status, _SYSTEM_STATUS_CONV)
+
_POOL_STATUS_CONV = {
Pool.STATUS_UNKNOWN: 'UNKNOWN',
@@ -86,8 +105,10 @@ _POOL_STATUS_CONV = {
Pool.STATUS_DESTROYING: 'DESTROYING',
}
+
def pool_status_to_str(pool_status):
- return _enum_conv_to_str(pool_status, _POOL_STATUS_CONV)
+ return _bit_map_to_str(pool_status, _POOL_STATUS_CONV)
+
_POOL_ELEMENT_TYPE_CONV = {
Pool.ELEMENT_TYPE_UNKNOWN: 'UNKNOWN',
@@ -97,38 +118,43 @@ _POOL_ELEMENT_TYPE_CONV = {
Pool.ELEMENT_TYPE_SYS_RESERVED: 'SYSTEM_RESERVED',
}
+
def pool_element_type_to_str(element_type):
- return _enum_conv_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
-
-_RAID_TYPE_CONV = {
- RAID_TYPE_RAID0: 'RAID0', # stripe
- RAID_TYPE_RAID1: 'RAID1', # mirror
- RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
- # parity
- RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
- # parity
- RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
- # parity
- RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
- RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
- RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
- RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
- RAID_TYPE_RAID50: 'RAID50', # stripe of parities
- RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
- RAID_TYPE_RAID51: 'RAID51', # mirror of parities
- RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
- RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
- RAID_TYPE_UNKNOWN: 'UNKNOWN',
- RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
- # different RAID type
+ return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
+
+
+_POOL_RAID_TYPE_CONV = {
+ Pool.RAID_TYPE_RAID0: 'RAID0', # stripe
+ Pool.RAID_TYPE_RAID1: 'RAID1', # mirror
+ Pool.RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
+ # parity
+ Pool.RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
+ # parity
+ Pool.RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
+ # parity
+ Pool.RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
+ Pool.RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
+ Pool.RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
+ Pool.RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
+ Pool.RAID_TYPE_RAID50: 'RAID50', # stripe of parities
+ Pool.RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
+ Pool.RAID_TYPE_RAID51: 'RAID51', # mirror of parities
+ Pool.RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
+ Pool.RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
+ Pool.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ Pool.RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
+ Pool.RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
+ # different RAID type
}
+
def pool_raid_type_to_str(raid_type):
- return _int_type_conv_to_str(raid_type, _RAID_TYPE_CONV)
+ return _enum_type_to_str(raid_type, _POOL_RAID_TYPE_CONV)
+
def pool_raid_type_str_to_type(raid_type_str):
- return _str_to_int_type_conv(raid_type_str, _RAID_TYPE_CONV)
+ return _str_to_enum(raid_type_str, _POOL_RAID_TYPE_CONV)
+
_POOL_MEMBER_TYPE_CONV = {
Pool.MEMBER_TYPE_UNKNOWN: 'UNKNOWN',
@@ -148,11 +174,14 @@ _POOL_MEMBER_TYPE_CONV = {
Pool.MEMBER_TYPE_VOLUME: 'VOLUME', # Pool was created from Volume(s).
}
+
def pool_member_type_to_str(member_type):
- return _int_type_conv_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
+ return _enum_type_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
+
def pool_member_type_str_to_type(member_type_str):
- return _str_to_int_type_conv(member_type_str, _POOL_MEMBER_TYPE_CONV)
+ return _str_to_enum(member_type_str, _POOL_MEMBER_TYPE_CONV)
+
_POOL_THINP_TYPE_CONV = {
Pool.THINP_TYPE_UNKNOWN: 'UNKNOWN',
@@ -161,11 +190,64 @@ _POOL_THINP_TYPE_CONV = {
Pool.THINP_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
}
+
def pool_thinp_type_to_str(thinp_type):
- return _int_type_conv_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
+ return _enum_type_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
+
def pool_thinp_type_str_to_type(thinp_type_str):
- return _str_to_int_type_conv(thinp_type_str, _POOL_THINP_TYPE_CONV)
+ return _str_to_enum(thinp_type_str, _POOL_THINP_TYPE_CONV)
+
+
+_VOL_STATUS_CONV = {
+ Volume.STATUS_UNKNOWN: 'Unknown',
+ Volume.STATUS_OK: 'OK',
+ Volume.STATUS_DEGRADED: 'Degraded',
+ Volume.STATUS_DORMANT: 'Dormant',
+ Volume.STATUS_ERR: 'Error',
+ Volume.STATUS_STARTING: 'Starting',
+}
+
+
+_VOL_PROVISION_CONV = {
+ Volume.PROVISION_DEFAULT: 'DEFAULT',
+ Volume.PROVISION_FULL: 'FULL',
+ Volume.PROVISION_THIN: 'THIN',
+ Volume.PROVISION_UNKNOWN: 'UNKNOWN',
+}
+
+
+def vol_provision_str_to_type(vol_provision_str):
+ return _str_to_enum(vol_provision_str, _VOL_PROVISION_CONV)
+
+
+_VOL_REP_TYPE_CONV = {
+ Volume.REPLICATE_SNAPSHOT: 'SNAPSHOT',
+ Volume.REPLICATE_CLONE: 'CLONE',
+ Volume.REPLICATE_COPY: 'COPY',
+ Volume.REPLICATE_MIRROR_SYNC: 'MIRROR_SYNC',
+ Volume.REPLICATE_MIRROR_ASYNC: 'MIRROR_ASYNC',
+ Volume.REPLICATE_UNKNOWN: 'UNKNOWN',
+}
+
+
+def vol_rep_type_str_to_type(vol_rep_type_str):
+ return _str_to_enum(vol_rep_type_str, _VOL_REP_TYPE_CONV)
+
+
+_VOL_ACCESS_TYPE_CONV = {
+ Volume.ACCESS_READ_WRITE: 'RW',
+ Volume.ACCESS_READ_ONLY: 'RO'
+}
+
+
+def vol_access_type_str_to_type(vol_access_type_str):
+ return _str_to_enum(vol_access_type_str, _VOL_ACCESS_TYPE_CONV)
+
+
+def vol_status_to_str(vol_status):
+ return _bit_map_to_str(vol_status, _VOL_STATUS_CONV)
+
_DISK_TYPE_CONV = {
Disk.DISK_TYPE_UNKNOWN: 'UNKNOWN',
@@ -182,10 +264,12 @@ _DISK_TYPE_CONV = {
Disk.DISK_TYPE_HYBRID: 'HYBRID',
}
+
def disk_type_to_str(disk_type):
- return _int_type_conv_to_str(disk_type, _DISK_TYPE_CONV)
+ return _enum_type_to_str(disk_type, _DISK_TYPE_CONV)
-_DISK_STATUS = {
+
+_DISK_STATUS_CONV = {
Disk.STATUS_UNKNOWN: 'UNKNOWN',
Disk.STATUS_OK: 'OK',
Disk.STATUS_OTHER: 'OTHER',
@@ -199,8 +283,29 @@ _DISK_STATUS = {
Disk.STATUS_RECONSTRUCTING: 'RECONSTRUCTING',
}
+
def disk_status_to_str(disk_status):
- return _enum_conv_to_str(disk_status, _DISK_STATUS)
+ return _bit_map_to_str(disk_status, _DISK_STATUS_CONV)
+
+
+_INIT_TYPE_CONV = {
+ Initiator.TYPE_OTHER: 'Other',
+ Initiator.TYPE_PORT_WWN: 'Port WWN',
+ Initiator.TYPE_NODE_WWN: 'Node WWN',
+ Initiator.TYPE_HOSTNAME: 'Hostname',
+ Initiator.TYPE_ISCSI: 'iSCSI',
+ Initiator.TYPE_SAS: "SAS"
+}
+
+
+def init_type_to_str(init_type):
+ return _enum_type_to_str(init_type, _INIT_TYPE_CONV)
+
+
+class PlugData(object):
+ def __init__(self, description, plugin_version):
+ self.desc = description
+ self.version = plugin_version
class DisplayData(object):
@@ -208,15 +313,6 @@ class DisplayData(object):
def __init__(self):
pass
- @staticmethod
- def _out(msg):
- try:
- sys.stdout.write(str(msg))
- sys.stdout.write("\n")
- sys.stdout.flush()
- except IOError:
- sys.exit(1)
-
DISPLAY_WAY_COLUMN = 0
DISPLAY_WAY_SCRIPT = 1
@@ -224,6 +320,9 @@ class DisplayData(object):
DEFAULT_SPLITTER = ' | '
+ VALUE_CONVERT = {}
+
+ # lsm.System
SYSTEM_MAN_HEADER = OrderedDict()
SYSTEM_MAN_HEADER['id'] = 'ID'
SYSTEM_MAN_HEADER['name'] = 'Name'
@@ -232,23 +331,288 @@ class DisplayData(object):
SYSTEM_OPT_HEADER = OrderedDict()
- SYSTEM_DSP_HEADER = SYSTEM_MAN_HEADER # SYSTEM_DSP_HEADER should be
- # subset of SYSTEM_MAN_HEADER
+ SYSTEM_COLUME_KEYS = SYSTEM_MAN_HEADER.keys()
+ # SYSTEM_COLUME_KEYS should be subset of SYSTEM_MAN_HEADER.keys()
+ # XXX_COLUME_KEYS contains a list of mandatory properties which will be
+ # displayed in column way. It is used to limit the output of properties
+ # to ensure the column display way does not exceed the column width 78.
+ # All mandatory_headers will be displayed in script way.
+ # if '-o' define, both mandatory_headers and optional_headers will be
+ # displayed in script way.
SYSTEM_VALUE_CONV_ENUM = {
- 'status': EnumConvert.system_status_to_str,
+ 'status': system_status_to_str,
}
SYSTEM_VALUE_CONV_HUMAN = []
- VALUE_CONVERT = {
- System: {
- 'mandatory_headers': SYSTEM_MAN_HEADER,
- 'display_headers': SYSTEM_DSP_HEADER,
- 'optional_headers': SYSTEM_OPT_HEADER,
- 'value_conv_enum': SYSTEM_VALUE_CONV_ENUM,
- 'value_conv_human': SYSTEM_VALUE_CONV_HUMAN,
- }
+ VALUE_CONVERT[System] = {
+ 'mandatory_headers': SYSTEM_MAN_HEADER,
+ 'column_keys': SYSTEM_COLUME_KEYS,
+ 'optional_headers': SYSTEM_OPT_HEADER,
+ 'value_conv_enum': SYSTEM_VALUE_CONV_ENUM,
+ 'value_conv_human': SYSTEM_VALUE_CONV_HUMAN,
+ }
+
+ PLUG_DATA_MAN_HEADER = OrderedDict()
+ PLUG_DATA_MAN_HEADER['desc'] = 'Description'
+ PLUG_DATA_MAN_HEADER['version'] = 'Version'
+
+ PLUG_DATA_COLUME_KEYS = PLUG_DATA_MAN_HEADER.keys()
+
+ PLUG_DATA_OPT_HEADER = OrderedDict()
+ PLUG_DATA_VALUE_CONV_ENUM = {}
+ PLUG_DATA_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[PlugData] = {
+ 'mandatory_headers': PLUG_DATA_MAN_HEADER,
+ 'column_keys': PLUG_DATA_COLUME_KEYS,
+ 'optional_headers': PLUG_DATA_OPT_HEADER,
+ 'value_conv_enum': PLUG_DATA_VALUE_CONV_ENUM,
+ 'value_conv_human': PLUG_DATA_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Pool
+ POOL_MAN_HEADER = OrderedDict()
+ POOL_MAN_HEADER['id'] = 'ID'
+ POOL_MAN_HEADER['name'] = 'Name'
+ POOL_MAN_HEADER['total_space'] = 'Total Space'
+ POOL_MAN_HEADER['free_space'] = 'Free Space'
+ POOL_MAN_HEADER['status'] = 'Status'
+ POOL_MAN_HEADER['status_info'] = 'Status Info'
+ POOL_MAN_HEADER['system_id'] = 'System ID'
+
+ POOL_COLUME_KEYS = POOL_MAN_HEADER.keys()
+
+ POOL_OPT_HEADER = OrderedDict()
+ POOL_OPT_HEADER['raid_type'] = 'RAID Type'
+ POOL_OPT_HEADER['member_type'] = 'Member Type'
+ POOL_OPT_HEADER['member_ids'] = 'Member IDs'
+ POOL_OPT_HEADER['thinp_type'] = 'Provision Type'
+ POOL_OPT_HEADER['element_type'] = 'Element Type'
+
+ POOL_VALUE_CONV_ENUM = {
+ 'status': pool_status_to_str,
+ 'raid_type': pool_raid_type_to_str,
+ 'member_type': pool_member_type_to_str,
+ 'thinp_type': pool_thinp_type_to_str,
+ 'element_type': pool_element_type_to_str,
+ }
+
+ POOL_VALUE_CONV_HUMAN = ['total_space', 'free_space']
+
+ VALUE_CONVERT[Pool] = {
+ 'mandatory_headers': POOL_MAN_HEADER,
+ 'column_keys': POOL_COLUME_KEYS,
+ 'optional_headers': POOL_OPT_HEADER,
+ 'value_conv_enum': POOL_VALUE_CONV_ENUM,
+ 'value_conv_human': POOL_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Volume
+ VOL_MAN_HEADER = OrderedDict()
+ VOL_MAN_HEADER['id'] = 'ID'
+ VOL_MAN_HEADER['name'] = 'Name'
+ VOL_MAN_HEADER['vpd83'] = 'SCSI VPD 0x83'
+ VOL_MAN_HEADER['block_size'] = 'Block Size'
+ VOL_MAN_HEADER['num_of_blocks'] = '#blocks'
+ VOL_MAN_HEADER['size_bytes'] = 'Size'
+ VOL_MAN_HEADER['status'] = 'Status'
+ VOL_MAN_HEADER['pool_id'] = 'Pool ID'
+ VOL_MAN_HEADER['system_id'] = 'System ID'
+
+ VOL_COLUME_KEYS = []
+ for key_name in VOL_MAN_HEADER.keys():
+ # Skip these keys for colume display
+ if key_name not in ['block_size', 'num_of_blocks', 'system_id']:
+ VOL_COLUME_KEYS.extend([key_name])
+
+ VOL_OPT_HEADER = OrderedDict()
+
+ VOL_VALUE_CONV_ENUM = {
+ 'status': vol_status_to_str,
+ }
+
+ VOL_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']
+
+ VALUE_CONVERT[Volume] = {
+ 'mandatory_headers': VOL_MAN_HEADER,
+ 'column_keys': VOL_COLUME_KEYS,
+ 'optional_headers': VOL_OPT_HEADER,
+ 'value_conv_enum': VOL_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Disk
+ DISK_MAN_HEADER = OrderedDict()
+ DISK_MAN_HEADER['id'] = 'ID'
+ DISK_MAN_HEADER['name'] = 'Name'
+ DISK_MAN_HEADER['disk_type'] = 'Type'
+ DISK_MAN_HEADER['block_size'] = 'Block Size'
+ DISK_MAN_HEADER['num_of_blocks'] = '#blocks'
+ DISK_MAN_HEADER['size_bytes'] = 'Size'
+ DISK_MAN_HEADER['status'] = 'Status'
+ DISK_MAN_HEADER['system_id'] = 'System ID'
+
+ DISK_COLUME_KEYS = []
+ for key_name in DISK_MAN_HEADER.keys():
+ # Skip these keys for colume display
+ if key_name not in ['block_size', 'num_of_blocks']:
+ DISK_COLUME_KEYS.extend([key_name])
+
+ DISK_OPT_HEADER = OrderedDict()
+ DISK_OPT_HEADER['sn'] = 'Serial Number'
+ DISK_OPT_HEADER['part_num'] = 'Part Number'
+ DISK_OPT_HEADER['vendor'] = 'Vendor'
+ DISK_OPT_HEADER['model'] = 'Model'
+
+ DISK_VALUE_CONV_ENUM = {
+ 'status': disk_status_to_str,
+ 'disk_type': disk_type_to_str,
+ }
+
+ DISK_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']
+
+ VALUE_CONVERT[Disk] = {
+ 'mandatory_headers': DISK_MAN_HEADER,
+ 'column_keys': DISK_COLUME_KEYS,
+ 'optional_headers': DISK_OPT_HEADER,
+ 'value_conv_enum': DISK_VALUE_CONV_ENUM,
+ 'value_conv_human': DISK_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.AccessGroup
+ AG_MAN_HEADER = OrderedDict()
+ AG_MAN_HEADER['id'] = 'ID'
+ AG_MAN_HEADER['name'] = 'Name'
+ AG_MAN_HEADER['initiators'] = 'Initiator IDs'
+ AG_MAN_HEADER['system_id'] = 'System ID'
+
+ AG_COLUME_KEYS = AG_MAN_HEADER.keys()
+
+ AG_OPT_HEADER = OrderedDict()
+
+ AG_VALUE_CONV_ENUM = {}
+
+ AG_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[AccessGroup] = {
+ 'mandatory_headers': AG_MAN_HEADER,
+ 'column_keys': AG_COLUME_KEYS,
+ 'optional_headers': AG_OPT_HEADER,
+ 'value_conv_enum': AG_VALUE_CONV_ENUM,
+ 'value_conv_human': AG_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Initiator
+ INIT_MAN_HEADER = OrderedDict()
+ INIT_MAN_HEADER['id'] = 'ID'
+ INIT_MAN_HEADER['name'] = 'Name'
+ INIT_MAN_HEADER['type'] = 'Initiator Type'
+
+ INIT_COLUME_KEYS = INIT_MAN_HEADER.keys()
+
+ INIT_OPT_HEADER = OrderedDict()
+
+ INIT_VALUE_CONV_ENUM = {
+ 'type': init_type_to_str,
+ }
+
+ INIT_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[Initiator] = {
+ 'mandatory_headers': INIT_MAN_HEADER,
+ 'column_keys': INIT_COLUME_KEYS,
+ 'optional_headers': INIT_OPT_HEADER,
+ 'value_conv_enum': INIT_VALUE_CONV_ENUM,
+ 'value_conv_human': INIT_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.FileSystem
+ FS_MAN_HEADER = OrderedDict()
+ FS_MAN_HEADER['id'] = 'ID'
+ FS_MAN_HEADER['name'] = 'Name'
+ FS_MAN_HEADER['total_space'] = 'Total Space'
+ FS_MAN_HEADER['free_space'] = 'Free Space'
+ FS_MAN_HEADER['pool_id'] = 'Pool ID'
+ FS_MAN_HEADER['system_id'] = 'System ID'
+
+ FS_COLUME_KEYS = []
+ for key_name in FS_MAN_HEADER.keys():
+ # Skip these keys for colume display
+ if key_name not in ['system_id']:
+ FS_COLUME_KEYS.extend([key_name])
+
+ FS_OPT_HEADER = OrderedDict()
+
+ FS_VALUE_CONV_ENUM = {
+ }
+
+ FS_VALUE_CONV_HUMAN = ['total_space', 'free_space']
+
+ VALUE_CONVERT[FileSystem] = {
+ 'mandatory_headers': FS_MAN_HEADER,
+ 'column_keys': FS_COLUME_KEYS,
+ 'optional_headers': FS_OPT_HEADER,
+ 'value_conv_enum': FS_VALUE_CONV_ENUM,
+ 'value_conv_human': FS_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.FsSnapshot
+ FS_SNAP_MAN_HEADER = OrderedDict()
+ FS_SNAP_MAN_HEADER['id'] = 'ID'
+ FS_SNAP_MAN_HEADER['name'] = 'Name'
+ FS_SNAP_MAN_HEADER['ts'] = 'Time Stamp'
+
+ FS_SNAP_COLUME_KEYS = FS_SNAP_MAN_HEADER.keys()
+
+ FS_SNAP_OPT_HEADER = OrderedDict()
+
+ FS_SNAP_VALUE_CONV_ENUM = {
+ 'ts': datetime.fromtimestamp
+ }
+
+ FS_SNAP_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[FsSnapshot] = {
+ 'mandatory_headers': FS_SNAP_MAN_HEADER,
+ 'column_keys': FS_SNAP_COLUME_KEYS,
+ 'optional_headers': FS_SNAP_OPT_HEADER,
+ 'value_conv_enum': FS_SNAP_VALUE_CONV_ENUM,
+ 'value_conv_human': FS_SNAP_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.NfsExport
+ NFS_EXPORT_MAN_HEADER = OrderedDict()
+ NFS_EXPORT_MAN_HEADER['id'] = 'ID'
+ NFS_EXPORT_MAN_HEADER['fs_id'] = 'FileSystem ID'
+ NFS_EXPORT_MAN_HEADER['export_path'] = 'Export Path'
+ NFS_EXPORT_MAN_HEADER['auth'] = 'Auth Type'
+ NFS_EXPORT_MAN_HEADER['root'] = 'Root Hosts'
+ NFS_EXPORT_MAN_HEADER['rw'] = 'RW Hosts'
+ NFS_EXPORT_MAN_HEADER['ro'] = 'RO Hosts'
+ NFS_EXPORT_MAN_HEADER['anonuid'] = 'Anonymous UID'
+ NFS_EXPORT_MAN_HEADER['anongid'] = 'Anonymous GID'
+ NFS_EXPORT_MAN_HEADER['options'] = 'Options'
+
+ NFS_EXPORT_COLUME_KEYS = []
+ for key_name in NFS_EXPORT_MAN_HEADER.keys():
+ # Skip these keys for colume display
+ if key_name not in ['root', 'anonuid', 'anongid', 'auth']:
+ NFS_EXPORT_COLUME_KEYS.extend([key_name])
+
+ NFS_EXPORT_OPT_HEADER = OrderedDict()
+
+ NFS_EXPORT_VALUE_CONV_ENUM = {}
+
+ NFS_EXPORT_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[NfsExport] = {
+ 'mandatory_headers': NFS_EXPORT_MAN_HEADER,
+ 'column_keys': NFS_EXPORT_COLUME_KEYS,
+ 'optional_headers': NFS_EXPORT_OPT_HEADER,
+ 'value_conv_enum': NFS_EXPORT_VALUE_CONV_ENUM,
+ 'value_conv_human': NFS_EXPORT_VALUE_CONV_HUMAN,
}
@staticmethod
@@ -285,33 +649,33 @@ class DisplayData(object):
return max_width
@staticmethod
- def _data_dict_gen(obj, flag_human, flag_enum, extra_properties=None,
- flag_dsp_all_data=False):
+ def _data_dict_gen(obj, flag_human, flag_enum, display_way,
+ extra_properties=None, flag_dsp_all_data=False):
data_dict = OrderedDict()
value_convert = DisplayData.VALUE_CONVERT[type(obj)]
mandatory_headers = value_convert['mandatory_headers']
- display_headers = value_convert['display_headers']
optional_headers = value_convert['optional_headers']
value_conv_enum = value_convert['value_conv_enum']
value_conv_human = value_convert['value_conv_human']
- for key in display_headers.keys():
- key_str = display_headers[key]
+ if flag_dsp_all_data:
+ display_way = DisplayData.DISPLAY_WAY_SCRIPT
+
+ display_keys = []
+
+ if display_way == DisplayData.DISPLAY_WAY_COLUMN:
+ display_keys = value_convert['column_keys']
+ elif display_way == DisplayData.DISPLAY_WAY_SCRIPT:
+ display_keys = mandatory_headers.keys()
+
+ for key in display_keys:
+ key_str = mandatory_headers[key]
value = DisplayData._get_man_pro_value(
obj, key, value_conv_enum, value_conv_human, flag_human,
flag_enum)
data_dict[key_str] = value
if flag_dsp_all_data:
- for key in mandatory_headers.keys():
- if key in display_headers.keys():
- continue
- key_str = mandatory_headers[key]
- value = DisplayData._get_man_pro_value(
- obj, key, value_conv_enum, value_conv_human, flag_human,
- flag_enum)
- data_dict[key_str] = value
-
for key in optional_headers.keys():
key_str = optional_headers[key]
value = DisplayData._get_opt_pro_value(
@@ -319,11 +683,12 @@ class DisplayData(object):
flag_enum)
data_dict[key_str] = value
- elif extra_properties:
+ if extra_properties:
for key in extra_properties:
+ if key in data_dict.keys():
+ # already contained
+ continue
if key in mandatory_headers.keys():
- if key in display_headers.keys():
- continue
key_str = mandatory_headers[key]
value = DisplayData._get_man_pro_value(
obj, key, value_conv_enum, value_conv_human,
@@ -358,8 +723,8 @@ class DisplayData(object):
if type(objs[0]) in DisplayData.VALUE_CONVERT.keys():
for obj in objs:
data_dict = DisplayData._data_dict_gen(
- obj, flag_human, flag_enum, extra_properties,
- flag_dsp_all_data)
+ obj, flag_human, flag_enum, display_way,
+ extra_properties, flag_dsp_all_data)
data_dict_list.extend([data_dict])
else:
return None
@@ -400,25 +765,24 @@ class DisplayData(object):
splitter,
value_column_width)
obj_splitter = '%s%s%s' % ('-' * key_column_width,
- '-' * len(splitter),
- '-' * value_column_width)
+ '-' * len(splitter),
+ '-' * value_column_width)
for data_dict in data_dict_list:
- DisplayData._out(obj_splitter)
+ _print_out(obj_splitter)
for key_name in data_dict:
value = data_dict[key_name]
if isinstance(value, list):
flag_first_data = True
for sub_value in value:
if flag_first_data:
- DisplayData._out(row_format %
- (key_name, str(sub_value)))
+ _print_out(row_format % (key_name, str(sub_value)))
flag_first_data = False
else:
- DisplayData._out(sub_row_format % str(sub_value))
+ _print_out(sub_row_format % str(sub_value))
else:
- DisplayData._out(row_format % (key_name, str(value)))
- DisplayData._out(obj_splitter)
+ _print_out(row_format % (key_name, str(value)))
+ _print_out(obj_splitter)
@staticmethod
def _display_data_column_way(data_dict_list, splitter, flag_with_header):
@@ -451,7 +815,7 @@ class DisplayData(object):
for raw in range(0, row_width):
new = []
for column in range(0, item_count):
- new.append([''])
+ new.append('')
two_d_list.append(new)
# header
@@ -489,6 +853,6 @@ class DisplayData(object):
row_format = splitter.join(row_formats)
for row_index in range(0, len(two_d_list)):
- DisplayData._out(row_format % tuple(two_d_list[row_index]))
+ _print_out(row_format % tuple(two_d_list[row_index]))
if row_index == 0 and flag_with_header:
- DisplayData._out(header_splitter)
+ _print_out(header_splitter)
* Moving all value converting methods to data_display.py
* Use file level method in stead of @staticmethod as Tony suggested.
* Moved lsm.PlugData to data_display.py as it is only for lsmcli.
* Volume.STATUS_ERR is kept as old one. We will change it in another patch.
* Instead of using an enumerated number for unknown, we will raise an error
when given an illegal string for string_to_type converting:
LsmError(ErrorNumber.INVALID_ARGUMENT, "balalala")
* As the git summary said, we have more deletions than insertions, so I
consider this new display method an improvement.
There is still some duplicate code for VALUE_CONVERT, but I have no idea
how to remove it (I don't want to use a variable as a variable name to save
this duplicate code, because that is what hashes/dictionaries were invented for).
* Tested with all 'lsmcli list' types on 'sim://'.
* Introduced the 'IData.OPT_PROPERTIES' list; it can be used for checking
whether the provided OptionalData is supported or not. Check
lsm.Disk.__init__() for details.
Signed-off-by: Gris Ge <***@redhat.com>
---
python_binding/lsm/_cmdline.py | 26 +-
python_binding/lsm/_data.py | 532 +---------------------------------------
tools/lsmcli/data_display.py | 544 ++++++++++++++++++++++++++++++++++-------
3 files changed, 478 insertions(+), 624 deletions(-)
diff --git a/python_binding/lsm/_cmdline.py b/python_binding/lsm/_cmdline.py
index 1845e82..22f1a9a 100644
--- a/python_binding/lsm/_cmdline.py
+++ b/python_binding/lsm/_cmdline.py
@@ -26,9 +26,13 @@ from lsm import (Client, Pool, VERSION, LsmError, Capabilities, Disk,
Initiator, Volume, JobStatus, ErrorNumber, BlockRange,
uri_parse)
-from _data import PlugData
from _common import getch, size_human_2_size_bytes, Proxy
-from lsm.lsmcli.data_display import DisplayData
+from lsm.lsmcli.data_display import (
+ DisplayData, PlugData,
+ pool_raid_type_str_to_type, pool_member_type_str_to_type,
+ vol_provision_str_to_type, vol_rep_type_str_to_type,
+ vol_access_type_str_to_type)
+
##@package lsm.cmdline
@@ -1390,7 +1394,7 @@ class CmdLine:
p,
args.name,
self._size(args.size),
- Volume._prov_string_to_type(args.provisioning)))
+ vol_provision_str_to_type(args.provisioning)))
self.display_data([vol])
## Creates a snapshot
@@ -1493,7 +1497,7 @@ class CmdLine:
v = _get_item(self.c.volumes(), args.vol, "volume id")
- rep_type = Volume._rep_string_to_type(args.rep_type)
+ rep_type = vol_rep_type_str_to_type(args.rep_type)
if rep_type == Volume.REPLICATE_UNKNOWN:
raise ArgError("invalid replication type= %s" % rep_type)
@@ -1508,7 +1512,7 @@ class CmdLine:
dst = _get_item(self.c.volumes(), args.dst_vol,
"destination volume id")
- rep_type = Volume._rep_string_to_type(args.rep_type)
+ rep_type = vol_rep_type_str_to_type(args.rep_type)
if rep_type == Volume.REPLICATE_UNKNOWN:
raise ArgError("invalid replication type= %s" % rep_type)
@@ -1545,7 +1549,7 @@ class CmdLine:
i_type = CmdLine._init_type_to_enum(args.init_type)
access = 'DEFAULT'
if args.access is not None:
- access = Volume._access_string_to_type(args.access)
+ access = vol_access_type_str_to_type(args.access)
self.c.initiator_grant(initiator_id, i_type, v, access)
else:
@@ -1571,7 +1575,7 @@ class CmdLine:
access = 'RW'
if args.access is not None:
access = args.access
- access = Volume._access_string_to_type(args.access)
+ access = vol_access_type_str_to_type(args.access)
self.c.access_group_grant(group, v, access)
else:
self.c.access_group_revoke(group, v)
@@ -1661,14 +1665,14 @@ class CmdLine:
size_bytes = self._size(self.args.size)
if args.raid_type:
- raid_type = Pool._raid_type_str_to_type(
+ raid_type = pool_raid_type_str_to_type(
self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
args.raid_type)
if args.member_type:
- member_type = Pool._member_type_str_to_type(
+ member_type = pool_member_type_str_to_type(
args.member_type)
if member_type == Pool.MEMBER_TYPE_UNKNOWN:
raise ArgError("Unkonwn member type specified: %s" %
@@ -1699,7 +1703,7 @@ class CmdLine:
else:
disks_to_use.append(disk_ids[member_id])
- raid_type = Pool._raid_type_str_to_type(self.args.raid_type)
+ raid_type = pool_raid_type_str_to_type(self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
self.args.raid_type)
@@ -1724,7 +1728,7 @@ class CmdLine:
raise ArgError("Invalid volumes ID specified in " +
"--member-id %s " % member_id)
- raid_type = Pool._raid_type_str_to_type(
+ raid_type = pool_raid_type_str_to_type(
self.args.raid_type)
if raid_type == Pool.RAID_TYPE_UNKNOWN:
raise ArgError("Unknown RAID type specified: %s" %
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 0853b6c..db63d38 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -114,6 +114,8 @@ class IData(object):
"""
__metaclass__ = _ABCMeta
+ OPT_PROPERTIES = []
+
def _to_dict(self):
"""
Represent the class as a dictionary
@@ -157,111 +159,6 @@ class IData(object):
"""
return str(self._to_dict())
- _MAN_PROPERTIES_2_HEADER = dict()
- _OPT_PROPERTIES_2_HEADER = dict()
- _MAN_PROPERTIES_SEQUENCE = []
- _OPT_PROPERTIES_SEQUENCE = []
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- return value
-
- def _str_of_key(self, key_name=None):
- """
- If key_name == None or not provided:
- Return a dictionary providing the mandatory properties key name to
- human friendly string mapping:
- {
- 'id': 'ID',
- 'member_type': 'Member Type',
- .
- .
- .
- }
- else provide the human friendly string of certain key.
- """
- if key_name is None:
- return dict(list(self._MAN_PROPERTIES_2_HEADER.items()) +
- list(self._OPT_PROPERTIES_2_HEADER.items()))
-
- man_pros_header = self._MAN_PROPERTIES_2_HEADER
- opt_pros_header = self._OPT_PROPERTIES_2_HEADER
- if key_name in man_pros_header.keys():
- return man_pros_header[key_name]
- elif key_name in opt_pros_header.keys():
- return opt_pros_header[key_name]
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "%s class does not provide %s property" %
- (self.__name__, key_name))
-
- def _value_of_key(self, key_name=None, human=False, enum_as_number=False,
- list_convert=False):
- """
- Return the value of certain key, allowing do humanize converting,
- list converting, or enumerate as number.
- For optional properties, if requesting key is not valid for current
- instance(but is valid for class definition), return None
- If key_name == None, we return a dictionary like this:
- {
- # key_name: converted_value
- id: 1232424abcef,
- raid_type: 'RAID6',
- .
- .
- .
- }
- """
- man_pros_header = self._MAN_PROPERTIES_2_HEADER
- opt_pros_header = self._OPT_PROPERTIES_2_HEADER
- if key_name is None:
- all_value = {}
- for cur_key_name in man_pros_header.keys():
- all_value[cur_key_name] = self._value_of_key(
- key_name=cur_key_name,
- human=human,
- enum_as_number=enum_as_number,
- list_convert=list_convert)
- for cur_key_name in opt_pros_header.keys():
- cur_value = self._value_of_key(
- key_name=cur_key_name,
- human=human,
- enum_as_number=enum_as_number,
- list_convert=list_convert)
- if cur_value is None:
- continue
- else:
- all_value[cur_key_name] = cur_value
- return all_value
-
- if key_name in man_pros_header.keys():
- value = getattr(self, key_name)
-
- return self._value_convert(key_name, value, human, enum_as_number,
- list_convert)
-
- elif (hasattr(self, '_optional_data') and
- key_name in opt_pros_header.keys()):
- if key_name not in self._optional_data.list():
- return None
-
- value = self._optional_data.get(key_name)
- return self._value_convert(key_name, value, human, enum_as_number,
- list_convert)
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "%s class does not provide %s property" %
- (self.__name__, key_name))
-
- def _key_display_sequence(self):
- """
- Return a List with suggested data displaying order of properties.
- """
- key = self._MAN_PROPERTIES_SEQUENCE
- key.extend(self._OPT_PROPERTIES_SEQUENCE)
- return key
-
-
@default_property('id', doc="Unique identifier")
@default_property('type', doc="Enumerated initiator type")
@default_property('name', doc="User supplied name")
@@ -272,28 +169,6 @@ class Initiator(IData):
(TYPE_OTHER, TYPE_PORT_WWN, TYPE_NODE_WWN, TYPE_HOSTNAME, TYPE_ISCSI,
TYPE_SAS) = (1, 2, 3, 4, 5, 7)
- _type_map = {1: 'Other', 2: 'Port WWN', 3: 'Node WWN', 4: 'Hostname',
- 5: 'iSCSI', 7: "SAS"}
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'type': 'Type',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'type']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if not enum_as_number:
- if key_name == 'type':
- value = Initiator._type_to_str(value)
- return value
-
- @staticmethod
- def _type_to_str(init_type):
- return Initiator._type_map[init_type]
-
def __init__(self, _id, _type, _name):
if not _name or not len(_name):
@@ -345,21 +220,6 @@ class Disk(IData):
DISK_TYPE_SSD = 53 # Solid State Drive
DISK_TYPE_HYBRID = 54 # uses a combination of HDD and SSD
- _DISK_TYPE = {
- DISK_TYPE_UNKNOWN: 'UNKNOWN',
- DISK_TYPE_OTHER: 'OTHER',
- DISK_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- DISK_TYPE_ATA: 'ATA',
- DISK_TYPE_SATA: 'SATA',
- DISK_TYPE_SAS: 'SAS',
- DISK_TYPE_FC: 'FC',
- DISK_TYPE_SOP: 'SOP',
- DISK_TYPE_NL_SAS: 'NL_SAS',
- DISK_TYPE_HDD: 'HDD',
- DISK_TYPE_SSD: 'SSD',
- DISK_TYPE_HYBRID: 'HYBRID',
- }
-
MAX_DISK_STATUS_BITS = 64
# Disk status could be any combination of these status.
STATUS_UNKNOWN = 1 << 0
@@ -399,61 +259,8 @@ class Disk(IData):
# Disk is in reconstructing date from other RAID member.
# Should explain progress in Disk.status_info
- _STATUS = {
- STATUS_UNKNOWN: 'UNKNOWN',
- STATUS_OK: 'OK',
- STATUS_OTHER: 'OTHER',
- STATUS_PREDICTIVE_FAILURE: 'PREDICTIVE_FAILURE',
- STATUS_ERROR: 'ERROR',
- STATUS_OFFLINE: 'OFFLINE',
- STATUS_STARTING: 'STARTING',
- STATUS_STOPPING: 'STOPPING',
- STATUS_STOPPED: 'STOPPED',
- STATUS_INITIALIZING: 'INITIALIZING',
- STATUS_RECONSTRUCTING: 'RECONSTRUCTING',
- }
-
- @staticmethod
- def status_to_str(status):
- """
- Convert status to a string
- When having multiple status, will use a comma between them
- """
- status_str = ''
- for x in Disk._STATUS.keys():
- if x & status:
- status_str = txt_a(status_str, Disk._STATUS[x])
- if status_str:
- return status_str
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Invalid Disk.status: %d" % status)
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'disk_type': 'Disk Type',
- 'block_size': 'Block Size',
- 'num_of_blocks': '#blocks',
- 'size_bytes': 'Size',
- 'status': 'Status',
- 'system_id': 'System ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'disk_type', 'block_size',
- 'num_of_blocks', 'size_bytes', 'status',
- 'system_id']
-
- _OPT_PROPERTIES_2_HEADER = {
- 'sn': 'SN',
- 'part_num': 'Part Number',
- 'vendor': 'Vendor',
- 'model': 'Model',
- 'status_info': 'Status Info',
- 'owner_ctrler_id': 'Controller Owner',
- }
-
- _OPT_PROPERTIES_SEQUENCE = ['sn', 'part_num', 'vendor', 'model',
- 'status_info', 'owner_ctrler_id']
+ OPT_PROPERTIES = ['sn', 'part_num', 'vendor', 'model', 'status_info',
+ 'owner_ctrler_id']
def _value_convert(self, key_name, value, human, enum_as_number,
list_convert):
@@ -483,7 +290,7 @@ class Disk(IData):
self._optional_data = OptionalData()
else:
#Make sure the properties only contain ones we permit
- allowed = set(Disk._OPT_PROPERTIES_2_HEADER.keys())
+ allowed = set(Disk.OPT_PROPERTIES)
actual = set(_optional_data.list())
if actual <= allowed:
@@ -500,41 +307,9 @@ class Disk(IData):
"""
return self.block_size * self.num_of_blocks
- @staticmethod
- def disk_type_to_str(disk_type):
- if disk_type in Disk._DISK_TYPE.keys():
- return Disk._DISK_TYPE[disk_type]
- return Disk._DISK_TYPE[Disk.DISK_TYPE_UNKNOWN]
-
- @staticmethod
- def disk_type_str_to_type(disk_type_str):
- key = get_key(Disk._DISK_TYPE, disk_type_str)
- if key or key == 0:
- return key
- return Disk.DISK_TYPE_UNKNOWN
-
def __str__(self):
return self.name
- def _opt_column_headers(self):
- opt_headers = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_headers.extend([Disk._OPT_PROPERTIES_2_HEADER[opt_pro]])
- return opt_headers
-
- def _opt_column_data(self, human=False, enum_as_number=False):
- opt_data_values = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_pro_value = self._optional_data.get(opt_pro)
- if enum_as_number is False:
- pass
-
- opt_data_values.extend([opt_pro_value])
- return opt_data_values
-
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('vpd83', doc="Vital product page 0x83 identifier")
@@ -547,7 +322,6 @@ class Volume(IData):
"""
Represents a volume.
"""
-
# Volume status Note: Volumes can have multiple status bits set at same
# time.
(STATUS_UNKNOWN, STATUS_OK, STATUS_DEGRADED, STATUS_ERR, STATUS_STARTING,
@@ -562,56 +336,10 @@ class Volume(IData):
(PROVISION_UNKNOWN, PROVISION_THIN, PROVISION_FULL, PROVISION_DEFAULT) = \
(-1, 1, 2, 3)
- @staticmethod
- def _prov_string_to_type(prov_type):
- if prov_type == 'DEFAULT':
- return Volume.PROVISION_DEFAULT
- elif prov_type == "FULL":
- return Volume.PROVISION_FULL
- elif prov_type == "THIN":
- return Volume.PROVISION_THIN
- else:
- return Volume.PROVISION_UNKNOWN
-
- @staticmethod
- def _rep_string_to_type(rt):
- if rt == "SNAPSHOT":
- return Volume.REPLICATE_SNAPSHOT
- elif rt == "CLONE":
- return Volume.REPLICATE_CLONE
- elif rt == "COPY":
- return Volume.REPLICATE_COPY
- elif rt == "MIRROR_SYNC":
- return Volume.REPLICATE_MIRROR_SYNC
- elif rt == "MIRROR_ASYNC":
- return Volume.REPLICATE_MIRROR_ASYNC
- else:
- return Volume.REPLICATE_UNKNOWN
-
#Initiator access
(ACCESS_READ_ONLY, ACCESS_READ_WRITE, ACCESS_NONE) = (1, 2, 3)
@staticmethod
- def _status_to_str(status):
- if status == 1:
- return "OK"
- elif status == 0:
- return "Unknown"
- else:
- rc = ""
- if status & Volume.STATUS_OK:
- rc = txt_a(rc, "OK")
- if status & Volume.STATUS_DEGRADED:
- rc = txt_a(rc, "Degraded")
- if status & Volume.STATUS_DORMANT:
- rc = txt_a(rc, "Dormant")
- if status & Volume.STATUS_ERR:
- rc = txt_a(rc, "Error")
- if status & Volume.STATUS_STARTING:
- rc = txt_a(rc, "Starting")
- return rc
-
- @staticmethod
def _access_string_to_type(access):
if access == "RW":
return Volume.ACCESS_READ_WRITE
@@ -639,35 +367,6 @@ class Volume(IData):
def __str__(self):
return self.name
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'vpd83': 'VPD83',
- 'block_size': 'Block Size',
- 'num_of_blocks': '#blocks',
- 'size_bytes': 'Size',
- 'status': 'Status',
- 'system_id': 'System ID',
- 'pool_id': 'Pool ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'vpd83', 'block_size',
- 'num_of_blocks', 'size_bytes', 'status',
- 'system_id', 'pool_id']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
-
- if enum_as_number is False:
- if key_name == 'status':
- value = self._status_to_str(value)
- if human:
- if key_name == 'size_bytes':
- value = sh(value, human)
- elif key_name == 'block_size':
- value = sh(value, human)
- return value
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User defined system name")
@@ -753,33 +452,13 @@ The lsm.System class does not have class methods.
STATUS_STOPPED = 1 << 8
STATUS_OTHER = 1 << 9
- @staticmethod
- def _status_to_str(status):
- if status == 0:
- return "Unknown"
- elif status == 1:
- return "OK"
- else:
- rc = ""
- if status & System.STATUS_OK:
- rc = txt_a(rc, "OK")
- if status & System.STATUS_DEGRADED:
- rc = txt_a(rc, "Degraded")
- if status & System.STATUS_ERROR:
- rc = txt_a(rc, "Error")
- if status & System.STATUS_PREDICTIVE_FAILURE:
- rc = txt_a(rc, "Predictive failure")
- if status & System.STATUS_VENDOR_SPECIFIC:
- rc = txt_a(rc, "Vendor specific status")
-
- return rc
-
def __init__(self, _id, _name, _status, _status_info):
self._id = _id
self._name = _name
self._status = _status
self._status_info = _status_info
+
@default_property('id', doc="Unique identifier")
@default_property('name', doc="User supplied name")
@default_property('total_space', doc="Total space in bytes")
@@ -976,82 +655,9 @@ class Pool(IData):
# DESTROYING:
# Array is removing current pool.
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- # id: Identifier of Pool.
- 'name': 'Name',
- # name: Human readable name of Pool.
- 'total_space': 'Total Space',
- # total_space: All spaces in bytes could be allocated to user.
- 'free_space': 'Free Space',
- # free_space: Free spaces in bytes could be allocated to user.
- 'status': 'Status',
- # status: Indicate the status of Pool.
- 'status_info': 'Status Info',
- # status_info: A string explaining the detail of current status.
- # Check comments above about Pool.STATUS_XXX for
- # what info you should save in it.
- 'system_id': 'System ID',
- # system_id: Identifier of belonging system.
- }
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'total_space', 'free_space',
- 'status', 'status_info', 'system_id']
-
- _OPT_PROPERTIES_2_HEADER = {
- 'raid_type': 'RAID Type',
- # raid_type: RAID Type of this pool's RAID Group(s):
- # RAID_TYPE_XXXX, check constants above.
- 'member_type': 'Member Type',
- # member_type: What kind of items assembled this pool:
- # MEMBER_TYPE_DISK/MEMBER_TYPE_POOL/MEMBER_TYPE_VOLUME
- 'member_ids': 'Member IDs',
- # member_ids: The list of items' ID assembled this pool:
- # [Pool.id, ] or [Disk.id, ] or [Volume.id, ]
- 'thinp_type': 'Thin Provision Type',
- # thinp_type: Can this pool support Thin Provisioning or not:
- # THINP_TYPE_THIN vs THINP_TYPE_THICK
- # THINP_TYPE_NOT_APPLICABLE for those pool can create
- # THICK sub_pool or THIN sub_pool. That means, ThinP is
- # not implemented at current pool level.
- # If we really need to identify the under algorithm some
- # day, we will expand to THINP_TYPE_THIN_ALLOCATED and etc
- 'element_type': 'Element Type',
- # element_type: That kind of items can this pool create:
- # ELEMENT_TYPE_VOLUME
- # ELEMENT_TYPE_POOL
- # ELEMENT_TYPE_FS
- # For those system reserved pool, use
- # ELEMENT_TYPE_SYS_RESERVED
- # For example, pools for replication or spare.
- # We will split them out once support spare and
- # replication. Those system pool should be neither
- # filtered or mark as ELEMENT_TYPE_SYS_RESERVED.
- }
-
- _OPT_PROPERTIES_SEQUENCE = ['raid_type', 'member_type', 'member_ids',
- 'element_type', 'thinp_type']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if human:
- if key_name == 'total_space' or key_name == 'free_space':
- value = sh(value, human)
- if list_convert:
- if key_name == 'member_ids':
- value = self._member_ids_to_str(value)
- if enum_as_number is False:
- if key_name == 'raid_type':
- value = self.raid_type_to_str(value)
- elif key_name == 'member_type':
- value = self._member_type_to_str(value)
- elif key_name == 'thinp_type':
- value = self.thinp_type_to_str(value)
- elif key_name == 'status':
- value = self._status_to_str(value)
- elif key_name == 'element_type':
- value = self._element_type_to_str(value)
- return value
+ OPT_PROPERTIES = ['raid_type', 'member_type', 'member_ids',
+ 'element_type', 'thinp_type']
def __init__(self, _id, _name, _total_space, _free_space, _status,
_status_info, _system_id, _optional_data=None):
@@ -1067,7 +673,7 @@ class Pool(IData):
self._optional_data = OptionalData()
else:
#Make sure the properties only contain ones we permit
- allowed = set(Pool._OPT_PROPERTIES_2_HEADER.keys())
+ allowed = set(Pool.OPT_PROPERTIES)
actual = set(_optional_data.list())
if actual <= allowed:
@@ -1077,35 +683,6 @@ class Pool(IData):
"Property keys are invalid: %s" %
"".join(actual - allowed))
- def _opt_column_headers(self):
- opt_headers = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_headers.extend([Pool._OPT_PROPERTIES_2_HEADER[opt_pro]])
- return opt_headers
-
- def _opt_column_data(self, human=False, enum_as_number=False):
- opt_data_values = []
- opt_pros = self._optional_data.list()
- for opt_pro in opt_pros:
- opt_pro_value = self._optional_data.get(opt_pro)
- if enum_as_number:
- pass # no byte size needed to humanize
- else:
- if opt_pro == 'member_ids':
- opt_pro_value = Pool._member_ids_to_str(opt_pro_value)
- elif opt_pro == 'raid_type':
- opt_pro_value = Pool.raid_type_to_str(opt_pro_value)
- elif opt_pro == 'member_type':
- opt_pro_value = Pool._member_type_to_str(opt_pro_value)
- elif opt_pro == 'thinp_type':
- opt_pro_value = Pool.thinp_type_to_str(opt_pro_value)
- elif opt_pro == 'element_type':
- opt_pro_value = Pool._element_type_to_str(opt_pro_value)
-
- opt_data_values.extend([opt_pro_value])
- return opt_data_values
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="File system name")
@@ -1123,26 +700,6 @@ class FileSystem(IData):
self._pool_id = _pool_id
self._system_id = _system_id
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'total_space': 'Total Space',
- 'free_space': 'Free Space',
- 'pool_id': 'Pool ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'total_space', 'free_space',
- 'pool_id']
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if human:
- if key_name == 'total_space':
- value = sh(value, human)
- elif key_name == 'free_space':
- value = sh(value, human)
- return value
-
@default_property('id', doc="Unique identifier")
@default_property('name', doc="Snapshot name")
@@ -1153,20 +710,6 @@ class FsSnapshot(IData):
self._name = _name
self._ts = int(_ts)
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if key_name == 'ts':
- value = datetime.fromtimestamp(value)
- return value
-
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'ts': 'Created',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'ts']
-
@default_property('id', doc="Unique identifier")
@default_property('fs_id', doc="Filesystem that is exported")
@@ -1198,22 +741,6 @@ class NfsExport(IData):
self._anongid = _anongid # gid for anonymous group id
self._options = _options # NFS options
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'fs_id': 'File system ID',
- 'export_path': 'Export Path',
- 'auth': 'Authentication',
- 'root': 'Root',
- 'rw': 'Read/Write',
- 'ro': 'Read Only',
- 'anonuid': 'Anon UID',
- 'anongid': 'Anon GID',
- 'options': 'Options'
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'fs_id', 'export_path', 'auth', 'root',
- 'rw', 'ro', 'anonuid', 'anongid', 'options']
-
@default_property('src_block', doc="Source logical block address")
@default_property('dest_block', doc="Destination logical block address")
@@ -1224,9 +751,6 @@ class BlockRange(IData):
self._dest_block = _dest_block
self._block_count = _block_count
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
@default_property('id', doc="Unique instance identifier")
@default_property('name', doc="Access group name")
@@ -1239,32 +763,12 @@ class AccessGroup(IData):
self._initiators = _initiators # List of initiators
self._system_id = _system_id # System id this group belongs
- _MAN_PROPERTIES_2_HEADER = {
- 'id': 'ID',
- 'name': 'Name',
- 'initiators': 'Initiator IDs',
- 'system_id': 'System ID',
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['id', 'name', 'initiators', 'system_id']
- _OPT_PROPERTIES_SEQUENCE = []
-
- def _value_convert(self, key_name, value, human, enum_as_number,
- list_convert):
- if list_convert:
- if key_name == 'initiators':
- value = ','.join(str(x) for x in value)
- return value
-
class OptionalData(IData):
def _column_data(self, human=False, enum_as_number=False):
return [sorted(self._values.iterkeys(),
key=lambda k: self._values[k][1])]
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
def __init__(self, _values=None):
if _values is not None:
self._values = _values
@@ -1430,24 +934,6 @@ class Capabilities(IData):
for i in range(len(self._cap)):
self._cap[i] = Capabilities.SUPPORTED
- def _str_of_key(self, key_name=None):
- raise NotImplementedError
-
-
-# This data is actually never serialized across the RPC, but is used only
-# for displaying the data.
-class PlugData(IData):
- _MAN_PROPERTIES_2_HEADER = {
- "desc": "Description",
- "version": "Version",
- }
-
- _MAN_PROPERTIES_SEQUENCE = ['desc', 'version']
-
- def __init__(self, description, plugin_version):
- self.desc = description
- self.version = plugin_version
-
if __name__ == '__main__':
#TODO Need some unit tests that encode/decode all the types with nested
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index c4f410e..82f84b8 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -17,39 +17,56 @@
# Author: Gris Ge <***@redhat.com>
import sys
from collections import OrderedDict
+from datetime import datetime
from lsm import (size_bytes_2_size_human, LsmError, ErrorNumber,
- System, Pool, Disk, Volume)
+ System, Pool, Disk, Volume, AccessGroup, Initiator,
+ FileSystem, FsSnapshot, NfsExport)
BIT_MAP_STRING_SPLITTER = ','
+
+def _print_out(msg):
+ try:
+ sys.stdout.write(str(msg))
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+ except IOError:
+ sys.exit(1)
+
+
def _txt_a(txt, append):
if len(txt):
return txt + BIT_MAP_STRING_SPLITTER + append
else:
return append
-def _enum_conv_to_str(enum, conv_dict):
+
+def _bit_map_to_str(bit_map, conv_dict):
rc = ''
for cur_enum in conv_dict.keys():
- if cur_enum & enum:
+ if cur_enum & bit_map:
rc = _txt_a(rc, conv_dict[cur_enum])
if rc == '':
-        return 'Unknown(%s)' % hex(enum)
+        return 'Unknown(%s)' % hex(bit_map)
+ return rc
-def _int_type_conv_to_str(int_type, conv_dict):
+
+def _enum_type_to_str(int_type, conv_dict):
rc = ''
if int_type in conv_dict.keys():
return conv_dict[int_type]
return 'Unknown(%d)' % int_type
-def _str_to_int_type_conv(type_str, conv_dict):
+
+def _str_to_enum(type_str, conv_dict):
keys = [k for k, v in conv_dict.items() if v.lower() == type_str.lower()]
if len(keys) > 0:
return keys[0]
raise LsmError(ErrorNumber.INVALID_ARGUMENT,
"Failed to convert %s to lsm type" % type_str)
+
_SYSTEM_STATUS_CONV = {
System.STATUS_UNKNOWN: 'Unknown',
System.STATUS_OK: 'OK',
@@ -63,8 +80,10 @@ _SYSTEM_STATUS_CONV = {
System.STATUS_OTHER: 'Other',
}
+
def system_status_to_str(system_status):
- return _enum_conv_to_str(system_status, _SYSTEM_STATUS_CONV)
+ return _bit_map_to_str(system_status, _SYSTEM_STATUS_CONV)
+
_POOL_STATUS_CONV = {
Pool.STATUS_UNKNOWN: 'UNKNOWN',
@@ -86,8 +105,10 @@ _POOL_STATUS_CONV = {
Pool.STATUS_DESTROYING: 'DESTROYING',
}
+
def pool_status_to_str(pool_status):
- return _enum_conv_to_str(pool_status, _POOL_STATUS_CONV)
+ return _bit_map_to_str(pool_status, _POOL_STATUS_CONV)
+
_POOL_ELEMENT_TYPE_CONV = {
Pool.ELEMENT_TYPE_UNKNOWN: 'UNKNOWN',
@@ -97,38 +118,43 @@ _POOL_ELEMENT_TYPE_CONV = {
Pool.ELEMENT_TYPE_SYS_RESERVED: 'SYSTEM_RESERVED',
}
+
def pool_element_type_to_str(element_type):
- return _enum_conv_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
-
-_RAID_TYPE_CONV = {
- RAID_TYPE_RAID0: 'RAID0', # stripe
- RAID_TYPE_RAID1: 'RAID1', # mirror
- RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
- # parity
- RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
- # parity
- RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
- # parity
- RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
- RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
- RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
- RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
- RAID_TYPE_RAID50: 'RAID50', # stripe of parities
- RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
- RAID_TYPE_RAID51: 'RAID51', # mirror of parities
- RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
- RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
- RAID_TYPE_UNKNOWN: 'UNKNOWN',
- RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
- # different RAID type
+ return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
+
+
+_POOL_RAID_TYPE_CONV = {
+ Pool.RAID_TYPE_RAID0: 'RAID0', # stripe
+ Pool.RAID_TYPE_RAID1: 'RAID1', # mirror
+ Pool.RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
+ # parity
+ Pool.RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
+ # parity
+ Pool.RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
+ # parity
+ Pool.RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
+ Pool.RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
+ Pool.RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
+ Pool.RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
+ Pool.RAID_TYPE_RAID50: 'RAID50', # stripe of parities
+ Pool.RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
+ Pool.RAID_TYPE_RAID51: 'RAID51', # mirror of parities
+ Pool.RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
+ Pool.RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
+ Pool.RAID_TYPE_UNKNOWN: 'UNKNOWN',
+ Pool.RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
+ Pool.RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
+ # different RAID type
}
+
def pool_raid_type_to_str(raid_type):
- return _int_type_conv_to_str(raid_type, _RAID_TYPE_CONV)
+ return _enum_type_to_str(raid_type, _POOL_RAID_TYPE_CONV)
+
def pool_raid_type_str_to_type(raid_type_str):
- return _str_to_int_type_conv(raid_type_str, _RAID_TYPE_CONV)
+ return _str_to_enum(raid_type_str, _POOL_RAID_TYPE_CONV)
+
_POOL_MEMBER_TYPE_CONV = {
Pool.MEMBER_TYPE_UNKNOWN: 'UNKNOWN',
@@ -148,11 +174,14 @@ _POOL_MEMBER_TYPE_CONV = {
Pool.MEMBER_TYPE_VOLUME: 'VOLUME', # Pool was created from Volume(s).
}
+
def pool_member_type_to_str(member_type):
- return _int_type_conv_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
+ return _enum_type_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
+
def pool_member_type_str_to_type(member_type_str):
- return _str_to_int_type_conv(member_type_str, _POOL_MEMBER_TYPE_CONV)
+ return _str_to_enum(member_type_str, _POOL_MEMBER_TYPE_CONV)
+
_POOL_THINP_TYPE_CONV = {
Pool.THINP_TYPE_UNKNOWN: 'UNKNOWN',
@@ -161,11 +190,64 @@ _POOL_THINP_TYPE_CONV = {
Pool.THINP_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
}
+
def pool_thinp_type_to_str(thinp_type):
- return _int_type_conv_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
+ return _enum_type_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
+
def pool_thinp_type_str_to_type(thinp_type_str):
- return _str_to_int_type_conv(thinp_type_str, _POOL_THINP_TYPE_CONV)
+ return _str_to_enum(thinp_type_str, _POOL_THINP_TYPE_CONV)
+
+
+_VOL_STATUS_CONV = {
+ Volume.STATUS_UNKNOWN: 'Unknown',
+ Volume.STATUS_OK: 'OK',
+ Volume.STATUS_DEGRADED: 'Degraded',
+ Volume.STATUS_DORMANT: 'Dormant',
+ Volume.STATUS_ERR: 'Error',
+ Volume.STATUS_STARTING: 'Starting',
+}
+
+
+_VOL_PROVISION_CONV = {
+ Volume.PROVISION_DEFAULT: 'DEFAULT',
+ Volume.PROVISION_FULL: 'FULL',
+ Volume.PROVISION_THIN: 'THIN',
+ Volume.PROVISION_UNKNOWN: 'UNKNOWN',
+}
+
+
+def vol_provision_str_to_type(vol_provision_str):
+ return _str_to_enum(vol_provision_str, _VOL_PROVISION_CONV)
+
+
+_VOL_REP_TYPE_CONV = {
+ Volume.REPLICATE_SNAPSHOT: 'SNAPSHOT',
+ Volume.REPLICATE_CLONE: 'CLONE',
+ Volume.REPLICATE_COPY: 'COPY',
+ Volume.REPLICATE_MIRROR_SYNC: 'MIRROR_SYNC',
+ Volume.REPLICATE_MIRROR_ASYNC: 'MIRROR_ASYNC',
+ Volume.REPLICATE_UNKNOWN: 'UNKNOWN',
+}
+
+
+def vol_rep_type_str_to_type(vol_rep_type_str):
+ return _str_to_enum(vol_rep_type_str, _VOL_REP_TYPE_CONV)
+
+
+_VOL_ACCESS_TYPE_CONV = {
+ Volume.ACCESS_READ_WRITE: 'RW',
+ Volume.ACCESS_READ_ONLY: 'RO'
+}
+
+
+def vol_access_type_str_to_type(vol_access_type_str):
+ return _str_to_enum(vol_access_type_str, _VOL_ACCESS_TYPE_CONV)
+
+
+def vol_status_to_str(vol_status):
+ return _bit_map_to_str(vol_status, _VOL_STATUS_CONV)
+
_DISK_TYPE_CONV = {
Disk.DISK_TYPE_UNKNOWN: 'UNKNOWN',
@@ -182,10 +264,12 @@ _DISK_TYPE_CONV = {
Disk.DISK_TYPE_HYBRID: 'HYBRID',
}
+
def disk_type_to_str(disk_type):
- return _int_type_conv_to_str(disk_type, _DISK_TYPE_CONV)
+ return _enum_type_to_str(disk_type, _DISK_TYPE_CONV)
-_DISK_STATUS = {
+
+_DISK_STATUS_CONV = {
Disk.STATUS_UNKNOWN: 'UNKNOWN',
Disk.STATUS_OK: 'OK',
Disk.STATUS_OTHER: 'OTHER',
@@ -199,8 +283,29 @@ _DISK_STATUS = {
Disk.STATUS_RECONSTRUCTING: 'RECONSTRUCTING',
}
+
def disk_status_to_str(disk_status):
- return _enum_conv_to_str(disk_status, _DISK_STATUS)
+ return _bit_map_to_str(disk_status, _DISK_STATUS_CONV)
+
+
+_INIT_TYPE_CONV = {
+ Initiator.TYPE_OTHER: 'Other',
+ Initiator.TYPE_PORT_WWN: 'Port WWN',
+ Initiator.TYPE_NODE_WWN: 'Node WWN',
+ Initiator.TYPE_HOSTNAME: 'Hostname',
+ Initiator.TYPE_ISCSI: 'iSCSI',
+ Initiator.TYPE_SAS: "SAS"
+}
+
+
+def init_type_to_str(init_type):
+ return _enum_type_to_str(init_type, _INIT_TYPE_CONV)
+
+
+class PlugData(object):
+ def __init__(self, description, plugin_version):
+ self.desc = description
+ self.version = plugin_version
class DisplayData(object):
@@ -208,15 +313,6 @@ class DisplayData(object):
def __init__(self):
pass
- @staticmethod
- def _out(msg):
- try:
- sys.stdout.write(str(msg))
- sys.stdout.write("\n")
- sys.stdout.flush()
- except IOError:
- sys.exit(1)
-
DISPLAY_WAY_COLUMN = 0
DISPLAY_WAY_SCRIPT = 1
@@ -224,6 +320,9 @@ class DisplayData(object):
DEFAULT_SPLITTER = ' | '
+ VALUE_CONVERT = {}
+
+ # lsm.System
SYSTEM_MAN_HEADER = OrderedDict()
SYSTEM_MAN_HEADER['id'] = 'ID'
SYSTEM_MAN_HEADER['name'] = 'Name'
@@ -232,23 +331,288 @@ class DisplayData(object):
SYSTEM_OPT_HEADER = OrderedDict()
- SYSTEM_DSP_HEADER = SYSTEM_MAN_HEADER # SYSTEM_DSP_HEADER should be
- # subset of SYSTEM_MAN_HEADER
+ SYSTEM_COLUME_KEYS = SYSTEM_MAN_HEADER.keys()
+ # SYSTEM_COLUME_KEYS should be subset of SYSTEM_MAN_HEADER.keys()
+    # XXX_COLUME_KEYS contains the list of mandatory properties which will
+    # be displayed in column mode. It is used to limit the properties shown
+    # so that the column display does not exceed the column width of 78.
+    # All mandatory_headers will be displayed in script mode.
+ # if '-o' define, both mandatory_headers and optional_headers will be
+ # displayed in script way.
SYSTEM_VALUE_CONV_ENUM = {
- 'status': EnumConvert.system_status_to_str,
+ 'status': system_status_to_str,
}
SYSTEM_VALUE_CONV_HUMAN = []
- VALUE_CONVERT = {
- System: {
- 'mandatory_headers': SYSTEM_MAN_HEADER,
- 'display_headers': SYSTEM_DSP_HEADER,
- 'optional_headers': SYSTEM_OPT_HEADER,
- 'value_conv_enum': SYSTEM_VALUE_CONV_ENUM,
- 'value_conv_human': SYSTEM_VALUE_CONV_HUMAN,
- }
+ VALUE_CONVERT[System] = {
+ 'mandatory_headers': SYSTEM_MAN_HEADER,
+ 'column_keys': SYSTEM_COLUME_KEYS,
+ 'optional_headers': SYSTEM_OPT_HEADER,
+ 'value_conv_enum': SYSTEM_VALUE_CONV_ENUM,
+ 'value_conv_human': SYSTEM_VALUE_CONV_HUMAN,
+ }
+
+ PLUG_DATA_MAN_HEADER = OrderedDict()
+ PLUG_DATA_MAN_HEADER['desc'] = 'Description'
+ PLUG_DATA_MAN_HEADER['version'] = 'Version'
+
+ PLUG_DATA_COLUME_KEYS = PLUG_DATA_MAN_HEADER.keys()
+
+ PLUG_DATA_OPT_HEADER = OrderedDict()
+ PLUG_DATA_VALUE_CONV_ENUM = {}
+ PLUG_DATA_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[PlugData] = {
+ 'mandatory_headers': PLUG_DATA_MAN_HEADER,
+ 'column_keys': PLUG_DATA_COLUME_KEYS,
+ 'optional_headers': PLUG_DATA_OPT_HEADER,
+ 'value_conv_enum': PLUG_DATA_VALUE_CONV_ENUM,
+ 'value_conv_human': PLUG_DATA_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Pool
+ POOL_MAN_HEADER = OrderedDict()
+ POOL_MAN_HEADER['id'] = 'ID'
+ POOL_MAN_HEADER['name'] = 'Name'
+ POOL_MAN_HEADER['total_space'] = 'Total Space'
+ POOL_MAN_HEADER['free_space'] = 'Free Space'
+ POOL_MAN_HEADER['status'] = 'Status'
+ POOL_MAN_HEADER['status_info'] = 'Status Info'
+ POOL_MAN_HEADER['system_id'] = 'System ID'
+
+ POOL_COLUME_KEYS = POOL_MAN_HEADER.keys()
+
+ POOL_OPT_HEADER = OrderedDict()
+ POOL_OPT_HEADER['raid_type'] = 'RAID Type'
+ POOL_OPT_HEADER['member_type'] = 'Member Type'
+ POOL_OPT_HEADER['member_ids'] = 'Member IDs'
+ POOL_OPT_HEADER['thinp_type'] = 'Provision Type'
+ POOL_OPT_HEADER['element_type'] = 'Element Type'
+
+ POOL_VALUE_CONV_ENUM = {
+ 'status': pool_status_to_str,
+ 'raid_type': pool_raid_type_to_str,
+ 'member_type': pool_member_type_to_str,
+ 'thinp_type': pool_thinp_type_to_str,
+ 'element_type': pool_element_type_to_str,
+ }
+
+ POOL_VALUE_CONV_HUMAN = ['total_space', 'free_space']
+
+ VALUE_CONVERT[Pool] = {
+ 'mandatory_headers': POOL_MAN_HEADER,
+ 'column_keys': POOL_COLUME_KEYS,
+ 'optional_headers': POOL_OPT_HEADER,
+ 'value_conv_enum': POOL_VALUE_CONV_ENUM,
+ 'value_conv_human': POOL_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Volume
+ VOL_MAN_HEADER = OrderedDict()
+ VOL_MAN_HEADER['id'] = 'ID'
+ VOL_MAN_HEADER['name'] = 'Name'
+ VOL_MAN_HEADER['vpd83'] = 'SCSI VPD 0x83'
+ VOL_MAN_HEADER['block_size'] = 'Block Size'
+ VOL_MAN_HEADER['num_of_blocks'] = '#blocks'
+ VOL_MAN_HEADER['size_bytes'] = 'Size'
+ VOL_MAN_HEADER['status'] = 'Status'
+ VOL_MAN_HEADER['pool_id'] = 'Pool ID'
+ VOL_MAN_HEADER['system_id'] = 'System ID'
+
+ VOL_COLUME_KEYS = []
+ for key_name in VOL_MAN_HEADER.keys():
+        # Skip these keys for column display
+ if key_name not in ['block_size', 'num_of_blocks', 'system_id']:
+ VOL_COLUME_KEYS.extend([key_name])
+
+ VOL_OPT_HEADER = OrderedDict()
+
+ VOL_VALUE_CONV_ENUM = {
+ 'status': vol_status_to_str,
+ }
+
+ VOL_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']
+
+ VALUE_CONVERT[Volume] = {
+ 'mandatory_headers': VOL_MAN_HEADER,
+ 'column_keys': VOL_COLUME_KEYS,
+ 'optional_headers': VOL_OPT_HEADER,
+ 'value_conv_enum': VOL_VALUE_CONV_ENUM,
+ 'value_conv_human': VOL_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Disk
+ DISK_MAN_HEADER = OrderedDict()
+ DISK_MAN_HEADER['id'] = 'ID'
+ DISK_MAN_HEADER['name'] = 'Name'
+ DISK_MAN_HEADER['disk_type'] = 'Type'
+ DISK_MAN_HEADER['block_size'] = 'Block Size'
+ DISK_MAN_HEADER['num_of_blocks'] = '#blocks'
+ DISK_MAN_HEADER['size_bytes'] = 'Size'
+ DISK_MAN_HEADER['status'] = 'Status'
+ DISK_MAN_HEADER['system_id'] = 'System ID'
+
+ DISK_COLUME_KEYS = []
+ for key_name in DISK_MAN_HEADER.keys():
+        # Skip these keys for column display
+ if key_name not in ['block_size', 'num_of_blocks']:
+ DISK_COLUME_KEYS.extend([key_name])
+
+ DISK_OPT_HEADER = OrderedDict()
+ DISK_OPT_HEADER['sn'] = 'Serial Number'
+ DISK_OPT_HEADER['part_num'] = 'Part Number'
+ DISK_OPT_HEADER['vendor'] = 'Vendor'
+ DISK_OPT_HEADER['model'] = 'Model'
+
+ DISK_VALUE_CONV_ENUM = {
+ 'status': disk_status_to_str,
+ 'disk_type': disk_type_to_str,
+ }
+
+ DISK_VALUE_CONV_HUMAN = ['size_bytes', 'block_size']
+
+ VALUE_CONVERT[Disk] = {
+ 'mandatory_headers': DISK_MAN_HEADER,
+ 'column_keys': DISK_COLUME_KEYS,
+ 'optional_headers': DISK_OPT_HEADER,
+ 'value_conv_enum': DISK_VALUE_CONV_ENUM,
+ 'value_conv_human': DISK_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.AccessGroup
+ AG_MAN_HEADER = OrderedDict()
+ AG_MAN_HEADER['id'] = 'ID'
+ AG_MAN_HEADER['name'] = 'Name'
+ AG_MAN_HEADER['initiators'] = 'Initiator IDs'
+ AG_MAN_HEADER['system_id'] = 'System ID'
+
+ AG_COLUME_KEYS = AG_MAN_HEADER.keys()
+
+ AG_OPT_HEADER = OrderedDict()
+
+ AG_VALUE_CONV_ENUM = {}
+
+ AG_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[AccessGroup] = {
+ 'mandatory_headers': AG_MAN_HEADER,
+ 'column_keys': AG_COLUME_KEYS,
+ 'optional_headers': AG_OPT_HEADER,
+ 'value_conv_enum': AG_VALUE_CONV_ENUM,
+ 'value_conv_human': AG_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.Initiator
+ INIT_MAN_HEADER = OrderedDict()
+ INIT_MAN_HEADER['id'] = 'ID'
+ INIT_MAN_HEADER['name'] = 'Name'
+ INIT_MAN_HEADER['type'] = 'Initiator Type'
+
+ INIT_COLUME_KEYS = INIT_MAN_HEADER.keys()
+
+ INIT_OPT_HEADER = OrderedDict()
+
+ INIT_VALUE_CONV_ENUM = {
+ 'type': init_type_to_str,
+ }
+
+ INIT_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[Initiator] = {
+ 'mandatory_headers': INIT_MAN_HEADER,
+ 'column_keys': INIT_COLUME_KEYS,
+ 'optional_headers': INIT_OPT_HEADER,
+ 'value_conv_enum': INIT_VALUE_CONV_ENUM,
+ 'value_conv_human': INIT_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.FileSystem
+ FS_MAN_HEADER = OrderedDict()
+ FS_MAN_HEADER['id'] = 'ID'
+ FS_MAN_HEADER['name'] = 'Name'
+ FS_MAN_HEADER['total_space'] = 'Total Space'
+ FS_MAN_HEADER['free_space'] = 'Free Space'
+ FS_MAN_HEADER['pool_id'] = 'Pool ID'
+ FS_MAN_HEADER['system_id'] = 'System ID'
+
+ FS_COLUME_KEYS = []
+ for key_name in FS_MAN_HEADER.keys():
+        # Skip these keys for column display
+ if key_name not in ['system_id']:
+ FS_COLUME_KEYS.extend([key_name])
+
+ FS_OPT_HEADER = OrderedDict()
+
+ FS_VALUE_CONV_ENUM = {
+ }
+
+ FS_VALUE_CONV_HUMAN = ['total_space', 'free_space']
+
+ VALUE_CONVERT[FileSystem] = {
+ 'mandatory_headers': FS_MAN_HEADER,
+ 'column_keys': FS_COLUME_KEYS,
+ 'optional_headers': FS_OPT_HEADER,
+ 'value_conv_enum': FS_VALUE_CONV_ENUM,
+ 'value_conv_human': FS_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.FsSnapshot
+ FS_SNAP_MAN_HEADER = OrderedDict()
+ FS_SNAP_MAN_HEADER['id'] = 'ID'
+ FS_SNAP_MAN_HEADER['name'] = 'Name'
+ FS_SNAP_MAN_HEADER['ts'] = 'Time Stamp'
+
+ FS_SNAP_COLUME_KEYS = FS_SNAP_MAN_HEADER.keys()
+
+ FS_SNAP_OPT_HEADER = OrderedDict()
+
+ FS_SNAP_VALUE_CONV_ENUM = {
+ 'ts': datetime.fromtimestamp
+ }
+
+ FS_SNAP_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[FsSnapshot] = {
+ 'mandatory_headers': FS_SNAP_MAN_HEADER,
+ 'column_keys': FS_SNAP_COLUME_KEYS,
+ 'optional_headers': FS_SNAP_OPT_HEADER,
+ 'value_conv_enum': FS_SNAP_VALUE_CONV_ENUM,
+ 'value_conv_human': FS_SNAP_VALUE_CONV_HUMAN,
+ }
+
+ # lsm.NfsExport
+ NFS_EXPORT_MAN_HEADER = OrderedDict()
+ NFS_EXPORT_MAN_HEADER['id'] = 'ID'
+ NFS_EXPORT_MAN_HEADER['fs_id'] = 'FileSystem ID'
+ NFS_EXPORT_MAN_HEADER['export_path'] = 'Export Path'
+ NFS_EXPORT_MAN_HEADER['auth'] = 'Auth Type'
+ NFS_EXPORT_MAN_HEADER['root'] = 'Root Hosts'
+ NFS_EXPORT_MAN_HEADER['rw'] = 'RW Hosts'
+ NFS_EXPORT_MAN_HEADER['ro'] = 'RO Hosts'
+ NFS_EXPORT_MAN_HEADER['anonuid'] = 'Anonymous UID'
+ NFS_EXPORT_MAN_HEADER['anongid'] = 'Anonymous GID'
+ NFS_EXPORT_MAN_HEADER['options'] = 'Options'
+
+ NFS_EXPORT_COLUME_KEYS = []
+ for key_name in NFS_EXPORT_MAN_HEADER.keys():
+        # Skip these keys for column display
+ if key_name not in ['root', 'anonuid', 'anongid', 'auth']:
+ NFS_EXPORT_COLUME_KEYS.extend([key_name])
+
+ NFS_EXPORT_OPT_HEADER = OrderedDict()
+
+ NFS_EXPORT_VALUE_CONV_ENUM = {}
+
+ NFS_EXPORT_VALUE_CONV_HUMAN = []
+
+ VALUE_CONVERT[NfsExport] = {
+ 'mandatory_headers': NFS_EXPORT_MAN_HEADER,
+ 'column_keys': NFS_EXPORT_COLUME_KEYS,
+ 'optional_headers': NFS_EXPORT_OPT_HEADER,
+ 'value_conv_enum': NFS_EXPORT_VALUE_CONV_ENUM,
+ 'value_conv_human': NFS_EXPORT_VALUE_CONV_HUMAN,
}
@staticmethod
@@ -285,33 +649,33 @@ class DisplayData(object):
return max_width
@staticmethod
- def _data_dict_gen(obj, flag_human, flag_enum, extra_properties=None,
- flag_dsp_all_data=False):
+ def _data_dict_gen(obj, flag_human, flag_enum, display_way,
+ extra_properties=None, flag_dsp_all_data=False):
data_dict = OrderedDict()
value_convert = DisplayData.VALUE_CONVERT[type(obj)]
mandatory_headers = value_convert['mandatory_headers']
- display_headers = value_convert['display_headers']
optional_headers = value_convert['optional_headers']
value_conv_enum = value_convert['value_conv_enum']
value_conv_human = value_convert['value_conv_human']
- for key in display_headers.keys():
- key_str = display_headers[key]
+ if flag_dsp_all_data:
+ display_way = DisplayData.DISPLAY_WAY_SCRIPT
+
+ display_keys = []
+
+ if display_way == DisplayData.DISPLAY_WAY_COLUMN:
+ display_keys = value_convert['column_keys']
+ elif display_way == DisplayData.DISPLAY_WAY_SCRIPT:
+ display_keys = mandatory_headers.keys()
+
+ for key in display_keys:
+ key_str = mandatory_headers[key]
value = DisplayData._get_man_pro_value(
obj, key, value_conv_enum, value_conv_human, flag_human,
flag_enum)
data_dict[key_str] = value
if flag_dsp_all_data:
- for key in mandatory_headers.keys():
- if key in display_headers.keys():
- continue
- key_str = mandatory_headers[key]
- value = DisplayData._get_man_pro_value(
- obj, key, value_conv_enum, value_conv_human, flag_human,
- flag_enum)
- data_dict[key_str] = value
-
for key in optional_headers.keys():
key_str = optional_headers[key]
value = DisplayData._get_opt_pro_value(
@@ -319,11 +683,12 @@ class DisplayData(object):
flag_enum)
data_dict[key_str] = value
- elif extra_properties:
+ if extra_properties:
for key in extra_properties:
+            if key in data_dict.keys():
+                # already contained (data_dict is keyed by header strings, not property names — verify this check matches)
+                continue
if key in mandatory_headers.keys():
- if key in display_headers.keys():
- continue
key_str = mandatory_headers[key]
value = DisplayData._get_man_pro_value(
obj, key, value_conv_enum, value_conv_human,
@@ -358,8 +723,8 @@ class DisplayData(object):
if type(objs[0]) in DisplayData.VALUE_CONVERT.keys():
for obj in objs:
data_dict = DisplayData._data_dict_gen(
- obj, flag_human, flag_enum, extra_properties,
- flag_dsp_all_data)
+ obj, flag_human, flag_enum, display_way,
+ extra_properties, flag_dsp_all_data)
data_dict_list.extend([data_dict])
else:
return None
@@ -400,25 +765,24 @@ class DisplayData(object):
splitter,
value_column_width)
obj_splitter = '%s%s%s' % ('-' * key_column_width,
- '-' * len(splitter),
- '-' * value_column_width)
+ '-' * len(splitter),
+ '-' * value_column_width)
for data_dict in data_dict_list:
- DisplayData._out(obj_splitter)
+ _print_out(obj_splitter)
for key_name in data_dict:
value = data_dict[key_name]
if isinstance(value, list):
flag_first_data = True
for sub_value in value:
if flag_first_data:
- DisplayData._out(row_format %
- (key_name, str(sub_value)))
+ _print_out(row_format % (key_name, str(sub_value)))
flag_first_data = False
else:
- DisplayData._out(sub_row_format % str(sub_value))
+ _print_out(sub_row_format % str(sub_value))
else:
- DisplayData._out(row_format % (key_name, str(value)))
- DisplayData._out(obj_splitter)
+ _print_out(row_format % (key_name, str(value)))
+ _print_out(obj_splitter)
@staticmethod
def _display_data_column_way(data_dict_list, splitter, flag_with_header):
@@ -451,7 +815,7 @@ class DisplayData(object):
for raw in range(0, row_width):
new = []
for column in range(0, item_count):
- new.append([''])
+ new.append('')
two_d_list.append(new)
# header
@@ -489,6 +853,6 @@ class DisplayData(object):
row_format = splitter.join(row_formats)
for row_index in range(0, len(two_d_list)):
- DisplayData._out(row_format % tuple(two_d_list[row_index]))
+ _print_out(row_format % tuple(two_d_list[row_index]))
if row_index == 0 and flag_with_header:
- DisplayData._out(header_splitter)
+ _print_out(header_splitter)
--
1.8.3.1