Discussion:
[Libstoragemgmt-devel] [PATCH 1/2] client.py: fix return_requires of initiator_revoke() and initiator_grant()
Gris Ge
2014-02-10 14:42:11 UTC
Permalink
* Fix the @return_requires() decorator of initiator_revoke() and
initiator_grant() to support ASYNC jobs as their comments state.

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/client.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lsm/lsm/client.py b/lsm/lsm/client.py
index cbe01f0..bce5eee 100644
--- a/lsm/lsm/client.py
+++ b/lsm/lsm/client.py
@@ -463,7 +463,7 @@ class Client(INetworkAttachedStorage):
# @param access Enumerated access type
# @param flags Reserved for future use, must be zero
# @returns None on success, else job id.
- @return_requires(None)
+ @return_requires(unicode)
def initiator_grant(self, initiator_id, initiator_type, volume, access,
flags=0):
"""
@@ -477,7 +477,7 @@ class Client(INetworkAttachedStorage):
# @param volume The volume to revoke access for
# @param flags Reserved for future use, must be zero
# @return None on success, else job id
- @return_requires(None)
+ @return_requires(unicode)
def initiator_revoke(self, initiator, volume, flags=0):
"""
Revokes access to a volume for the specified initiator
--
1.8.3.1
Gris Ge
2014-02-10 14:42:12 UTC
Permalink
* New design of simulator:
* simarray.py: Storage array simulator.
SimArray -- Converting SimData into LSM class.
SimData -- Handling storage resources management.

* simulator.py: Sample plugin code for plugin developer.
SimPlugin -- Provide plugin API to LSM
* With this change, 'simulator.py' could be a good sample plugin with
little non-required code. We need more documentation to explain every call.
* SimData acts as a storage array to provide more accurate and flexible
resource management.
* Provide all old features of previous simulator.py.
* Passed 'make test' (which actually runs test/runtest.sh).

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/__init__.py | 3 +-
lsm/lsm/simarray.py | 1316 ++++++++++++++++++++++++++++++++++++++++++++++++++
lsm/lsm/simulator.py | 850 ++++++--------------------------
lsm/sim_lsmplugin | 4 +-
4 files changed, 1459 insertions(+), 714 deletions(-)
create mode 100644 lsm/lsm/simarray.py

diff --git a/lsm/lsm/__init__.py b/lsm/lsm/__init__.py
index 3407382..71bb7e2 100644
--- a/lsm/lsm/__init__.py
+++ b/lsm/lsm/__init__.py
@@ -17,6 +17,7 @@ from data import DataEncoder, DataDecoder, IData, Initiator, Volume, Pool, \
from iplugin import IPlugin, IStorageAreaNetwork, INetworkAttachedStorage, INfs

from pluginrunner import PluginRunner
-from simulator import StorageSimulator, SimJob, SimState
+from simulator import SimPlugin
+from simarray import SimData, SimJob, SimArray
from transport import Transport
from version import VERSION
diff --git a/lsm/lsm/simarray.py b/lsm/lsm/simarray.py
new file mode 100644
index 0000000..4df630c
--- /dev/null
+++ b/lsm/lsm/simarray.py
@@ -0,0 +1,1316 @@
+# Copyright (C) 2011-2013 Red Hat, Inc.
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: tasleson
+# Gris Ge <***@redhat.com>
+
+# TODO: 1. Introduce constant check by using state_to_str() converting.
+# 2. Snapshot should consume space in pool.
+
+import random
+import pickle
+import tempfile
+import os
+import time
+
+from common import md5, LsmError, ErrorNumber, size_human_2_size_bytes, \
+ JobStatus
+from data import System, Volume, Disk, Pool, FileSystem, AccessGroup, \
+ Initiator, BlockRange, Snapshot, NfsExport
+
+class SimJob(object):
+ """
+ Simulates a longer running job, uses actual wall time. If test cases
+ take too long we can reduce time by shortening time duration.
+ """
+
+ def _calc_progress(self):
+ if self.percent < 100:
+ end = self.start + self.duration
+ now = time.time()
+ if now >= end:
+ self.percent = 100
+ self.status = JobStatus.COMPLETE
+ else:
+ diff = now - self.start
+ self.percent = int(100 * (diff / self.duration))
+
+ def __init__(self, item_to_return):
+ duration = os.getenv("LSM_SIM_TIME", 1)
+ self.status = JobStatus.INPROGRESS
+ self.percent = 0
+ self.__item = item_to_return
+ self.start = time.time()
+ self.duration = float(random.randint(0, int(duration)))
+
+ def progress(self):
+ """
+ Returns a tuple (status, percent, data)
+ """
+ self._calc_progress()
+ return self.status, self.percent, self.item
+
+ @property
+ def item(self):
+ if self.percent >= 100:
+ return self.__item
+ return None
+
+ @item.setter
+ def item(self, value):
+ self.__item = value
+
+
+class SimArray(object):
+ SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
+ tempfile.gettempdir() + '/lsm_sim_data')
+
+ @staticmethod
+ def _version_error(dump_file):
+ raise LsmError(ErrorNumber.INVALID_ARGUMENT,
+ "Stored simulator state incompatible with "
+ "simulator, please move or delete %s" %
+ dump_file)
+
+ def __init__(self, dump_file=None):
+ if dump_file is None:
+ self.dump_file = SimArray.SIM_DATA_FILE
+ else:
+ self.dump_file = dump_file
+
+ if os.path.exists(self.dump_file):
+
+ with open(self.dump_file, 'rb') as f:
+ self.data = pickle.load(f)
+
+ # Going forward we could get smarter about handling this for
+ # changes that aren't invasive, but we at least need to check
+ # to make sure that the data will work and not cause any
+ # undue confusion.
+ try:
+ if self.data.version != SimData.SIM_DATA_VERSION or \
+ self.data.signature != SimData._state_signature():
+ SimArray._version_error(self.dump_file)
+ except AttributeError:
+ SimArray._version_error(self.dump_file)
+
+ else:
+ self.data = SimData()
+
+ def save_state(self):
+ fh_dump_file = open(self.dump_file, 'wb')
+ pickle.dump(self.data, fh_dump_file)
+ fh_dump_file.close()
+
+ def job_status(self, job_id, flags=0):
+ return self.data.job_status(job_id, flags=0)
+
+ def job_free(self, job_id, flags=0):
+ return self.data.job_free(job_id, flags=0)
+
+ def set_time_out(self, ms, flags=0):
+ return self.data.set_time_out(ms, flags)
+
+ def get_time_out(self, flags=0):
+ return self.data.get_time_out(flags)
+
+ def systems(self):
+ return self.data.systems()
+
+ @staticmethod
+ def _sim_vol_2_lsm(sim_vol):
+ return Volume(sim_vol['vol_id'], sim_vol['name'], sim_vol['vpd83'],
+ SimData.SIM_DATA_BLK_SIZE,
+ int(sim_vol['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Volume.STATUS_OK, sim_vol['sys_id'],
+ sim_vol['pool_id'])
+
+ def volumes(self):
+ sim_vols = self.data.volumes()
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def pools(self):
+ rc = []
+ sim_pools = self.data.pools()
+ for sim_pool in sim_pools:
+ pool = Pool(sim_pool['pool_id'], sim_pool['name'],
+ sim_pool['total_space'], sim_pool['free_space'],
+ sim_pool['sys_id'])
+ rc.extend([pool])
+ return rc
+
+ def disks(self):
+ rc = []
+ sim_disks = self.data.disks()
+ for sim_disk in sim_disks:
+ disk = Disk(sim_disk['disk_id'], sim_disk['name'],
+ sim_disk['disk_type'], SimData.SIM_DATA_BLK_SIZE,
+ int(sim_disk['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Disk.STATUS_OK, sim_disk['sys_id'])
+ rc.extend([disk])
+ return rc
+
+ def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+ sim_vol = self.data.volume_create(
+ pool_id, vol_name, size_bytes, thinp, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_delete(self, vol_id, flags=0):
+ self.data.volume_delete(vol_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ def volume_resize(self, vol_id, new_size_bytes, flags=0):
+ sim_vol = self.data.volume_resize(vol_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ flags=0):
+ sim_vol = self.data.volume_replicate(
+ dst_pool_id, rep_type, src_vol_id, new_vol_name, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate_range_block_size(self, sys_id, flags=0):
+ return self.data.volume_replicate_range_block_size(sys_id, flags)
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ flags=0):
+ return self.data.job_create(
+ self.data.volume_replicate_range(
+ rep_type, src_vol_id, dst_vol_id, ranges, flags))[0]
+
+ def volume_online(self, vol_id, flags=0):
+ return self.data.volume_online(vol_id, flags)
+
+ def volume_offline(self, vol_id, flags=0):
+ return self.data.volume_offline(vol_id, flags)
+
+ def volume_child_dependency(self, vol_id, flags=0):
+ return self.data.volume_child_dependency(vol_id, flags)
+
+ def volume_child_dependency_rm(self, vol_id, flags=0):
+ return self.data.job_create(
+ self.data.volume_child_dependency_rm(vol_id, flags))[0]
+
+ @staticmethod
+ def _sim_fs_2_lsm(sim_fs):
+ return FileSystem(sim_fs['fs_id'], sim_fs['name'],
+ sim_fs['total_space'], sim_fs['free_space'],
+ sim_fs['pool_id'], sim_fs['sys_id'])
+
+ def fs(self):
+ sim_fss = self.data.fs()
+ return [SimArray._sim_fs_2_lsm(f) for f in sim_fss]
+
+ def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+ sim_fs = self.data.fs_create(pool_id, fs_name, size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_delete(self, fs_id, flags=0):
+ self.data.fs_delete(fs_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ def fs_resize(self, fs_id, new_size_bytes, flags=0):
+ sim_fs = self.data.fs_resize(fs_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+ sim_fs = self.data.fs_clone(src_fs_id, dst_fs_name, snap_id, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.file_clone(
+ fs_id, src_fs_name, dst_fs_name, snap_id, flags))[0]
+
+ @staticmethod
+ def _sim_snap_2_lsm(sim_snap):
+ return Snapshot(sim_snap['snap_id'], sim_snap['name'],
+ sim_snap['timestamp'])
+
+ def fs_snapshots(self, fs_id, flags=0):
+ sim_snaps = self.data.fs_snapshots(fs_id, flags)
+ return [SimArray._sim_snap_2_lsm(s) for s in sim_snaps]
+
+ def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+ sim_snap = self.data.fs_snapshot_create(fs_id, snap_name, files,
+ flags)
+ return self.data.job_create(SimArray._sim_snap_2_lsm(sim_snap))
+
+ def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.fs_snapshot_delete(fs_id, snap_id, flags))[0]
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ flag_all_files, flags):
+ return self.data.job_create(
+ self.data.fs_snapshot_revert(
+ fs_id, snap_id, files, restore_files,
+ flag_all_files, flags))[0]
+
+ def fs_child_dependency(self, fs_id, files, flags=0):
+ return self.data.fs_child_dependency(fs_id, files, flags)
+
+ def fs_child_dependency_rm(self, fs_id, files, flags=0):
+ return self.data.job_create(
+ self.data.fs_child_dependency_rm(fs_id, files, flags))[0]
+
+ @staticmethod
+ def _sim_exp_2_lsm(sim_exp):
+ return NfsExport(
+ sim_exp['exp_id'], sim_exp['fs_id'], sim_exp['exp_path'],
+ sim_exp['auth_type'], sim_exp['root_hosts'], sim_exp['rw_hosts'],
+ sim_exp['ro_hosts'], sim_exp['anon_uid'], sim_exp['anon_gid'],
+ sim_exp['options'])
+
+ def exports(self, flags=0):
+ sim_exps = self.data.exports(flags)
+ return [SimArray._sim_exp_2_lsm(e) for e in sim_exps]
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags=0):
+ sim_exp = self.data.fs_export(
+ fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags)
+ return SimArray._sim_exp_2_lsm(sim_exp)
+
+ def fs_unexport(self, exp_id, flags=0):
+ return self.data.fs_unexport(exp_id, flags)
+
+ @staticmethod
+ def _sim_ag_2_lsm(sim_ag):
+ return AccessGroup(sim_ag['ag_id'], sim_ag['name'],
+ sim_ag['init_ids'], sim_ag['sys_id'])
+
+ def ags(self):
+ sim_ags = self.data.ags()
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+ sim_ag = self.data.access_group_create(
+ name, init_id, init_type, sys_id, flags)
+ return SimArray._sim_ag_2_lsm(sim_ag)
+
+ def access_group_del(self, ag_id, flags=0):
+ return self.data.job_create(self.data.access_group_del(ag_id, flags))[0]
+
+ def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+ sim_ag = self.data.access_group_add_initiator(
+ ag_id, init_id, init_type, flags)
+ return self.data.job_create(SimArray._sim_ag_2_lsm(sim_ag))[0]
+
+ def access_group_del_initiator(self, ag_id, init_id, flags=0):
+ return self.data.job_create(
+ self.data.access_group_del_initiator(ag_id, init_id, flags))[0]
+
+ def access_group_grant(self, ag_id, vol_id, access, flags=0):
+ return self.data.job_create(
+ self.data.access_group_grant(ag_id, vol_id, access, flags))[0]
+
+ def access_group_revoke(self, ag_id, vol_id, flags=0):
+ return self.data.job_create(
+ self.data.access_group_revoke(ag_id, vol_id, flags))[0]
+
+ def volumes_accessible_by_access_group(self, ag_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_access_group(ag_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def access_groups_granted_to_volume(self, vol_id, flags=0):
+ sim_ags = self.data.access_groups_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ @staticmethod
+ def _sim_init_2_lsm(sim_init):
+ return Initiator(sim_init['init_id'], sim_init['init_type'],
+ sim_init['name'])
+
+ def inits(self, flags=0):
+ sim_inits = self.data.inits()
+ return [SimArray._sim_init_2_lsm(a) for a in sim_inits]
+
+ def initiator_grant(self, init_id, init_type, vol_id, access, flags=0):
+ return self.data.job_create(
+ self.data.initiator_grant(
+ init_id, init_type, vol_id, access, flags))[0]
+
+ def initiator_revoke(self, init_id, vol_id, flags=0):
+ return self.data.job_create(
+ self.data.initiator_revoke(init_id, vol_id, flags))[0]
+
+ def volumes_accessible_by_initiator(self, init_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_initiator(init_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def initiators_granted_to_volume(self, vol_id, flags=0):
+ sim_inits = self.data.initiators_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_init_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ flags=0):
+ return self.data.iscsi_chap_auth(init_id, in_user, in_pass, out_user,
+ out_pass, flags)
+
+
+class SimData(object):
+ """
+ Rules here are:
+ * we don't store the same data twice
+ * we don't store data which could be calculated out
+
+ self.vol_dict = {
+ Volume.id = sim_vol,
+ }
+
+ sim_vol = {
+ 'vol_id': "VOL_ID_%s" % SimData._random_vpd(4),
+ 'vpd83': SimData._random_vpd(),
+ 'name': vol_name,
+ 'total_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': owner_pool_id,
+ 'consume_size': size_bytes,
+ 'replicate': {
+ dst_vol_id = [
+ {
+ 'src_start_blk': src_start_blk,
+ 'dst_start_blk': dst_start_blk,
+ 'blk_count': blk_count,
+ 'rep_type': Volume.REPLICATE_XXXX,
+ },
+ ],
+ },
+ 'mask': {
+ ag_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ },
+ 'mask_init': {
+ init_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ }
+ }
+
+ self.init_dict = {
+ Initiator.id = sim_init,
+ }
+ sim_init = {
+ 'init_id': Initiator.id,
+ 'init_type': Initiator.TYPE_XXXX,
+ 'name': SimData.SIM_DATA_INIT_NAME,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ self.ag_dict ={
+ AccessGroup.id = sim_ag,
+ }
+ sim_ag = {
+ 'init_ids': [init_id,],
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'name': name,
+ 'ag_id': "AG_ID_%s" % SimData._random_vpd(4),
+ }
+
+ self.fs_dict = {
+ FileSystem.id = sim_fs,
+ }
+ sim_fs = {
+ 'fs_id': "FS_ID_%s" % SimData._random_vpd(4),
+ 'name': fs_name,
+ 'total_space': size_bytes,
+ 'free_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': pool_id,
+ 'consume_size': size_bytes,
+ 'clone': {
+ dst_fs_id: {
+ 'snap_id': snap_id, # None if no snapshot
+ 'files': [ file_path, ] # [] if all files cloned.
+ },
+ },
+ 'snaps' = [snap_id, ],
+ }
+ self.snap_dict = {
+ Snapshot.id: sim_snap,
+ }
+ sim_snap = {
+ 'snap_id': "SNAP_ID_%s" % SimData._random_vpd(4),
+ 'name': snap_name,
+ 'fs_id': fs_id,
+ 'files': [file_path, ],
+ 'timestamp': time.time(),
+ }
+ self.exp_dict = {
+ Export.id: sim_exp,
+ }
+ sim_exp = {
+ 'exp_id': "EXP_ID_%s" % SimData._random_vpd(4),
+ 'fs_id': fs_id,
+ 'exp_path': exp_path,
+ 'auth_type': auth_type,
+ 'root_hosts': [root_host, ],
+ 'rw_hosts': [rw_host, ],
+ 'ro_hosts': [ro_host, ],
+ 'anon_uid': anon_uid,
+ 'anon_gid': anon_gid,
+ 'options': [option, ],
+ }
+ """
+ SIM_DATA_BLK_SIZE = 512
+ SIM_DATA_VERSION = "2.0"
+ SIM_DATA_SYS_ID = 'sim-01'
+ SIM_DATA_INIT_NAME = 'NULL'
+ SIM_DATA_TMO = 30000 # ms
+
+ @staticmethod
+ def _state_signature():
+ return 'LSM_SIMULATOR_DATA_%s' % md5(SimData.SIM_DATA_VERSION)
+
+ def __init__(self):
+ self.tmo = SimData.SIM_DATA_TMO
+ self.version = SimData.SIM_DATA_VERSION
+ self.signature = SimData._state_signature()
+ self.job_num = 0
+ self.job_dict = {
+ # id: SimJob
+ }
+ self.syss = [System(SimData.SIM_DATA_SYS_ID,
+ 'LSM simulated storage plug-in',
+ System.STATUS_OK)]
+ pool_size_200g = size_human_2_size_bytes('200GiB')
+ self.pool_dict = {
+ 'POO1': {
+ 'pool_id': 'POO1',
+ 'name': 'Pool 1',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_000', 'DISK_ID_001'],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'POO2': {
+ 'pool_id': 'POO2',
+ 'name': 'Pool 2',
+ 'total_space': pool_size_200g,
+ 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_ids': ['POO1'],
+ 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ # lsm_test_aggr pool is required by test/runtest.sh
+ 'lsm_test_aggr': {
+ 'pool_id': 'lsm_test_aggr',
+ 'name': 'lsm_test_aggr',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_002', 'DISK_ID_003'],
+ 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.vol_dict = {
+ }
+ self.fs_dict = {
+ }
+ self.snap_dict = {
+ }
+ self.exp_dict = {
+ }
+ disk_size_2t = size_human_2_size_bytes('2TiB')
+ self.disk_dict = {
+ 'DISK_ID_000': {
+ 'disk_id': 'DISK_ID_000',
+ 'name': 'SATA Disk 000',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_001': {
+ 'disk_id': 'DISK_ID_001',
+ 'name': 'SATA Disk 001',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_002': {
+ 'disk_id': 'DISK_ID_002',
+ 'name': 'SAS Disk 002',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_003': {
+ 'disk_id': 'DISK_ID_003',
+ 'name': 'SAS Disk 003',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.ag_dict = {
+ }
+ self.init_dict = {
+ }
+ # Create some volumes, fs and etc
+ self.volume_create(
+ 'POO1', 'Volume 000', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+ self.volume_create(
+ 'POO1', 'Volume 001', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+
+ self.pool_dict['POO3']= {
+ 'pool_id': 'POO3',
+ 'name': 'Pool 3',
+ 'member_type': Pool.MEMBER_TYPE_VOLUME,
+ 'member_ids': [
+ self.vol_dict.values()[0]['vol_id'],
+ self.vol_dict.values()[1]['vol_id'],
+ ],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ return
+
+ def _pool_free_space(self, pool_id):
+ """
+ Calculate out the free size of certain pool.
+ """
+ free_space = self._pool_total_space(pool_id)
+ for sim_vol in self.vol_dict.values():
+ if sim_vol['pool_id'] != pool_id:
+ continue
+ if free_space <= sim_vol['consume_size']:
+ return 0
+ free_space -= sim_vol['consume_size']
+ for sim_fs in self.fs_dict.values():
+ if sim_fs['pool_id'] != pool_id:
+ continue
+ if free_space <= sim_fs['consume_size']:
+ return 0
+ free_space -= sim_fs['consume_size']
+ return free_space
+
+ @staticmethod
+ def _random_vpd(l=16):
+ """
+ Generate a random 16 digit number as hex
+ """
+ vpd = []
+ for i in range(0, l):
+ vpd.append(str('%02X' % (random.randint(0, 255))))
+ return "".join(vpd)
+
+ def _pool_total_space(self, pool_id):
+ """
+ Find out the correct size of RAID pool
+ """
+ member_type = self.pool_dict[pool_id]['member_type']
+ if member_type == Pool.MEMBER_TYPE_POOL:
+ return self.pool_dict[pool_id]['total_space']
+
+ all_size = 0
+ item_size = 0 # disk size, used by RAID 3/4/5/6
+ member_ids = self.pool_dict[pool_id]['member_ids']
+ raid_type = self.pool_dict[pool_id]['raid_type']
+ member_count = len(member_ids)
+
+ if member_type == Pool.MEMBER_TYPE_DISK:
+ for member_id in member_ids:
+ all_size += self.disk_dict[member_id]['total_space']
+ item_size = self.disk_dict[member_id]['total_space']
+
+ elif member_type == Pool.MEMBER_TYPE_VOLUME:
+ for member_id in member_ids:
+ all_size += self.vol_dict[member_id]['total_space']
+ item_size = self.vol_dict[member_id]['total_space']
+
+ if raid_type == Pool.RAID_TYPE_JBOD:
+ return int(all_size)
+ elif raid_type == Pool.RAID_TYPE_RAID0:
+ return int(all_size)
+ elif raid_type == Pool.RAID_TYPE_RAID1 or \
+ raid_type == Pool.RAID_TYPE_RAID10:
+ return int(all_size/2)
+ elif raid_type == Pool.RAID_TYPE_RAID3 or \
+ raid_type == Pool.RAID_TYPE_RAID4 or \
+ raid_type == Pool.RAID_TYPE_RAID5 or \
+ raid_type == Pool.RAID_TYPE_RAID50:
+ return int(all_size - item_size)
+ elif raid_type == Pool.RAID_TYPE_RAID6 or \
+ raid_type == Pool.RAID_TYPE_RAID60:
+ return int(all_size - item_size - item_size)
+ elif raid_type == Pool.RAID_TYPE_RAID51:
+ return int((all_size - item_size)/2)
+ elif raid_type == Pool.RAID_TYPE_RAID61:
+ return int((all_size - item_size - item_size)/2)
+ return 0
+
+ @staticmethod
+ def _block_rounding(size_bytes):
+ return (size_bytes / SimData.SIM_DATA_BLK_SIZE + 1) * \
+ SimData.SIM_DATA_BLK_SIZE
+
+ def job_create(self, returned_item):
+ if True:
+ #if random.randint(0,5) == 1:
+ self.job_num += 1
+ job_id = "JOB_%s" % self.job_num
+ self.job_dict[job_id] = SimJob(returned_item)
+ return job_id, None
+ else:
+ return None, returned_item
+
+ def job_status(self, job_id, flags=0):
+ if job_id in self.job_dict.keys():
+ return self.job_dict[job_id].progress()
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ def job_free(self, job_id, flags=0):
+ if job_id in self.job_dict.keys():
+ del(self.job_dict[job_id])
+ return
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ def set_time_out(self, ms, flags=0):
+ self.tmo = ms
+ return None
+
+ def get_time_out(self, flags=0):
+ return self.tmo
+
+ def systems(self):
+ return self.syss
+
+ def pools(self):
+ rc = []
+ for sim_pool in self.pool_dict.values():
+ sim_pool['total_space'] = \
+ self._pool_total_space(sim_pool['pool_id'])
+ sim_pool['free_space'] = \
+ self._pool_free_space(sim_pool['pool_id'])
+ rc.extend([sim_pool])
+ return rc
+
+ def volumes(self):
+ return self.vol_dict.values()
+
+ def disks(self):
+ return self.disk_dict.values()
+
+ def access_group_list(self):
+ return self.ag_dict.values()
+
+ def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['thinp'] = thinp
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+ return sim_vol
+
+ def volume_delete(self, vol_id, flags=0):
+ if vol_id in self.vol_dict.keys():
+ del(self.vol_dict[vol_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ def volume_resize(self, vol_id, new_size_bytes, flags=0):
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ if vol_id in self.vol_dict.keys():
+ pool_id = self.vol_dict[vol_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < new_size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.vol_dict[vol_id]['total_space'] = new_size_bytes
+ self.vol_dict[vol_id]['consume_size'] = new_size_bytes
+ return self.vol_dict[vol_id]
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ flags=0):
+ if src_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+ size_bytes = self.vol_dict[src_vol_id]['total_space']
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(dst_pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = new_vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = dst_pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+
+ dst_vol_id = vol_id
+ if 'replicate' not in self.vol_dict[src_vol_id].keys():
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ sim_rep = {
+ 'rep_type': rep_type,
+ 'src_start_blk': 0,
+ 'dst_start_blk': 0,
+ 'blk_count': size_bytes,
+ }
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ [sim_rep])
+
+ return sim_vol
+
+ def volume_replicate_range_block_size(self, sys_id, flags=0):
+ return SimData.SIM_DATA_BLK_SIZE
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ flags=0):
+ if src_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % src_vol_id)
+
+ if dst_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % dst_vol_id)
+
+ sim_reps = []
+ for rep_range in ranges:
+ sim_rep = dict()
+ sim_rep['rep_type'] = rep_type
+ sim_rep['src_start_blk'] = rep_range.src_block
+ sim_rep['dst_start_blk'] = rep_range.dest_block
+ sim_rep['blk_count'] = rep_range.block_count
+ sim_reps.extend([sim_rep])
+
+ if 'replicate' not in self.vol_dict[src_vol_id].keys():
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ [sim_reps])
+
+ return None
+
+ def volume_online(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does have indication about volume offline
+ # or online, meanwhile, cmdline does not support volume_online()
+ # yet
+ return None
+
+ def volume_offline(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does have indication about volume offline
+ # or online, meanwhile, cmdline does not support volume_online()
+ # yet
+ return None
+
+ def volume_child_dependency(self, vol_id, flags=0):
+ """
+ If volume is a src or dst of a replication, we return True.
+ """
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ self.vol_dict[vol_id]['replicate']:
+ return True
+ for sim_vol in self.vol_dict.values():
+ if 'replicate' in sim_vol.keys():
+ if vol_id in sim_vol['replicate'].keys():
+ return True
+ return False
+
+ def volume_child_dependency_rm(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ self.vol_dict[vol_id]['replicate']:
+ del self.vol_dict[vol_id]['replicate']
+
+ for sim_vol in self.vol_dict.values():
+ if 'replicate' in sim_vol.keys():
+ if vol_id in sim_vol['replicate'].keys():
+ del sim_vol['replicate'][vol_id]
+ return None
+
+ def ags(self, flags=0):
+ return self.ag_dict.values()
+
+ def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+ sim_ag = dict()
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+
+ sim_ag['init_ids'] = [init_id]
+ sim_ag['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_ag['name'] = name
+ sim_ag['ag_id'] = "AG_ID_%s" % SimData._random_vpd(4)
+ self.ag_dict[sim_ag['ag_id']] = sim_ag
+ return sim_ag
+
+ def access_group_del(self, ag_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ del(self.ag_dict[ag_id])
+ return None
+
+ def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ if init_id in self.ag_dict[ag_id]['init_ids']:
+ return self.ag_dict[ag_id]
+
+ self.ag_dict[ag_id]['init_ids'].extend([init_id])
+
+ return self.ag_dict[ag_id]
+
+ def access_group_del_initiator(self, ag_id, init_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if init_id not in self.init_dict.keys():
+ return None
+
+ if init_id in self.ag_dict[ag_id]['init_ids']:
+ new_init_ids = []
+ for cur_init_id in self.ag_dict[ag_id]['init_ids']:
+ if cur_init_id != init_id:
+ new_init_ids.extend([cur_init_id])
+ del(self.ag_dict[ag_id]['init_ids'])
+ self.ag_dict[ag_id]['init_ids'] = new_init_ids
+ return None
+
+ def access_group_grant(self, ag_id, vol_id, access, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'mask' not in self.vol_dict[vol_id].keys():
+ self.vol_dict[vol_id]['mask'] = dict()
+
+ self.vol_dict[vol_id]['mask'][ag_id] = access
+ return None
+
+ def access_group_revoke(self, ag_id, vol_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'mask' not in self.vol_dict[vol_id].keys():
+ return None
+
+ if ag_id not in self.vol_dict[vol_id]['mask'].keys():
+ return None
+
+ del(self.vol_dict[vol_id]['mask'][ag_id])
+ return None
+
+ def volumes_accessible_by_access_group(self, ag_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ rc = []
+ for sim_vol in self.vol_dict.values():
+ if 'mask' not in sim_vol:
+ continue
+ if ag_id in sim_vol['mask'].keys():
+ rc.extend([sim_vol])
+ return rc
+
+ def access_groups_granted_to_volume(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ sim_ags = []
+ if 'mask' in self.vol_dict[vol_id].keys():
+ ag_ids = self.vol_dict[vol_id]['mask'].keys()
+ for ag_id in ag_ids:
+ sim_ags.extend([self.ag_dict[ag_id]])
+ return sim_ags
+
+    def inits(self, flags=0):
+        """Return all registered initiator dicts."""
+        return self.init_dict.values()
+
+ def initiator_grant(self, init_id, init_type, vol_id, access, flags):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ if 'mask_init' not in self.vol_dict[vol_id].keys():
+ self.vol_dict[vol_id]['mask_init'] = dict()
+
+ self.vol_dict[vol_id]['mask_init'][init_id] = access
+ return None
+
+ def initiator_revoke(self, init_id, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if init_id not in self.init_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+
+ if 'mask_init' in self.vol_dict[vol_id].keys():
+ if init_id in self.vol_dict[vol_id]['mask_init'].keys():
+ del self.vol_dict[vol_id]['mask_init'][init_id]
+
+ return None
+
+ def _ag_ids_of_init(self, init_id):
+ """
+ Find out the access groups defined initiator belong to.
+ Will return a list of access group id or []
+ """
+ rc = []
+ for sim_ag in self.ag_dict.values():
+ if init_id in sim_ag['init_ids']:
+ rc.extend([sim_ag['ag_id']])
+ return rc
+
+    def volumes_accessible_by_initiator(self, init_id, flags=0):
+        """Return the volumes reachable by an initiator, de-duplicated.
+
+        Combines access-group grants (volume 'mask' via the groups the
+        initiator belongs to) with direct per-initiator grants
+        ('mask_init').
+        """
+        if init_id not in self.init_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_INIT,
+                           "No such Initiator: %s" % init_id)
+        # Keyed by vol_id so a volume granted via several paths appears once.
+        rc_dedup_dict = dict()
+        ag_ids = self._ag_ids_of_init(init_id)
+        for ag_id in ag_ids:
+            sim_vols = self.volumes_accessible_by_access_group(ag_id)
+            for sim_vol in sim_vols:
+                rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+
+        for sim_vol in self.vol_dict.values():
+            if 'mask_init' in sim_vol:
+                if init_id in sim_vol['mask_init'].keys():
+                    rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+        return rc_dedup_dict.values()
+
+    def initiators_granted_to_volume(self, vol_id, flags=0):
+        """Return the initiators granted to a volume, de-duplicated.
+
+        Combines members of the access groups in the volume's 'mask'
+        with direct per-initiator grants in 'mask_init'.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        # Keyed by init_id so an initiator granted via both paths appears once.
+        rc_dedup_dict = dict()
+        sim_ags = self.access_groups_granted_to_volume(vol_id, flags)
+        for sim_ag in sim_ags:
+            for init_id in sim_ag['init_ids']:
+                rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+        if 'mask_init' in self.vol_dict[vol_id].keys():
+            for init_id in self.vol_dict[vol_id]['mask_init']:
+                rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+        return rc_dedup_dict.values()
+
+    def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+                        flags=0):
+        """Validate a CHAP setup request for an iSCSI initiator.
+
+        Only type checking is performed; the credentials are not stored
+        because there is no iSCSI CHAP query API yet.
+        """
+        if init_id not in self.init_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_INIT,
+                           "No such Initiator: %s" % init_id)
+        if self.init_dict[init_id]['init_type'] != Initiator.TYPE_ISCSI:
+            raise LsmError(ErrorNumber.UNSUPPORTED_INITIATOR_TYPE,
+                           "Initiator %s is not an iSCSI IQN" % init_id)
+        # No iscsi chap query API yet
+        return None
+
+    def fs(self):
+        """Return all file-system dicts."""
+        return self.fs_dict.values()
+
+ def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_fs = dict()
+ fs_id = "FS_ID_%s" % SimData._random_vpd(4)
+ sim_fs['fs_id'] = fs_id
+ sim_fs['name'] = fs_name
+ sim_fs['total_space'] = size_bytes
+ sim_fs['free_space'] = size_bytes
+ sim_fs['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_fs['pool_id'] = pool_id
+ sim_fs['consume_size'] = size_bytes
+ self.fs_dict[fs_id] = sim_fs
+ return sim_fs
+
+ def fs_delete(self, fs_id, flags=0):
+ if fs_id in self.fs_dict.keys():
+ del(self.fs_dict[fs_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+
+ def fs_resize(self, fs_id, new_size_bytes, flags=0):
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ if fs_id in self.fs_dict.keys():
+ pool_id = self.fs_dict[fs_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < new_size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.fs_dict[fs_id]['total_space'] = new_size_bytes
+ self.fs_dict[fs_id]['free_space'] = new_size_bytes
+ self.fs_dict[fs_id]['consume_size'] = new_size_bytes
+ return self.fs_dict[fs_id]
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such File System: %s" % fs_id)
+
+ def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+ if src_fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % src_fs_id)
+ if snap_id and snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ src_sim_fs = self.fs_dict[src_fs_id]
+ dst_sim_fs = self.fs_create(
+ src_sim_fs['pool_id'], dst_fs_name, src_sim_fs['total_space'], 0)
+ if 'clone' not in src_sim_fs.keys():
+ src_sim_fs['clone'] = dict()
+ src_sim_fs['clone'][dst_sim_fs['fs_id']] = {
+ 'snap_id': snap_id,
+ }
+ return dst_sim_fs
+
+ def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if snap_id and snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # TODO: No file clone query API yet, no need to do anything internally
+ return None
+
+ def fs_snapshots(self, fs_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ rc = []
+ if 'snaps' in self.fs_dict[fs_id].keys():
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ rc.extend([self.snap_dict[snap_id]])
+ return rc
+
+ def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ self.fs_dict[fs_id]['snaps'] = []
+
+ snap_id = "SNAP_ID_%s" % SimData._random_vpd(4)
+ sim_snap = dict()
+ sim_snap['snap_id'] = snap_id
+ sim_snap['name'] = snap_name
+ if files is None:
+ sim_snap['files'] = []
+ else:
+ sim_snap['files'] = files
+ sim_snap['timestamp'] = time.time()
+ self.snap_dict[snap_id] = sim_snap
+ self.fs_dict[fs_id]['snaps'].extend([snap_id])
+ return sim_snap
+
+ def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ del self.snap_dict[snap_id]
+ new_snap_ids = []
+ for old_snap_id in self.fs_dict[fs_id]['snaps']:
+ if old_snap_id != snap_id:
+ new_snap_ids.extend([old_snap_id])
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ flag_all_files, flags):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # Nothing need to done internally for revert.
+ return None
+
+ def fs_child_dependency(self, fs_id, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ return False
+ if files is None or len(files) == 0:
+ if len(self.fs_dict[fs_id]['snaps']) >= 0:
+ return True
+ else:
+ for req_file in files:
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ if len(self.snap_dict[snap_id]['files']) == 0:
+ # We are snapshoting all files
+ return True
+ if req_file in self.snap_dict[snap_id]['files']:
+ return True
+ return False
+
+ def fs_child_dependency_rm(self, fs_id, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ return None
+ if files is None or len(files) == 0:
+ if len(self.fs_dict[fs_id]['snaps']) >= 0:
+ snap_ids = self.fs_dict[fs_id]['snaps']
+ for snap_id in snap_ids:
+ del self.snap_dict[snap_id]
+ del self.fs_dict[fs_id]['snaps']
+ else:
+ for req_file in files:
+ snap_ids_to_rm = []
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ if len(self.snap_dict[snap_id]['files']) == 0:
+ # BUG: if certain snapshot is againsting all files,
+ # what should we do if user request remove
+ # dependency on certain files.
+ # Currently, we do nothing
+ return None
+ if req_file in self.snap_dict[snap_id]['files']:
+ new_files = []
+ for old_file in self.snap_dict[snap_id]['files']:
+ if old_file != req_file:
+ new_files.extend([old_file])
+ if len(new_files) == 0:
+ # all files has been removed from snapshot list.
+ snap_ids_to_rm.extend([snap_id])
+ else:
+ self.snap_dict[snap_id]['files'] = new_files
+ for snap_id in snap_ids_to_rm:
+ del self.snap_dict[snap_id]
+
+ new_snap_ids = []
+ for cur_snap_id in self.fs_dict[fs_id]['snaps']:
+ if cur_snap_id not in snap_ids_to_rm:
+ new_snap_ids.extend([cur_snap_id])
+ if len(new_snap_ids) == 0:
+ del self.fs_dict[fs_id]['snaps']
+ else:
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+    def exports(self, flags=0):
+        """Return all NFS export dicts."""
+        return self.exp_dict.values()
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ sim_exp = dict()
+ sim_exp['exp_id'] = "EXP_ID_%s" % SimData._random_vpd(4)
+ sim_exp['fs_id'] = fs_id
+ if exp_path is None:
+ sim_exp['exp_path'] = "/%s" % sim_exp['exp_id']
+ else:
+ sim_exp['exp_path'] = exp_path
+ sim_exp['auth_type'] = auth_type
+ sim_exp['root_hosts'] = root_hosts
+ sim_exp['rw_hosts'] = rw_hosts
+ sim_exp['ro_hosts'] = ro_hosts
+ sim_exp['anon_uid'] = anon_uid
+ sim_exp['anon_gid'] = anon_gid
+ sim_exp['options'] = options
+ self.exp_dict[sim_exp['exp_id']] = sim_exp
+ return sim_exp
+
+ def fs_unexport(self, exp_id, flags=0):
+ if exp_id not in self.exp_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_NFS,
+ "No such NFS Export: %s" % exp_id)
+ del self.exp_dict[exp_id]
+ return None
+
+    def pool_create(self,
+                    system_id,
+                    pool_name='',
+                    raid_type=Pool.RAID_TYPE_UNKNOWN,
+                    member_type=Pool.MEMBER_TYPE_UNKNOWN,
+                    member_ids=None,
+                    member_count=0,
+                    size_bytes=0,
+                    thinp_type=Pool.THINP_TYPE_UNKNOWN,
+                    flags=0):
+        """Create a pool -- unfinished stub.
+
+        Only auto-names the pool so far; the actual creation logic is
+        still to be written (see the "## Coding" marker below).
+        NOTE(review): currently returns None unconditionally -- confirm
+        this stub is intended to land in this patch.
+        """
+        if pool_name == '':
+            pool_name = 'POOL %s' % SimData._random_vpd(4)
+
+        ## Coding
+        return
diff --git a/lsm/lsm/simulator.py b/lsm/lsm/simulator.py
index ca9d3c4..9a7a75c 100644
--- a/lsm/lsm/simulator.py
+++ b/lsm/lsm/simulator.py
@@ -26,270 +26,17 @@ from data import Pool, Initiator, Volume, BlockRange, System, AccessGroup, \
Snapshot, NfsExport, FileSystem, Capabilities, Disk, OptionalData
from iplugin import INfs, IStorageAreaNetwork
from version import VERSION
+from simarray import SimArray, SimJob

-SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
- tempfile.gettempdir() + '/lsm_sim_data')
-duration = os.getenv("LSM_SIM_TIME", 1)
-
-# Bump this when the sim data layout changes on disk
-SIM_DATA_VERSION = 1
-
-
-class SimJob(object):
- """
- Simulates a longer running job, uses actual wall time. If test cases
- take too long we can reduce time by shortening time duration.
- """
-
- def __calc_progress(self):
- if self.percent < 100:
- end = self.start + self.duration
- now = time.time()
- if now >= end:
- self.percent = 100
- self.status = JobStatus.COMPLETE
- else:
- diff = now - self.start
- self.percent = int(100 * (diff / self.duration))
-
- def __init__(self, item_to_return):
- self.status = JobStatus.INPROGRESS
- self.percent = 0
- self.__item = item_to_return
- self.start = time.time()
- self.duration = float(random.randint(0, int(duration)))
-
- def progress(self):
- """
- Returns a tuple (status, percent, volume)
- """
- self.__calc_progress()
- return self.status, self.percent, self.item
-
- @property
- def item(self):
- if self.percent >= 100:
- return self.__item
- return None
-
- @item.setter
- def item(self, value):
- self.__item = value
-
-
-def _signature(obj):
- """
- Generate some kind of signature for this object, not sure this is ideal.
-
- Hopefully this will save some debug time.
- """
- sig = ''
- keys = obj.__dict__.keys()
- keys.sort()
-
- for k in keys:
- sig = md5(sig + k)
- return sig
-
-
-def _state_signature():
- rc = ''
- objects = [Pool('', '', 0, 0, ''), Volume('', '', '', 1, 1, 0, '', ''),
- AccessGroup('', '', ['']), Initiator('', 0, ''),
- System('', '', 0), FileSystem('', '', 0, 0, '', ''),
- BlockRange(0, 100, 50), Capabilities(),
- NfsExport('', '', '', '', '', '', '', '', '', '', ),
- Snapshot('', '', 10)]
-
- for o in objects:
- rc = md5(rc + _signature(o))
-
- return rc
-
-
-class SimState(object):
- def __init__(self):
- self.version = SIM_DATA_VERSION
- self.sys_info = System('sim-01', 'LSM simulated storage plug-in',
- System.STATUS_OK)
- p1 = Pool('POO1', 'Pool 1', 2 ** 64, 2 ** 64, self.sys_info.id)
- p2 = Pool('POO2', 'Pool 2', 2 ** 64, 2 ** 64, self.sys_info.id)
- p3 = Pool('POO3', 'Pool 3', 2 ** 64, 2 ** 64, self.sys_info.id)
- p4 = Pool('POO4', 'lsm_test_aggr', 2 ** 64, 2 ** 64, self.sys_info.id)
-
- self.block_size = 512
-
- pm1 = {'pool': p1, 'volumes': {}}
- pm2 = {'pool': p2, 'volumes': {}}
- pm3 = {'pool': p3, 'volumes': {}}
- pm4 = {'pool': p4, 'volumes': {}}
-
- self.pools = {p1.id: pm1, p2.id: pm2, p3.id: pm3, p4.id: pm4}
- self.volumes = {}
- self.vol_num = 1
- self.access_groups = {}
-
- self.fs = {}
- self.fs_num = 1
-
- self.tmo = 30000
- self.jobs = {}
- self.job_num = 1
-
- #These express relationships between initiators and volumes. This
- #is done because if you delete either part of the relationship
- #you need to delete the association between them. Holding this stuff
- #in a db would be easier :-)
- self.group_grants = {} # {access group id : {volume id: access }}
-
- #Create a signature
- self.signature = _state_signature()
-
-
-class StorageSimulator(INfs, IStorageAreaNetwork):
+class SimPlugin(INfs, IStorageAreaNetwork):
"""
Simple class that implements enough to allow the framework to be exercised.
"""
-
- @staticmethod
- def __random_vpd(l=16):
- """
- Generate a random 16 digit number as hex
- """
- vpd = []
- for i in range(0, l):
- vpd.append(str('%02X' % (random.randint(0, 255))))
- return "".join(vpd)
-
- def __block_rounding(self, size_bytes):
- """
- Round the requested size to block size.
- """
- return (size_bytes / self.s.block_size) * self.s.block_size
-
- def __create_job(self, returned_item):
- if True:
- #if random.randint(0,5) == 1:
- self.s.job_num += 1
- job = "JOB_" + str(self.s.job_num)
- self.s.jobs[job] = SimJob(returned_item)
- return job, None
- else:
- return None, returned_item
-
- def _version_error(self):
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Stored simulator state incompatible with "
- "simulator, please move or delete %s" %
- self.file)
-
- def _load(self):
- tmp = None
- if os.path.exists(self.file):
- with open(self.file, 'rb') as f:
- tmp = pickle.load(f)
-
- # Going forward we could get smarter about handling this for
- # changes that aren't invasive, but we at least need to check
- # to make sure that the data will work and not cause any
- # undo confusion.
- try:
- if tmp.version != SIM_DATA_VERSION or \
- tmp.signature != _state_signature():
- self._version_error()
- except AttributeError:
- self._version_error()
-
- return tmp
-
- def _save(self):
- f = open(self.file, 'wb')
- pickle.dump(self.s, f)
- f.close()
-
- #If we run via the daemon the file will be owned by libstoragemgmt
- #and if we run sim_lsmplugin stand alone we will be unable to
- #change the permissions.
- try:
- os.chmod(self.file, 0666)
- except OSError:
- pass
-
- def _load_state(self):
- prev = self._load()
- if prev:
- return prev
- return SimState()
-
- @staticmethod
- def _check_sl(string_list):
- """
- String list should be an empty list or a list with items
- """
- if string_list is not None and isinstance(string_list, list):
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_SL, 'Invalid string list')
-
def __init__(self):
-
- self.file = SIM_DATA_FILE
- self.s = self._load_state()
self.uri = None
self.password = None
self.tmo = 0

- def _allocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
-
- rounded_size = self.__block_rounding(size_bytes)
-
- if p.free_space >= rounded_size:
- p.free_space -= rounded_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- return rounded_size
-
- def _deallocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
- p.free_space += size_bytes
-
- @staticmethod
- def _ag_id(name):
- return md5(name)
-
- def _new_access_group(self, name, h):
- return AccessGroup(StorageSimulator._ag_id(name), name,
- [i.id for i in h['initiators']], self.s.sys_info.id)
-
- def _create_vol(self, pool, name, size_bytes):
- actual_size = self._allocate_from_pool(pool.id, size_bytes)
-
- nv = Volume('Vol' + str(self.s.vol_num), name,
- StorageSimulator.__random_vpd(), self.s.block_size,
- (actual_size / self.s.block_size), Volume.STATUS_OK,
- self.s.sys_info.id,
- pool.id)
- self.s.volumes[nv.id] = {'pool': pool, 'volume': nv}
- self.s.vol_num += 1
- return self.__create_job(nv)
-
- def _create_fs(self, pool, name, size_bytes):
- if pool.id in self.s.pools:
- p = self.s.pools[pool.id]['pool']
- actual_size = self._allocate_from_pool(p.id, size_bytes)
-
- new_fs = FileSystem('FS' + str(self.s.fs_num), name, actual_size,
- actual_size, p.id, self.s.sys_info.id)
-
- self.s.fs[new_fs.id] = {'pool': p, 'fs': new_fs, 'ss': {},
- 'exports': {}}
- self.s.fs_num += 1
- return self.__create_job(new_fs)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Pool not found')
-
def startup(self, uri, password, timeout, flags=0):
self.uri = uri
self.password = password
@@ -299,17 +46,38 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
qp = uri_parse(uri)
if 'parameters' in qp and 'statefile' in qp['parameters'] \
and qp['parameters']['statefile'] is not None:
- self.file = qp['parameters']['statefile']
- self._load_state()
+ self.sim_array = SimArray(qp['parameters']['statefile'])
+ else:
+ self.sim_array = SimArray()

return None

+    def shutdown(self, flags=0):
+        """Flush the simulated array state via SimArray.save_state()."""
+        self.sim_array.save_state()
+
+    def job_status(self, job_id, flags=0):
+        """Delegate async-job status query to the simulated array."""
+        return self.sim_array.job_status(job_id, flags)
+
+    def job_free(self, job_id, flags=0):
+        """Delegate async-job release to the simulated array."""
+        return self.sim_array.job_free(job_id, flags)
+
+    @staticmethod
+    def _sim_data_2_lsm(sim_data):
+        """
+        Fake converter.  SimArray already does the SimData to LSM data
+        conversion; we moved the conversion into SimArray to keep this
+        sample plugin clean.
+        In the real world, however, data conversion is usually handled
+        by the plugin itself rather than by the array.
+        """
+        return sim_data
+
def set_time_out(self, ms, flags=0):
- self.tmo = ms
+ self.sim_array.set_time_out(ms, flags)
return None

def get_time_out(self, flags=0):
- return self.tmo
+ return self.sim_array.get_time_out(flags)

def capabilities(self, system, flags=0):
rc = Capabilities()
@@ -319,544 +87,204 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
def plugin_info(self, flags=0):
return "Storage simulator", VERSION

- def shutdown(self, flags=0):
- self._save()
-
def systems(self, flags=0):
- return [self.s.sys_info]
-
- def job_status(self, job_id, flags=0):
- if job_id in self.s.jobs:
- return self.s.jobs[job_id].progress()
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def job_free(self, job_id, flags=0):
- if job_id in self.s.jobs:
- del self.s.jobs[job_id]
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def volumes(self, flags=0):
- return [e['volume'] for e in self.s.volumes.itervalues()]
-
- def _get_volume(self, volume_id):
- for v in self.s.volumes.itervalues():
- if v['volume'].id == volume_id:
- return v['volume']
- return None
+ sim_syss = self.sim_array.systems()
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_syss]

def pools(self, flags=0):
- return [e['pool'] for e in self.s.pools.itervalues()]
-
- def _volume_accessible(self, access_group_id, volume):
+ sim_pools = self.sim_array.pools()
+ return [SimPlugin._sim_data_2_lsm(p) for p in sim_pools]

- if access_group_id in self.s.group_grants:
- ag = self.s.group_grants[access_group_id]
-
- if volume.id in ag:
- return True
-
- return False
-
- def _initiators(self, volume_filter=None):
- rc = []
- if len(self.s.access_groups):
- for k, v in self.s.access_groups.items():
- if volume_filter:
- ag = self._new_access_group(k, v)
- if self._volume_accessible(ag.id, volume_filter):
- rc.extend(v['initiators'])
- else:
- rc.extend(v['initiators'])
-
- #We can have multiples as the same initiator can be in multiple access
- #groups
- remove_dupes = {}
- for x in rc:
- remove_dupes[x.id] = x
-
- return list(remove_dupes.values())
+    def volumes(self, flags=0):
+        """List volumes from the simulated array, converted to LSM data."""
+        sim_vols = self.sim_array.volumes()
+        return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- def initiators(self, flags=0):
- return self._initiators()
+    def disks(self, flags=0):
+        """List disks from the simulated array, converted to LSM data."""
+        sim_disks = self.sim_array.disks()
+        return [SimPlugin._sim_data_2_lsm(d) for d in sim_disks]

def volume_create(self, pool, volume_name, size_bytes, provisioning,
flags=0):
- assert provisioning is not None
- return self._create_vol(pool, volume_name, size_bytes)
+ sim_vol = self.sim_array.volume_create(
+ pool.id, volume_name, size_bytes, provisioning, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)

def volume_delete(self, volume, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
- self._deallocate_from_pool(p.id, v.size_bytes)
- del self.s.volumes[volume.id]
-
- for (k, v) in self.s.group_grants.items():
- if volume.id in v:
- del self.s.group_grants[k][volume.id]
-
- #We only return null or job id.
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
+ return self.sim_array.volume_delete(volume.id, flags)

- def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
- assert rep_type is not None
+    def volume_resize(self, volume, new_size_bytes, flags=0):
+        """Resize a volume on the simulated array; returns converted result."""
+        sim_vol = self.sim_array.volume_resize(
+            volume.id, new_size_bytes, flags)
+        return SimPlugin._sim_data_2_lsm(sim_vol)

- p_id = None
+ def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
+ dst_pool_id = None

if pool is not None:
- p_id = pool.id
- else:
- p_id = volume_src.pool_id
-
- if p_id in self.s.pools and volume_src.id in self.s.volumes:
- p = self.s.pools[p_id]['pool']
- v = self.s.volumes[volume_src.id]['volume']
-
- return self._create_vol(p, name, v.size_bytes)
+ dst_pool_id = pool.id
else:
- if pool.id not in self.s.pools:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Incorrect pool')
-
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- 'Volume not present')
- return None
+ dst_pool_id = volume_src.pool_id
+ return self.sim_array.volume_replicate(
+ dst_pool_id, rep_type, volume_src.id, name, flags)

def volume_replicate_range_block_size(self, system, flags=0):
- return self.s.block_size
+ return self.sim_array.volume_replicate_range_block_size(
+ system.id, flags)

def volume_replicate_range(self, rep_type, volume_src, volume_dest,
ranges, flags=0):
-
- if rep_type not in (Volume.REPLICATE_SNAPSHOT,
- Volume.REPLICATE_CLONE,
- Volume.REPLICATE_COPY,
- Volume.REPLICATE_MIRROR_ASYNC,
- Volume.REPLICATE_MIRROR_SYNC):
- raise LsmError(ErrorNumber.UNSUPPORTED_REPLICATION_TYPE,
- "Rep_type invalid")
-
- if ranges:
- if isinstance(ranges, list):
- for r in ranges:
- if isinstance(r, BlockRange):
- #We could do some overlap range testing etc. here.
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Range element not BlockRange")
-
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Ranges not a list")
-
- #Make sure all the arguments are validated
- if volume_src.id in self.s.volumes \
- and volume_dest.id in self.s.volumes:
- return None
- else:
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_src not found")
- if volume_dest.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_dest not found")
+ return self.sim_array.volume_replicate_range(
+ rep_type, volume_src.id, volume_dest.id, ranges, flags)

def volume_online(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
+ return self.sim_array.volume_online(volume.id, flags)

def volume_offline(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
-
- def volume_resize(self, volume, new_size_bytes, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
-
- current_size = v.size_bytes
- new_size = self.__block_rounding(new_size_bytes)
-
- if new_size == current_size:
- raise LsmError(ErrorNumber.SIZE_SAME,
- 'Volume same size')
-
- if new_size < current_size \
- or p.free_space >= (new_size - current_size):
- p.free_space -= (new_size - current_size)
- v.num_of_blocks = new_size / self.s.block_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- return self.__create_job(v)
-
- def access_group_grant(self, group, volume, access, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id not in self.s.group_grants:
- self.s.group_grants[group.id] = {volume.id: access}
- elif volume.id not in self.s.group_grants[group.id]:
- self.s.group_grants[group.id][volume.id] = access
- else:
- raise LsmError(ErrorNumber.IS_MAPPED, 'Existing access present')
-
- def access_group_revoke(self, group, volume, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id in self.s.group_grants \
- and volume.id in self.s.group_grants[group.id]:
- del self.s.group_grants[group.id][volume.id]
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- 'No volume access to revoke')
+        # NOTE(review): volume_offline() delegates to
+        # sim_array.volume_online() -- this looks like a copy-paste from
+        # volume_online() above; confirm SimArray really has no
+        # volume_offline() before merging.
+        return self.sim_array.volume_online(volume.id, flags)

def access_group_list(self, flags=0):
- rc = []
- for (k, v) in self.s.access_groups.items():
- rc.append(self._new_access_group(k, v))
- return rc
-
- def _get_access_group(self, ag_id):
- groups = self.access_group_list()
- for g in groups:
- if g.id == ag_id:
- return g
- return None
+ sim_ags = self.sim_array.ags()
+ return [SimPlugin._sim_data_2_lsm(a) for a in sim_ags]

def access_group_create(self, name, initiator_id, id_type, system_id,
flags=0):
- if name not in self.s.access_groups:
- self.s.access_groups[name] = \
- {'initiators': [Initiator(initiator_id, id_type, 'UNA')],
- 'access': {}}
- return self._new_access_group(name, self.s.access_groups[name])
- else:
- raise LsmError(ErrorNumber.EXISTS_ACCESS_GROUP,
- "Access group with name exists")
+ sim_ag = self.sim_array.access_group_create(name, initiator_id,
+ id_type, system_id, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)

def access_group_del(self, group, flags=0):
- if group.name in self.s.access_groups:
- del self.s.access_groups[group.name]
-
- if group.id in self.s.group_grants:
- del self.s.group_grants[group.id]
-
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del(group.id, flags)

def access_group_add_initiator(self, group, initiator_id, id_type,
flags=0):
- if group.name in self.s.access_groups:
- self.s.access_groups[group.name]['initiators']. \
- append(Initiator(initiator_id, id_type, 'UNA'))
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ sim_ag = self.sim_array.access_group_add_initiator(
+ group.id, initiator_id, id_type, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)

def access_group_del_initiator(self, group, initiator_id, flags=0):
- if group.name in self.s.access_groups:
- for i in self.s.access_groups[group.name]['initiators']:
- if i.id == initiator_id:
- self.s.access_groups[group.name]['initiators']. \
- remove(i)
- return None
-
- raise LsmError(ErrorNumber.INITIATOR_NOT_IN_ACCESS_GROUP,
- "Initiator not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del_initiator(
+ group.id, initiator_id, flags)

- def volumes_accessible_by_access_group(self, group, flags=0):
- rc = []
- if group.name in self.s.access_groups:
- if group.id in self.s.group_grants:
- for (k, v) in self.s.group_grants[group.id].items():
- rc.append(self._get_volume(k))
+ def access_group_grant(self, group, volume, access, flags=0):
+ return self.sim_array.access_group_grant(
+ group.id, volume.id, access, flags)

- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ def access_group_revoke(self, group, volume, flags=0):
+ return self.sim_array.access_group_revoke(
+ group.id, volume.id, flags)

- def access_groups_granted_to_volume(self, volume, flags=0):
- rc = []
+ def volumes_accessible_by_access_group(self, group, flags=0):
+ sim_vols = self.sim_array.volumes_accessible_by_access_group(
+ group.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- for (k, v) in self.s.group_grants.items():
- if volume.id in self.s.group_grants[k]:
- rc.append(self._get_access_group(k))
- return rc
+ def access_groups_granted_to_volume(self, volume, flags=0):
+ sim_vols = self.sim_array.access_groups_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- def iscsi_chap_auth(self, initiator, in_user, in_password, out_user,
- out_password, flags=0):
- if initiator is None:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- 'Initiator is required')
+    def initiators(self, flags=0):
+        # NOTE(review): unlike the other listing calls, this returns the
+        # SimArray data without passing it through _sim_data_2_lsm() --
+        # confirm this asymmetry is intentional.
+        return self.sim_array.inits(flags)

def initiator_grant(self, initiator_id, initiator_type, volume, access,
flags=0):
- name = initiator_id + volume.id
- group = None
-
- try:
- group = self.access_group_create(name, initiator_id,
- initiator_type,
- volume.system_id)
- result = self.access_group_grant(group, volume, access)
-
- except Exception as e:
- if group:
- self.access_group_del(group)
- raise e
-
- return result
+ return self.sim_array.initiator_grant(
+ initiator_id, initiator_type, volume.id, access, flags)

def initiator_revoke(self, initiator, volume, flags=0):
- name = initiator.id + volume.id
-
- if any(x.id for x in self.initiators()):
- if volume.id in self.s.volumes:
- ag = self._new_access_group(name, self.s.access_groups[name])
-
- if ag:
- self.access_group_del(ag)
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- "No mapping of initiator "
- "and volume")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "Volume not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_INITIATOR,
- "Initiator not found")
-
- return None
+ return self.sim_array.initiator_revoke(initiator.id, volume.id, flags)

def volumes_accessible_by_initiator(self, initiator, flags=0):
- rc = []
- volumes = {}
-
- #Go through each access group, for each one see if our initiator
- #is one of them.
- for ag_name, ag_info in self.s.access_groups.items():
- # Check to see if the initiator is in the group.
- if initiator.id in [i.id for i in ag_info['initiators']]:
- # Look up the privileges for this group, if any
- ag_id = StorageSimulator._ag_id(ag_name)
- if ag_id in self.s.group_grants:
- # Loop through the volumes granted to this AG
- for volume_id in self.s.group_grants[ag_id].keys():
- volumes[volume_id] = None
-
- # We very well may have duplicates, thus the reason we enter the
- # volume id into the hash with no value, we are weeding out dupes
- for vol_id in volumes.keys():
- rc.append(self._get_volume(vol_id))
-
- return rc
+ sim_vols = self.sim_array.volumes_accessible_by_initiator(
+ initiator.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

def initiators_granted_to_volume(self, volume, flags=0):
- return self._initiators(volume)
+ sim_inits = self.sim_array.initiators_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, initiator, in_user, in_password,
+ out_user, out_password, flags=0):
+ return self.sim_array.iscsi_chap_auth(
+ initiator.id, in_user, in_password, out_user, out_password, flags)

def volume_child_dependency(self, volume, flags=0):
- return False
+ return self.sim_array.volume_child_dependency(volume.id, flags)

def volume_child_dependency_rm(self, volume, flags=0):
- return None
+ return self.sim_array.volume_child_dependency_rm(volume.id, flags)

def fs(self, flags=0):
- return [e['fs'] for e in self.s.fs.itervalues()]
-
- def fs_delete(self, fs, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- self._deallocate_from_pool(p.id, f.total_space)
- del self.s.fs[fs.id]
+ sim_fss = self.sim_array.fs()
+ return [SimPlugin._sim_data_2_lsm(f) for f in sim_fss]

- #TODO: Check for exports and remove them.
+ def fs_create(self, pool, name, size_bytes, flags=0):
+ sim_fs = self.sim_array.fs_create(pool.id, name, size_bytes)
+ return SimPlugin._sim_data_2_lsm(sim_fs)

- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ def fs_delete(self, fs, flags=0):
+ return self.sim_array.fs_delete(fs.id, flags)

def fs_resize(self, fs, new_size_bytes, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- #TODO Check to make sure we have enough space before proceeding
- self._deallocate_from_pool(p.id, f.total_space)
- f.total_space = self._allocate_from_pool(p.id, new_size_bytes)
- f.free_space = f.total_space
- return self.__create_job(f)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def fs_create(self, pool, name, size_bytes, flags=0):
- return self._create_fs(pool, name, size_bytes)
+ sim_fs = self.sim_array.fs_resize(
+ fs.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_fs)

def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0):
- #TODO If snapshot is not None, then check for existence.
-
- if src_fs.id in self.s.fs:
- f = self.s.fs[src_fs.id]['fs']
- p = self.s.fs[src_fs.id]['pool']
- return self._create_fs(p, dest_fs_name, f.total_space)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, None, flags)
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, snapshot.id, flags)

def file_clone(self, fs, src_file_name, dest_file_name, snapshot=None,
flags=0):
- #TODO If snapshot is not None, then check for existence.
- if fs.id in self.s.fs:
- if src_file_name is not None and dest_file_name is not None:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Invalid src/destination file names")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, None, flags)
+
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, snapshot.id, flags)

def fs_snapshots(self, fs, flags=0):
- if fs.id in self.s.fs:
- rc = [e for e in self.s.fs[fs.id]['ss'].itervalues()]
- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_snaps = self.sim_array.fs_snapshots(fs.id, flags)
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_snaps]

def fs_snapshot_create(self, fs, snapshot_name, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- for e in self.s.fs[fs.id]['ss'].itervalues():
- if e.name == snapshot_name:
- raise LsmError(ErrorNumber.EXISTS_NAME,
- 'Snapshot name exists')
-
- s = Snapshot(md5(snapshot_name), snapshot_name, time.time())
- self.s.fs[fs.id]['ss'][s.id] = s
- return self.__create_job(s)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_create(
+ fs.id, snapshot_name, files, flags)

def fs_snapshot_delete(self, fs, snapshot, flags=0):
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- del self.s.fs[fs.id]['ss'][snapshot.id]
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_delete(
+ fs.id, snapshot.id, flags)

def fs_snapshot_revert(self, fs, snapshot, files, restore_files,
all_files=False, flags=0):
-
- StorageSimulator._check_sl(files)
- StorageSimulator._check_sl(files)
-
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_revert(
+ fs.id, snapshot.id, files, restore_files, all_files, flags)

def fs_child_dependency(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return False
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency(fs.id, files, flags)

def fs_child_dependency_rm(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency_rm(fs.id, files, flags)

def export_auth(self, flags=0):
+ # The API should change some day
return ["simple"]

def exports(self, flags=0):
- rc = []
- for fs in self.s.fs.itervalues():
- for exp in fs['exports'].values():
- rc.append(exp)
- return rc
+ sim_exps = self.sim_array.exports(flags)
+ return [SimPlugin._sim_data_2_lsm(e) for e in sim_exps]

def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list,
anon_uid, anon_gid, auth_type, options, flags=0):
-
- if fs_id in self.s.fs:
- if export_path is None:
- export_path = "/mnt/lsm/sim/%s" % self.s.fs[fs_id]['fs'].name
-
- export_id = md5(export_path)
-
- export = NfsExport(export_id, fs_id, export_path, auth_type,
- root_list, rw_list, ro_list, anon_uid, anon_gid,
- options)
-
- self.s.fs[fs_id]['exports'][export_id] = export
- return export
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_exp = self.sim_array.fs_export(
+ fs_id, export_path, root_list, rw_list, ro_list,
+ anon_uid, anon_gid, auth_type, options, flags=0)
+ return SimPlugin._sim_data_2_lsm(sim_exp)

def export_remove(self, export, flags=0):
- fs_id = export.fs_id
-
- if fs_id in self.s.fs:
- if export.id in self.s.fs[fs_id]['exports']:
- del self.s.fs[fs_id]['exports'][export.id]
- else:
- raise LsmError(ErrorNumber.FS_NOT_EXPORTED, "FS not exported")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def disks(self, flags=0):
+ return self.sim_array.fs_unexport(export.id, flags)

- rc = []
- # TODO Make these persistent and make it fit into the total model
-
- for i in range(0, 10):
- name = "Sim disk %d" % i
- optionals = None
-
- if flags == Disk.RETRIEVE_FULL_INFO:
- optionals = OptionalData()
- optionals.set('sn', self.__random_vpd(8))
-
- rc.append(Disk(md5(name), name, Disk.DISK_TYPE_HYBRID, 512,
- 1893933056, Disk.STATUS_OK,
- self.s.sys_info.id, optionals))
-
- return rc
diff --git a/lsm/sim_lsmplugin b/lsm/sim_lsmplugin
index a4439a5..d2bee09 100755
--- a/lsm/sim_lsmplugin
+++ b/lsm/sim_lsmplugin
@@ -22,10 +22,10 @@ import syslog

try:
from lsm.pluginrunner import PluginRunner
- from lsm.simulator import StorageSimulator
+ from lsm.simulator import SimPlugin

if __name__ == '__main__':
- PluginRunner(StorageSimulator, sys.argv).run()
+ PluginRunner(SimPlugin, sys.argv).run()
except Exception as e:
#This should be quite rare, but when it does happen this is pretty
#key in understanding what happened, especially when it happens when
--
1.8.3.1
Tony Asleson
2014-02-11 23:34:16 UTC
Permalink
Post by Gris Ge
* simarray.py: Storage array simulator.
SimArray -- Converting SimData into LSM class.
SimData -- Handling storage resources management.
* simulator.py: Sample plugin code for plugin developer.
SimPlugin -- Provide plugin API to LSM
* with this change, 'simulator.py' could be a good sample plugin with
limited non-required codes. We need more documents to explain every calls.
* SimData is acting as a storage array to provide more accurate and flexible
resources management.
* Provide all old features of previous simulator.py.
* Passed the 'make test' (actually is test/runtest.sh).
This patch does not pass when I use my updated client.py as the
simulator is returning values when they should be None. See patch
"[PATCH] client.py: Correct method decorator for return types".

Please fix-up and resubmit. Please make sure `make distcheck` passes
after your changes. This is especially true when you introduce new
files/remove files.

Thanks,
Tony
Tony Asleson
2014-02-12 00:02:06 UTC
Permalink
Post by Tony Asleson
Post by Gris Ge
* simarray.py: Storage array simulator.
SimArray -- Converting SimData into LSM class.
SimData -- Handling storage resources management.
* simulator.py: Sample plugin code for plugin developer.
SimPlugin -- Provide plugin API to LSM
* with this change, 'simulator.py' could be a good sample plugin with
limited non-required codes. We need more documents to explain every calls.
* SimData is acting as a storage array to provide more accurate and flexible
resources management.
* Provide all old features of previous simulator.py.
* Passed the 'make test' (actually is test/runtest.sh).
This patch does not pass when I use my updated client.py as the
simulator is returning values when they should be None. See patch
"[PATCH] client.py: Correct method decorator for return types".
Please fix-up and resubmit. Please make sure `make distcheck` passes
after your changes. This is especially true when you introduce new
files/remove files.
One other thing. If you use an old simulator state, you fail with an
exception as the data on disk is incompatible. I was expecting a nice
message about the simulator data being incompatible, but get:

error: -32601 msg: 'module' object has no attribute 'SimState'

It looks like you changed what I was doing before and this new way isn't
catching the fact that the on disk data is indeed incompatible.

Please fix this too.

Thanks!

Regards,
Tony
Gris Ge
2014-02-12 09:01:50 UTC
Permalink
Problem:
'make check' will occasionally fail (roughly 1 run out of 20) at
'test_smoke_test' due to an incorrect initiator count found after the
simulator plugin is initialized.

Root cause:
Every test case is forked within the same second, which causes them all to
use the same random seed and therefore get the same random numbers.
When 'test_smoke_test' and 'test_initiator_methods' share the
same state file, the initiator count might not be 0.

Fix:
The random seed is now different for each test case after adding its PID to it.

Signed-off-by: Gris Ge <***@redhat.com>
---
test/tester.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/tester.c b/test/tester.c
index 4e8de1f..d409aa7 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -43,8 +43,8 @@ void generateRandom(char *buff, uint32_t len)

if( !seed ) {
seed = time(NULL);
- srandom(seed);
}
+ srandom(seed + getpid());

if( buff && (len > 1) ) {
for(i = 0; i < (len - 1); ++i) {
--
1.8.3.1
Gris Ge
2014-02-12 09:01:51 UTC
Permalink
* New design of simulator:
* simarray.py: Storage array simulator.
SimArray -- Converting SimData into LSM class.
SimData -- Handling storage resources management.

* simulator.py: Sample plugin code for plugin developer.
SimPlugin -- Provide plugin API to LSM
* With this change, 'simulator.py' can serve as a good sample plugin with
  little non-essential code. We need more documentation to explain each call.
* SimData is acting as a storage array to provide more accurate and flexible
resources management.
* Provide all old features of previous simulator.py.
* Passed the 'make check', 'make distcheck' and 'rpmbuild'.

Changes since V1:
* Fixed the complaint message shown when an old-version state file is found.
* Fixed the returns of initiator_grant() and etc.
* Fixed Makefile and rpm spec file for the new simarray.py files.

Signed-off-by: Gris Ge <***@redhat.com>
---
libstoragemgmt.spec.in | 1 +
lsm/Makefile.am | 1 +
lsm/lsm/__init__.py | 3 +-
lsm/lsm/simarray.py | 1309 ++++++++++++++++++++++++++++++++++++++++++++++++
lsm/lsm/simulator.py | 850 +++++--------------------------
lsm/sim_lsmplugin | 4 +-
6 files changed, 1454 insertions(+), 714 deletions(-)
create mode 100644 lsm/lsm/simarray.py

diff --git a/libstoragemgmt.spec.in b/libstoragemgmt.spec.in
index 6e64d8f..eacb3fd 100644
--- a/libstoragemgmt.spec.in
+++ b/libstoragemgmt.spec.in
@@ -246,6 +246,7 @@ fi
%{python_sitelib}/lsm/iplugin.*
%{python_sitelib}/lsm/pluginrunner.*
%{python_sitelib}/lsm/simulator.*
+%{python_sitelib}/lsm/simarray.*
%{python_sitelib}/lsm/transport.*
%{python_sitelib}/lsm/version.*
%{_bindir}/sim_lsmplugin
diff --git a/lsm/Makefile.am b/lsm/Makefile.am
index 3a405d5..f4dde10 100644
--- a/lsm/Makefile.am
+++ b/lsm/Makefile.am
@@ -24,6 +24,7 @@ lsm_PYTHON = lsm/__init__.py \
lsm/ontap.py \
lsm/pluginrunner.py \
lsm/simulator.py \
+ lsm/simarray.py \
lsm/smis.py \
lsm/smisproxy.py \
lsm/transport.py \
diff --git a/lsm/lsm/__init__.py b/lsm/lsm/__init__.py
index 3407382..71bb7e2 100644
--- a/lsm/lsm/__init__.py
+++ b/lsm/lsm/__init__.py
@@ -17,6 +17,7 @@ from data import DataEncoder, DataDecoder, IData, Initiator, Volume, Pool, \
from iplugin import IPlugin, IStorageAreaNetwork, INetworkAttachedStorage, INfs

from pluginrunner import PluginRunner
-from simulator import StorageSimulator, SimJob, SimState
+from simulator import SimPlugin
+from simarray import SimData, SimJob, SimArray
from transport import Transport
from version import VERSION
diff --git a/lsm/lsm/simarray.py b/lsm/lsm/simarray.py
new file mode 100644
index 0000000..8a19fd2
--- /dev/null
+++ b/lsm/lsm/simarray.py
@@ -0,0 +1,1309 @@
+# Copyright (C) 2011-2013 Red Hat, Inc.
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: tasleson
+# Gris Ge <***@redhat.com>
+
+# TODO: 1. Introduce constant check by using state_to_str() converting.
+# 2. Snapshot should consume space in pool.
+
+import random
+import pickle
+import tempfile
+import os
+import time
+
+from common import md5, LsmError, ErrorNumber, size_human_2_size_bytes, \
+ JobStatus
+from data import System, Volume, Disk, Pool, FileSystem, AccessGroup, \
+ Initiator, BlockRange, Snapshot, NfsExport
+
+class SimJob(object):
+ """
+ Simulates a longer running job, uses actual wall time. If test cases
+ take too long we can reduce time by shortening time duration.
+ """
+
+ def _calc_progress(self):
+ if self.percent < 100:
+ end = self.start + self.duration
+ now = time.time()
+ if now >= end:
+ self.percent = 100
+ self.status = JobStatus.COMPLETE
+ else:
+ diff = now - self.start
+ self.percent = int(100 * (diff / self.duration))
+
+ def __init__(self, item_to_return):
+ duration = os.getenv("LSM_SIM_TIME", 1)
+ self.status = JobStatus.INPROGRESS
+ self.percent = 0
+ self.__item = item_to_return
+ self.start = time.time()
+ self.duration = float(random.randint(0, int(duration)))
+
+ def progress(self):
+ """
+ Returns a tuple (status, percent, data)
+ """
+ self._calc_progress()
+ return self.status, self.percent, self.item
+
+ @property
+ def item(self):
+ if self.percent >= 100:
+ return self.__item
+ return None
+
+ @item.setter
+ def item(self, value):
+ self.__item = value
+
+
+class SimArray(object):
+ SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
+ tempfile.gettempdir() + '/lsm_sim_data')
+
+ @staticmethod
+ def _version_error(dump_file):
+ raise LsmError(ErrorNumber.INVALID_ARGUMENT,
+ "Stored simulator state incompatible with "
+ "simulator, please move or delete %s" %
+ dump_file)
+
+ def __init__(self, dump_file=None):
+ if dump_file is None:
+ self.dump_file = SimArray.SIM_DATA_FILE
+ else:
+ self.dump_file = dump_file
+
+ if os.path.exists(self.dump_file):
+ try:
+ with open(self.dump_file, 'rb') as f:
+ self.data = pickle.load(f)
+
+ # Going forward we could get smarter about handling this for
+ # changes that aren't invasive, but we at least need to check
+ # to make sure that the data will work and not cause any
+                # undue confusion.
+ if self.data.version != SimData.SIM_DATA_VERSION or \
+ self.data.signature != SimData._state_signature():
+ SimArray._version_error(self.dump_file)
+ except AttributeError:
+ SimArray._version_error(self.dump_file)
+
+ else:
+ self.data = SimData()
+
+ def save_state(self):
+ fh_dump_file = open(self.dump_file, 'wb')
+ pickle.dump(self.data, fh_dump_file)
+ fh_dump_file.close()
+
+ def job_status(self, job_id, flags=0):
+ return self.data.job_status(job_id, flags=0)
+
+ def job_free(self, job_id, flags=0):
+ return self.data.job_free(job_id, flags=0)
+
+ def set_time_out(self, ms, flags=0):
+ return self.data.set_time_out(ms, flags)
+
+ def get_time_out(self, flags=0):
+ return self.data.get_time_out(flags)
+
+ def systems(self):
+ return self.data.systems()
+
+ @staticmethod
+ def _sim_vol_2_lsm(sim_vol):
+ return Volume(sim_vol['vol_id'], sim_vol['name'], sim_vol['vpd83'],
+ SimData.SIM_DATA_BLK_SIZE,
+ int(sim_vol['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Volume.STATUS_OK, sim_vol['sys_id'],
+ sim_vol['pool_id'])
+
+ def volumes(self):
+ sim_vols = self.data.volumes()
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def pools(self):
+ rc = []
+ sim_pools = self.data.pools()
+ for sim_pool in sim_pools:
+ pool = Pool(sim_pool['pool_id'], sim_pool['name'],
+ sim_pool['total_space'], sim_pool['free_space'],
+ sim_pool['sys_id'])
+ rc.extend([pool])
+ return rc
+
+ def disks(self):
+ rc = []
+ sim_disks = self.data.disks()
+ for sim_disk in sim_disks:
+ disk = Disk(sim_disk['disk_id'], sim_disk['name'],
+ sim_disk['disk_type'], SimData.SIM_DATA_BLK_SIZE,
+ int(sim_disk['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Disk.STATUS_OK, sim_disk['sys_id'])
+ rc.extend([disk])
+ return rc
+
+ def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+ sim_vol = self.data.volume_create(
+ pool_id, vol_name, size_bytes, thinp, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_delete(self, vol_id, flags=0):
+ self.data.volume_delete(vol_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ def volume_resize(self, vol_id, new_size_bytes, flags=0):
+ sim_vol = self.data.volume_resize(vol_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ flags=0):
+ sim_vol = self.data.volume_replicate(
+ dst_pool_id, rep_type, src_vol_id, new_vol_name, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate_range_block_size(self, sys_id, flags=0):
+ return self.data.volume_replicate_range_block_size(sys_id, flags)
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ flags=0):
+ return self.data.job_create(
+ self.data.volume_replicate_range(
+ rep_type, src_vol_id, dst_vol_id, ranges, flags))[0]
+
+ def volume_online(self, vol_id, flags=0):
+ return self.data.volume_online(vol_id, flags)
+
+ def volume_offline(self, vol_id, flags=0):
+ return self.data.volume_offline(vol_id, flags)
+
+ def volume_child_dependency(self, vol_id, flags=0):
+ return self.data.volume_child_dependency(vol_id, flags)
+
+ def volume_child_dependency_rm(self, vol_id, flags=0):
+ return self.data.job_create(
+ self.data.volume_child_dependency_rm(vol_id, flags))[0]
+
+ @staticmethod
+ def _sim_fs_2_lsm(sim_fs):
+ return FileSystem(sim_fs['fs_id'], sim_fs['name'],
+ sim_fs['total_space'], sim_fs['free_space'],
+ sim_fs['pool_id'], sim_fs['sys_id'])
+
+ def fs(self):
+ sim_fss = self.data.fs()
+ return [SimArray._sim_fs_2_lsm(f) for f in sim_fss]
+
+ def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+ sim_fs = self.data.fs_create(pool_id, fs_name, size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_delete(self, fs_id, flags=0):
+ self.data.fs_delete(fs_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ def fs_resize(self, fs_id, new_size_bytes, flags=0):
+ sim_fs = self.data.fs_resize(fs_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+ sim_fs = self.data.fs_clone(src_fs_id, dst_fs_name, snap_id, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.file_clone(
+ fs_id, src_fs_name, dst_fs_name, snap_id, flags))[0]
+
+ @staticmethod
+ def _sim_snap_2_lsm(sim_snap):
+ return Snapshot(sim_snap['snap_id'], sim_snap['name'],
+ sim_snap['timestamp'])
+
+ def fs_snapshots(self, fs_id, flags=0):
+ sim_snaps = self.data.fs_snapshots(fs_id, flags)
+ return [SimArray._sim_snap_2_lsm(s) for s in sim_snaps]
+
+ def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+ sim_snap = self.data.fs_snapshot_create(fs_id, snap_name, files,
+ flags)
+ return self.data.job_create(SimArray._sim_snap_2_lsm(sim_snap))
+
+ def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.fs_snapshot_delete(fs_id, snap_id, flags))[0]
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ flag_all_files, flags):
+ return self.data.job_create(
+ self.data.fs_snapshot_revert(
+ fs_id, snap_id, files, restore_files,
+ flag_all_files, flags))[0]
+
+ def fs_child_dependency(self, fs_id, files, flags=0):
+ return self.data.fs_child_dependency(fs_id, files, flags)
+
+ def fs_child_dependency_rm(self, fs_id, files, flags=0):
+ return self.data.job_create(
+ self.data.fs_child_dependency_rm(fs_id, files, flags))[0]
+
+ @staticmethod
+ def _sim_exp_2_lsm(sim_exp):
+ return NfsExport(
+ sim_exp['exp_id'], sim_exp['fs_id'], sim_exp['exp_path'],
+ sim_exp['auth_type'], sim_exp['root_hosts'], sim_exp['rw_hosts'],
+ sim_exp['ro_hosts'], sim_exp['anon_uid'], sim_exp['anon_gid'],
+ sim_exp['options'])
+
+ def exports(self, flags=0):
+ sim_exps = self.data.exports(flags)
+ return [SimArray._sim_exp_2_lsm(e) for e in sim_exps]
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags=0):
+ sim_exp = self.data.fs_export(
+ fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags)
+ return SimArray._sim_exp_2_lsm(sim_exp)
+
+ def fs_unexport(self, exp_id, flags=0):
+ return self.data.fs_unexport(exp_id, flags)
+
+ @staticmethod
+ def _sim_ag_2_lsm(sim_ag):
+ return AccessGroup(sim_ag['ag_id'], sim_ag['name'],
+ sim_ag['init_ids'], sim_ag['sys_id'])
+
+ def ags(self):
+ sim_ags = self.data.ags()
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+ sim_ag = self.data.access_group_create(
+ name, init_id, init_type, sys_id, flags)
+ return SimArray._sim_ag_2_lsm(sim_ag)
+
+ def access_group_del(self, ag_id, flags=0):
+ return self.data.access_group_del(ag_id, flags)
+
+ def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+ return self.data.access_group_add_initiator(
+ ag_id, init_id, init_type, flags)
+
+ def access_group_del_initiator(self, ag_id, init_id, flags=0):
+ return self.data.access_group_del_initiator(ag_id, init_id, flags)
+
+ def access_group_grant(self, ag_id, vol_id, access, flags=0):
+ return self.data.access_group_grant(ag_id, vol_id, access, flags)
+
+ def access_group_revoke(self, ag_id, vol_id, flags=0):
+ return self.data.access_group_revoke(ag_id, vol_id, flags)
+
+ def volumes_accessible_by_access_group(self, ag_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_access_group(ag_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def access_groups_granted_to_volume(self, vol_id, flags=0):
+ sim_ags = self.data.access_groups_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ @staticmethod
+ def _sim_init_2_lsm(sim_init):
+ return Initiator(sim_init['init_id'], sim_init['init_type'],
+ sim_init['name'])
+
+ def inits(self, flags=0):
+ sim_inits = self.data.inits()
+ return [SimArray._sim_init_2_lsm(a) for a in sim_inits]
+
+ def initiator_grant(self, init_id, init_type, vol_id, access, flags=0):
+ return self.data.initiator_grant(
+ init_id, init_type, vol_id, access, flags)
+
+ def initiator_revoke(self, init_id, vol_id, flags=0):
+ return self.data.initiator_revoke(init_id, vol_id, flags)
+
+ def volumes_accessible_by_initiator(self, init_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_initiator(init_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def initiators_granted_to_volume(self, vol_id, flags=0):
+ sim_inits = self.data.initiators_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_init_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ flags=0):
+ return self.data.iscsi_chap_auth(init_id, in_user, in_pass, out_user,
+ out_pass, flags)
+
+
+class SimData(object):
+ """
+ Rules here are:
+    * we don't store the same data twice
+    * we don't store data which could be calculated out
+
+ self.vol_dict = {
+ Volume.id = sim_vol,
+ }
+
+ sim_vol = {
+ 'vol_id': "VOL_ID_%s" % SimData._random_vpd(4),
+ 'vpd83': SimData._random_vpd(),
+ 'name': vol_name,
+ 'total_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': owner_pool_id,
+ 'consume_size': size_bytes,
+ 'replicate': {
+ dst_vol_id = [
+ {
+ 'src_start_blk': src_start_blk,
+ 'dst_start_blk': dst_start_blk,
+ 'blk_count': blk_count,
+ 'rep_type': Volume.REPLICATE_XXXX,
+ },
+ ],
+ },
+ 'mask': {
+ ag_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ },
+ 'mask_init': {
+ init_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ }
+ }
+
+ self.init_dict = {
+ Initiator.id = sim_init,
+ }
+ sim_init = {
+ 'init_id': Initiator.id,
+ 'init_type': Initiator.TYPE_XXXX,
+ 'name': SimData.SIM_DATA_INIT_NAME,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ self.ag_dict ={
+ AccessGroup.id = sim_ag,
+ }
+ sim_ag = {
+ 'init_ids': [init_id,],
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'name': name,
+ 'ag_id': "AG_ID_%s" % SimData._random_vpd(4),
+ }
+
+ self.fs_dict = {
+ FileSystem.id = sim_fs,
+ }
+ sim_fs = {
+ 'fs_id': "FS_ID_%s" % SimData._random_vpd(4),
+ 'name': fs_name,
+ 'total_space': size_bytes,
+ 'free_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': pool_id,
+ 'consume_size': size_bytes,
+ 'clone': {
+ dst_fs_id: {
+ 'snap_id': snap_id, # None if no snapshot
+ 'files': [ file_path, ] # [] if all files cloned.
+ },
+ },
+ 'snaps' = [snap_id, ],
+ }
+ self.snap_dict = {
+ Snapshot.id: sim_snap,
+ }
+ sim_snap = {
+ 'snap_id': "SNAP_ID_%s" % SimData._random_vpd(4),
+ 'name': snap_name,
+ 'fs_id': fs_id,
+ 'files': [file_path, ],
+ 'timestamp': time.time(),
+ }
+ self.exp_dict = {
+ Export.id: sim_exp,
+ }
+ sim_exp = {
+ 'exp_id': "EXP_ID_%s" % SimData._random_vpd(4),
+ 'fs_id': fs_id,
+ 'exp_path': exp_path,
+ 'auth_type': auth_type,
+ 'root_hosts': [root_host, ],
+ 'rw_hosts': [rw_host, ],
+ 'ro_hosts': [ro_host, ],
+ 'anon_uid': anon_uid,
+ 'anon_gid': anon_gid,
+ 'options': [option, ],
+ }
+ """
+ SIM_DATA_BLK_SIZE = 512
+ SIM_DATA_VERSION = "2.0"
+ SIM_DATA_SYS_ID = 'sim-01'
+ SIM_DATA_INIT_NAME = 'NULL'
+ SIM_DATA_TMO = 30000 # ms
+
+ @staticmethod
+ def _state_signature():
+ return 'LSM_SIMULATOR_DATA_%s' % md5(SimData.SIM_DATA_VERSION)
+
+ def __init__(self):
+ self.tmo = SimData.SIM_DATA_TMO
+ self.version = SimData.SIM_DATA_VERSION
+ self.signature = SimData._state_signature()
+ self.job_num = 0
+ self.job_dict = {
+ # id: SimJob
+ }
+ self.syss = [System(SimData.SIM_DATA_SYS_ID,
+ 'LSM simulated storage plug-in',
+ System.STATUS_OK)]
+ pool_size_200g = size_human_2_size_bytes('200GiB')
+ self.pool_dict = {
+ 'POO1': {
+ 'pool_id': 'POO1',
+ 'name': 'Pool 1',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_000', 'DISK_ID_001'],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'POO2': {
+ 'pool_id': 'POO2',
+ 'name': 'Pool 2',
+ 'total_space': pool_size_200g,
+ 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_ids': ['POO1'],
+ 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+        # lsm_test_aggr pool is required by test/runtest.sh
+ 'lsm_test_aggr': {
+ 'pool_id': 'lsm_test_aggr',
+ 'name': 'lsm_test_aggr',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_002', 'DISK_ID_003'],
+ 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.vol_dict = {
+ }
+ self.fs_dict = {
+ }
+ self.snap_dict = {
+ }
+ self.exp_dict = {
+ }
+ disk_size_2t = size_human_2_size_bytes('2TiB')
+ self.disk_dict = {
+ 'DISK_ID_000': {
+ 'disk_id': 'DISK_ID_000',
+ 'name': 'SATA Disk 000',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_001': {
+ 'disk_id': 'DISK_ID_001',
+ 'name': 'SATA Disk 001',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_002': {
+ 'disk_id': 'DISK_ID_002',
+ 'name': 'SAS Disk 002',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_003': {
+ 'disk_id': 'DISK_ID_003',
+ 'name': 'SAS Disk 003',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.ag_dict = {
+ }
+ self.init_dict = {
+ }
+ # Create some volumes, fs and etc
+ self.volume_create(
+ 'POO1', 'Volume 000', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+ self.volume_create(
+ 'POO1', 'Volume 001', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+
+ self.pool_dict['POO3']= {
+ 'pool_id': 'POO3',
+ 'name': 'Pool 3',
+ 'member_type': Pool.MEMBER_TYPE_VOLUME,
+ 'member_ids': [
+ self.vol_dict.values()[0]['vol_id'],
+ self.vol_dict.values()[1]['vol_id'],
+ ],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ return
+
+    def _pool_free_space(self, pool_id):
+        """
+        Calculate out the free size of certain pool.
+
+        Free space is the pool's total space minus the 'consume_size' of
+        every volume and file system carved from it, clamped at zero.
+        """
+        free_space = self._pool_total_space(pool_id)
+        for sim_vol in self.vol_dict.values():
+            if sim_vol['pool_id'] != pool_id:
+                continue
+            # Clamp at zero instead of returning a negative size.
+            if free_space <= sim_vol['consume_size']:
+                return 0
+            free_space -= sim_vol['consume_size']
+        for sim_fs in self.fs_dict.values():
+            if sim_fs['pool_id'] != pool_id:
+                continue
+            if free_space <= sim_fs['consume_size']:
+                return 0
+            free_space -= sim_fs['consume_size']
+        return free_space
+
+    @staticmethod
+    def _random_vpd(l=16):
+        """
+        Generate l random bytes rendered as a 2*l character uppercase hex
+        string (default 16 bytes -> 32 hex digits, VPD83-style).
+        """
+        vpd = []
+        for i in range(0, l):
+            vpd.append(str('%02X' % (random.randint(0, 255))))
+        return "".join(vpd)
+
+    def _pool_total_space(self, pool_id):
+        """
+        Find out the correct size of RAID pool.
+
+        For sub-pools (MEMBER_TYPE_POOL) the stored 'total_space' is
+        authoritative; otherwise usable capacity is derived from the
+        member sizes and the pool's RAID level.
+        """
+        member_type = self.pool_dict[pool_id]['member_type']
+        if member_type == Pool.MEMBER_TYPE_POOL:
+            return self.pool_dict[pool_id]['total_space']
+
+        all_size = 0
+        item_size = 0  # disk size, used by RAID 3/4/5/6
+        member_ids = self.pool_dict[pool_id]['member_ids']
+        raid_type = self.pool_dict[pool_id]['raid_type']
+        member_count = len(member_ids)
+
+        # NOTE(review): item_size ends up as the *last* member's size, so
+        # the parity math below assumes all members are equally sized --
+        # confirm that assumption holds for simulated pools.
+        if member_type == Pool.MEMBER_TYPE_DISK:
+            for member_id in member_ids:
+                all_size += self.disk_dict[member_id]['total_space']
+                item_size = self.disk_dict[member_id]['total_space']
+
+        elif member_type == Pool.MEMBER_TYPE_VOLUME:
+            for member_id in member_ids:
+                all_size += self.vol_dict[member_id]['total_space']
+                item_size = self.vol_dict[member_id]['total_space']
+
+        if raid_type == Pool.RAID_TYPE_JBOD:
+            return int(all_size)
+        elif raid_type == Pool.RAID_TYPE_RAID0:
+            return int(all_size)
+        elif raid_type == Pool.RAID_TYPE_RAID1 or \
+           raid_type == Pool.RAID_TYPE_RAID10:
+            # Mirrored: half the raw capacity is usable.
+            return int(all_size/2)
+        elif raid_type == Pool.RAID_TYPE_RAID3 or \
+             raid_type == Pool.RAID_TYPE_RAID4 or \
+             raid_type == Pool.RAID_TYPE_RAID5 or \
+             raid_type == Pool.RAID_TYPE_RAID50:
+            # One member's worth of parity.
+            return int(all_size - item_size)
+        elif raid_type == Pool.RAID_TYPE_RAID6 or \
+             raid_type == Pool.RAID_TYPE_RAID60:
+            # Two members' worth of parity.
+            return int(all_size - item_size - item_size)
+        elif raid_type == Pool.RAID_TYPE_RAID51:
+            return int((all_size - item_size)/2)
+        elif raid_type == Pool.RAID_TYPE_RAID61:
+            return int((all_size - item_size - item_size)/2)
+        # Unknown RAID type: report no usable capacity.
+        return 0
+
+    @staticmethod
+    def _block_rounding(size_bytes):
+        """
+        Round size_bytes up to the nearest multiple of SIM_DATA_BLK_SIZE.
+
+        Fixed: the previous '(size / blk + 1) * blk' form always added a
+        full extra block, even when size_bytes was already block-aligned
+        (e.g. 512 was rounded to 1024). Ceiling division keeps aligned
+        sizes unchanged while still rounding partial blocks up.
+        """
+        return ((size_bytes + SimData.SIM_DATA_BLK_SIZE - 1) /
+                SimData.SIM_DATA_BLK_SIZE) * SimData.SIM_DATA_BLK_SIZE
+
+    def job_create(self, returned_item):
+        """
+        Wrap returned_item into a simulated async job.
+        Returns (job_id, None) for async; (None, item) for sync.
+        """
+        # Currently every call is forced down the async path; flip the
+        # commented randint() line to mix sync and async completions.
+        if True:
+            #if random.randint(0,5) == 1:
+            self.job_num += 1
+            job_id = "JOB_%s" % self.job_num
+            self.job_dict[job_id] = SimJob(returned_item)
+            return job_id, None
+        else:
+            return None, returned_item
+
+    def job_status(self, job_id, flags=0):
+        # Returns the SimJob's (status, percent, item) tuple.
+        if job_id in self.job_dict.keys():
+            return self.job_dict[job_id].progress()
+        raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+                       'Non-existent job: %s' % job_id)
+
+    def job_free(self, job_id, flags=0):
+        # Drop a completed (or abandoned) job from the job table.
+        if job_id in self.job_dict.keys():
+            del(self.job_dict[job_id])
+            return
+        raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+                       'Non-existent job: %s' % job_id)
+
+    def set_time_out(self, ms, flags=0):
+        # Store the requested timeout (milliseconds); not enforced here.
+        self.tmo = ms
+        return None
+
+    def get_time_out(self, flags=0):
+        return self.tmo
+
+    def systems(self):
+        # Single simulated system created in __init__.
+        return self.syss
+
+    def pools(self):
+        """
+        Return all pool dicts, refreshing total/free space on each call
+        since volumes and file systems may have been created or deleted.
+        """
+        rc = []
+        for sim_pool in self.pool_dict.values():
+            sim_pool['total_space'] = \
+                self._pool_total_space(sim_pool['pool_id'])
+            sim_pool['free_space'] = \
+                self._pool_free_space(sim_pool['pool_id'])
+            rc.extend([sim_pool])
+        return rc
+
+    def volumes(self):
+        return self.vol_dict.values()
+
+    def disks(self):
+        return self.disk_dict.values()
+
+    def access_group_list(self):
+        # Same data as ags(); kept for API compatibility.
+        return self.ag_dict.values()
+
+    def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+        """
+        Create a volume in pool_id, rounding size to a block boundary.
+        Returns the new volume's sim data dict.
+        Raises LsmError(SIZE_INSUFFICIENT_SPACE) when the pool is full.
+        """
+        size_bytes = SimData._block_rounding(size_bytes)
+        # check free size
+        free_space = self._pool_free_space(pool_id)
+        if (free_space < size_bytes):
+            raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+                           "Insufficient space in pool")
+        sim_vol = dict()
+        vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+        sim_vol['vol_id'] = vol_id
+        sim_vol['vpd83'] = SimData._random_vpd()
+        sim_vol['name'] = vol_name
+        sim_vol['total_space'] = size_bytes
+        sim_vol['thinp'] = thinp
+        sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+        sim_vol['pool_id'] = pool_id
+        # Fully provisioned in the simulation: consumes its whole size.
+        sim_vol['consume_size'] = size_bytes
+        self.vol_dict[vol_id] = sim_vol
+        return sim_vol
+
+    def volume_delete(self, vol_id, flags=0):
+        # Remove the volume; any 'mask'/'replicate' data dies with it.
+        if vol_id in self.vol_dict.keys():
+            del(self.vol_dict[vol_id])
+            return
+        raise LsmError(ErrorNumber.INVALID_VOLUME,
+                       "No such volume: %s" % vol_id)
+
+    def volume_resize(self, vol_id, new_size_bytes, flags=0):
+        """
+        Resize a volume, rounding the new size to a block boundary.
+        Returns the updated volume dict.
+        """
+        new_size_bytes = SimData._block_rounding(new_size_bytes)
+        if vol_id in self.vol_dict.keys():
+            pool_id = self.vol_dict[vol_id]['pool_id']
+            # NOTE(review): free space does not credit back this volume's
+            # current consume_size, so growing a large volume can fail
+            # even when the delta would fit -- confirm intended.
+            free_space = self._pool_free_space(pool_id)
+            if (free_space < new_size_bytes):
+                raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+                               "Insufficient space in pool")
+
+            self.vol_dict[vol_id]['total_space'] = new_size_bytes
+            self.vol_dict[vol_id]['consume_size'] = new_size_bytes
+            return self.vol_dict[vol_id]
+        raise LsmError(ErrorNumber.INVALID_VOLUME,
+                       "No such volume: %s" % vol_id)
+
+    def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+                         flags=0):
+        """
+        Replicate src_vol_id into a new volume in dst_pool_id and record
+        the replication relationship on the source volume.
+        Returns the new volume's sim data dict.
+        Raises LsmError(INVALID_VOLUME) if the source volume is missing,
+        LsmError(SIZE_INSUFFICIENT_SPACE) if dst pool lacks free space.
+        """
+        if src_vol_id not in self.vol_dict.keys():
+            # Fixed: error message used 'vol_id', which is not defined
+            # until the new volume is created below (NameError on this
+            # path); the missing volume is src_vol_id.
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such volume: %s" % src_vol_id)
+        size_bytes = self.vol_dict[src_vol_id]['total_space']
+        size_bytes = SimData._block_rounding(size_bytes)
+        # check free size
+        free_space = self._pool_free_space(dst_pool_id)
+        if (free_space < size_bytes):
+            raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+                           "Insufficient space in pool")
+        sim_vol = dict()
+        vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+        sim_vol['vol_id'] = vol_id
+        sim_vol['vpd83'] = SimData._random_vpd()
+        sim_vol['name'] = new_vol_name
+        sim_vol['total_space'] = size_bytes
+        sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+        sim_vol['pool_id'] = dst_pool_id
+        sim_vol['consume_size'] = size_bytes
+        # NOTE(review): unlike volume_create(), no 'thinp' key is set on
+        # the replica -- confirm downstream conversion tolerates that.
+        self.vol_dict[vol_id] = sim_vol
+
+        dst_vol_id = vol_id
+        if 'replicate' not in self.vol_dict[src_vol_id].keys():
+            self.vol_dict[src_vol_id]['replicate'] = dict()
+
+        if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+            self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+        # Whole-volume replication: one range covering everything.
+        sim_rep = {
+            'rep_type': rep_type,
+            'src_start_blk': 0,
+            'dst_start_blk': 0,
+            'blk_count': size_bytes,
+        }
+        self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+            [sim_rep])
+
+        return sim_vol
+
+    def volume_replicate_range_block_size(self, sys_id, flags=0):
+        """
+        Return the block size used for replicate-range operations.
+        """
+        return SimData.SIM_DATA_BLK_SIZE
+
+    def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+                               flags=0):
+        """
+        Record a block-range replication between two existing volumes.
+        Returns None.
+        Raises LsmError(INVALID_VOLUME) when either volume is missing.
+        """
+        if src_vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % src_vol_id)
+
+        if dst_vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % dst_vol_id)
+
+        sim_reps = []
+        for rep_range in ranges:
+            sim_rep = dict()
+            sim_rep['rep_type'] = rep_type
+            sim_rep['src_start_blk'] = rep_range.src_block
+            sim_rep['dst_start_blk'] = rep_range.dest_block
+            sim_rep['blk_count'] = rep_range.block_count
+            sim_reps.extend([sim_rep])
+
+        if 'replicate' not in self.vol_dict[src_vol_id].keys():
+            self.vol_dict[src_vol_id]['replicate'] = dict()
+
+        if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+            self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+        # Fixed: was extend([sim_reps]), which appended the whole list as
+        # one nested element; volume_replicate() stores flat rep dicts in
+        # this list, so store them flat here as well.
+        self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+            sim_reps)
+
+        return None
+
+    def volume_online(self, vol_id, flags=0):
+        """
+        Validate the volume exists; the simulator keeps no online/offline
+        state, so this is a no-op beyond the lookup.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        # TODO: Volume.STATUS_XXX does have indication about volume offline
+        #       or online, meanwhile, cmdline does not support volume_online()
+        #       yet
+        return None
+
+    def volume_offline(self, vol_id, flags=0):
+        """
+        Validate the volume exists; no offline state is tracked (no-op).
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        # TODO: Volume.STATUS_XXX does have indication about volume offline
+        #       or online, meanwhile, cmdline does not support
+        #       volume_offline() yet (comment previously said
+        #       volume_online(), presumably a copy-paste slip).
+        return None
+
+    def volume_child_dependency(self, vol_id, flags=0):
+        """
+        If volume is a src or dst of a replication, we return True.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        # Source side: this volume owns a non-empty 'replicate' map.
+        if 'replicate' in self.vol_dict[vol_id].keys() and \
+           self.vol_dict[vol_id]['replicate']:
+            return True
+        # Destination side: some other volume replicates into this one.
+        for sim_vol in self.vol_dict.values():
+            if 'replicate' in sim_vol.keys():
+                if vol_id in sim_vol['replicate'].keys():
+                    return True
+        return False
+
+    def volume_child_dependency_rm(self, vol_id, flags=0):
+        """
+        Remove all replication relationships this volume participates in,
+        both as source and as destination. Returns None.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        if 'replicate' in self.vol_dict[vol_id].keys() and \
+           self.vol_dict[vol_id]['replicate']:
+            del self.vol_dict[vol_id]['replicate']
+
+        for sim_vol in self.vol_dict.values():
+            if 'replicate' in sim_vol.keys():
+                if vol_id in sim_vol['replicate'].keys():
+                    del sim_vol['replicate'][vol_id]
+        return None
+
+    def ags(self, flags=0):
+        return self.ag_dict.values()
+
+    def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+        """
+        Create an access group seeded with one initiator, registering the
+        initiator on the fly if unknown. Returns the new group dict.
+        """
+        sim_ag = dict()
+        if init_id not in self.init_dict.keys():
+            sim_init = dict()
+            sim_init['init_id'] = init_id
+            sim_init['init_type'] = init_type
+            sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+            sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+            self.init_dict[init_id] = sim_init
+
+        sim_ag['init_ids'] = [init_id]
+        sim_ag['sys_id'] = SimData.SIM_DATA_SYS_ID
+        sim_ag['name'] = name
+        sim_ag['ag_id'] = "AG_ID_%s" % SimData._random_vpd(4)
+        self.ag_dict[sim_ag['ag_id']] = sim_ag
+        return sim_ag
+
+    def access_group_del(self, ag_id, flags=0):
+        # Note: volumes' 'mask' entries keyed by this ag_id are not
+        # cleaned up here.
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found")
+        del(self.ag_dict[ag_id])
+        return None
+
+    def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+        """
+        Add an initiator to an access group, auto-registering it first.
+        """
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found")
+        if init_id not in self.init_dict.keys():
+            sim_init = dict()
+            sim_init['init_id'] = init_id
+            sim_init['init_type'] = init_type
+            sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+            sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+            self.init_dict[init_id] = sim_init
+        # NOTE(review): returns the group dict when the initiator is
+        # already a member, but None after actually adding -- confirm
+        # callers do not rely on the return value.
+        if init_id in self.ag_dict[ag_id]['init_ids']:
+            return self.ag_dict[ag_id]
+
+        self.ag_dict[ag_id]['init_ids'].extend([init_id])
+
+        return None
+
+    def access_group_del_initiator(self, ag_id, init_id, flags=0):
+        """
+        Remove an initiator from a group; silently succeeds when the
+        initiator is unknown or not a member. Returns None.
+        """
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found: %s" % ag_id)
+        if init_id not in self.init_dict.keys():
+            return None
+
+        if init_id in self.ag_dict[ag_id]['init_ids']:
+            new_init_ids = []
+            for cur_init_id in self.ag_dict[ag_id]['init_ids']:
+                if cur_init_id != init_id:
+                    new_init_ids.extend([cur_init_id])
+            del(self.ag_dict[ag_id]['init_ids'])
+            self.ag_dict[ag_id]['init_ids'] = new_init_ids
+        return None
+
+    def access_group_grant(self, ag_id, vol_id, access, flags=0):
+        """
+        Grant an access group access to a volume; masking data is stored
+        on the volume under 'mask' as {ag_id: access}. Returns None.
+        """
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found: %s" % ag_id)
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        if 'mask' not in self.vol_dict[vol_id].keys():
+            self.vol_dict[vol_id]['mask'] = dict()
+
+        self.vol_dict[vol_id]['mask'][ag_id] = access
+        return None
+
+    def access_group_revoke(self, ag_id, vol_id, flags=0):
+        """
+        Revoke an access group's access to a volume; silently succeeds
+        when no grant exists. Returns None.
+        """
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found: %s" % ag_id)
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        if 'mask' not in self.vol_dict[vol_id].keys():
+            return None
+
+        if ag_id not in self.vol_dict[vol_id]['mask'].keys():
+            return None
+
+        del(self.vol_dict[vol_id]['mask'][ag_id])
+        return None
+
+    def volumes_accessible_by_access_group(self, ag_id, flags=0):
+        """
+        Return all volume dicts whose 'mask' contains ag_id.
+        """
+        if ag_id not in self.ag_dict.keys():
+            raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+                           "Access group not found: %s" % ag_id)
+        rc = []
+        for sim_vol in self.vol_dict.values():
+            if 'mask' not in sim_vol:
+                continue
+            if ag_id in sim_vol['mask'].keys():
+                rc.extend([sim_vol])
+        return rc
+
+    def access_groups_granted_to_volume(self, vol_id, flags=0):
+        """
+        Return the access group dicts granted on this volume.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        sim_ags = []
+        if 'mask' in self.vol_dict[vol_id].keys():
+            ag_ids = self.vol_dict[vol_id]['mask'].keys()
+            for ag_id in ag_ids:
+                sim_ags.extend([self.ag_dict[ag_id]])
+        return sim_ags
+
+    def inits(self, flags=0):
+        return self.init_dict.values()
+
+    def initiator_grant(self, init_id, init_type, vol_id, access, flags=0):
+        """
+        Grant an initiator direct access to a volume, auto-registering
+        the initiator if unknown; stored on the volume under 'mask_init'
+        as {init_id: access}. Returns None.
+        Raises LsmError(INVALID_VOLUME) if vol_id does not exist.
+
+        Fixed: 'flags' now defaults to 0 like every other method in this
+        class (it previously had no default).
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        if init_id not in self.init_dict.keys():
+            sim_init = dict()
+            sim_init['init_id'] = init_id
+            sim_init['init_type'] = init_type
+            sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+            sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+            self.init_dict[init_id] = sim_init
+        if 'mask_init' not in self.vol_dict[vol_id].keys():
+            self.vol_dict[vol_id]['mask_init'] = dict()
+
+        self.vol_dict[vol_id]['mask_init'][init_id] = access
+        return None
+
+    def initiator_revoke(self, init_id, vol_id, flags=0):
+        """
+        Remove a direct initiator grant from a volume; silently succeeds
+        when no grant exists. Returns None.
+        Raises LsmError for an unknown volume or initiator.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        if init_id not in self.init_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_INIT,
+                           "No such Initiator: %s" % init_id)
+
+        if 'mask_init' in self.vol_dict[vol_id].keys():
+            if init_id in self.vol_dict[vol_id]['mask_init'].keys():
+                del self.vol_dict[vol_id]['mask_init'][init_id]
+
+        return None
+
+    def _ag_ids_of_init(self, init_id):
+        """
+        Find out the access groups defined initiator belong to.
+        Will return a list of access group id or []
+        """
+        rc = []
+        for sim_ag in self.ag_dict.values():
+            if init_id in sim_ag['init_ids']:
+                rc.extend([sim_ag['ag_id']])
+        return rc
+
+    def volumes_accessible_by_initiator(self, init_id, flags=0):
+        """
+        Return volumes reachable by the initiator via either its access
+        groups or direct 'mask_init' grants, de-duplicated by vol_id.
+        """
+        if init_id not in self.init_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_INIT,
+                           "No such Initiator: %s" % init_id)
+        rc_dedup_dict = dict()
+        ag_ids = self._ag_ids_of_init(init_id)
+        for ag_id in ag_ids:
+            sim_vols = self.volumes_accessible_by_access_group(ag_id)
+            for sim_vol in sim_vols:
+                rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+
+        for sim_vol in self.vol_dict.values():
+            if 'mask_init' in sim_vol:
+                if init_id in sim_vol['mask_init'].keys():
+                    rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+        return rc_dedup_dict.values()
+
+    def initiators_granted_to_volume(self, vol_id, flags=0):
+        """
+        Return initiators with access to the volume, combining group
+        membership and direct grants, de-duplicated by init_id.
+        """
+        if vol_id not in self.vol_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_VOLUME,
+                           "No such Volume: %s" % vol_id)
+        rc_dedup_dict = dict()
+        sim_ags = self.access_groups_granted_to_volume(vol_id, flags)
+        for sim_ag in sim_ags:
+            for init_id in sim_ag['init_ids']:
+                rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+        if 'mask_init' in self.vol_dict[vol_id].keys():
+            for init_id in self.vol_dict[vol_id]['mask_init']:
+                rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+        return rc_dedup_dict.values()
+
+    def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+                        flags=0):
+        """
+        Validate the initiator exists and is iSCSI; credentials are not
+        stored since there is no query API for them yet. Returns None.
+        """
+        if init_id not in self.init_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_INIT,
+                           "No such Initiator: %s" % init_id)
+        if self.init_dict[init_id]['init_type'] != Initiator.TYPE_ISCSI:
+            raise LsmError(ErrorNumber.UNSUPPORTED_INITIATOR_TYPE,
+                           "Initiator %s is not an iSCSI IQN" % init_id)
+        # No iscsi chap query API yet
+        return None
+
+    def fs(self):
+        return self.fs_dict.values()
+
+    def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+        """
+        Create a file system in pool_id, rounding size to a block
+        boundary. Returns the new fs sim data dict.
+        Raises LsmError(SIZE_INSUFFICIENT_SPACE) when the pool is full.
+        """
+        size_bytes = SimData._block_rounding(size_bytes)
+        # check free size
+        free_space = self._pool_free_space(pool_id)
+        if (free_space < size_bytes):
+            raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+                           "Insufficient space in pool")
+        sim_fs = dict()
+        fs_id = "FS_ID_%s" % SimData._random_vpd(4)
+        sim_fs['fs_id'] = fs_id
+        sim_fs['name'] = fs_name
+        sim_fs['total_space'] = size_bytes
+        # New fs starts empty, so free == total.
+        sim_fs['free_space'] = size_bytes
+        sim_fs['sys_id'] = SimData.SIM_DATA_SYS_ID
+        sim_fs['pool_id'] = pool_id
+        sim_fs['consume_size'] = size_bytes
+        self.fs_dict[fs_id] = sim_fs
+        return sim_fs
+
+    def fs_delete(self, fs_id, flags=0):
+        # Remove the fs; its snapshots in snap_dict are not cleaned up.
+        if fs_id in self.fs_dict.keys():
+            del(self.fs_dict[fs_id])
+            return
+        raise LsmError(ErrorNumber.INVALID_FS,
+                       "No such File System: %s" % fs_id)
+
+    def fs_resize(self, fs_id, new_size_bytes, flags=0):
+        """
+        Resize a file system, rounding the new size to a block boundary.
+        Returns the updated fs dict.
+        Raises LsmError(INVALID_FS) for an unknown fs and
+        LsmError(SIZE_INSUFFICIENT_SPACE) when the pool lacks space.
+        """
+        new_size_bytes = SimData._block_rounding(new_size_bytes)
+        if fs_id in self.fs_dict.keys():
+            pool_id = self.fs_dict[fs_id]['pool_id']
+            # NOTE(review): free space does not credit back this fs's
+            # current consume_size -- growing a large fs may fail even
+            # when the delta would fit; confirm intended.
+            free_space = self._pool_free_space(pool_id)
+            if (free_space < new_size_bytes):
+                raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+                               "Insufficient space in pool")
+
+            self.fs_dict[fs_id]['total_space'] = new_size_bytes
+            self.fs_dict[fs_id]['free_space'] = new_size_bytes
+            self.fs_dict[fs_id]['consume_size'] = new_size_bytes
+            return self.fs_dict[fs_id]
+        # Fixed: was ErrorNumber.INVALID_VOLUME for a file system error;
+        # fs_delete() uses INVALID_FS for the same condition.
+        raise LsmError(ErrorNumber.INVALID_FS,
+                       "No such File System: %s" % fs_id)
+
+    def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+        """
+        Clone src_fs_id into a new fs named dst_fs_name, optionally based
+        on snapshot snap_id, and record the clone relationship on the
+        source fs. Returns the new fs dict.
+        """
+        if src_fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % src_fs_id)
+        if snap_id and snap_id not in self.snap_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_SS,
+                           "No such Snapshot: %s" % snap_id)
+        src_sim_fs = self.fs_dict[src_fs_id]
+        dst_sim_fs = self.fs_create(
+            src_sim_fs['pool_id'], dst_fs_name, src_sim_fs['total_space'], 0)
+        if 'clone' not in src_sim_fs.keys():
+            src_sim_fs['clone'] = dict()
+        src_sim_fs['clone'][dst_sim_fs['fs_id']] = {
+            'snap_id': snap_id,
+        }
+        return dst_sim_fs
+
+    def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+        """
+        Validate arguments for a file-level clone; no state is recorded
+        since there is no file clone query API yet. Returns None.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if snap_id and snap_id not in self.snap_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_SS,
+                           "No such Snapshot: %s" % snap_id)
+        # TODO: No file clone query API yet, no need to do anything internally
+        return None
+
+    def fs_snapshots(self, fs_id, flags=0):
+        """
+        Return all snapshot dicts recorded for the file system.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        rc = []
+        if 'snaps' in self.fs_dict[fs_id].keys():
+            for snap_id in self.fs_dict[fs_id]['snaps']:
+                rc.extend([self.snap_dict[snap_id]])
+        return rc
+
+    def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+        """
+        Create a snapshot of the file system; an empty 'files' list means
+        the snapshot covers all files. Returns the snapshot dict.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if 'snaps' not in self.fs_dict[fs_id].keys():
+            self.fs_dict[fs_id]['snaps'] = []
+
+        snap_id = "SNAP_ID_%s" % SimData._random_vpd(4)
+        sim_snap = dict()
+        sim_snap['snap_id'] = snap_id
+        sim_snap['name'] = snap_name
+        if files is None:
+            sim_snap['files'] = []
+        else:
+            sim_snap['files'] = files
+        sim_snap['timestamp'] = time.time()
+        self.snap_dict[snap_id] = sim_snap
+        self.fs_dict[fs_id]['snaps'].extend([snap_id])
+        return sim_snap
+
+    def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+        """
+        Delete a snapshot and unlink it from its file system's 'snaps'
+        list. Returns None.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if snap_id not in self.snap_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_SS,
+                           "No such Snapshot: %s" % snap_id)
+        del self.snap_dict[snap_id]
+        new_snap_ids = []
+        for old_snap_id in self.fs_dict[fs_id]['snaps']:
+            if old_snap_id != snap_id:
+                new_snap_ids.extend([old_snap_id])
+        self.fs_dict[fs_id]['snaps'] = new_snap_ids
+        return None
+
+    def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+                           flag_all_files, flags=0):
+        """
+        Validate arguments for a snapshot revert; the simulator tracks no
+        file content, so nothing is changed internally. Returns None.
+        Fixed: 'flags' now defaults to 0 like sibling methods, and the
+        missing-fs error uses INVALID_FS (was INVALID_INIT).
+        """
+        if fs_id not in self.fs_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if snap_id not in self.snap_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_SS,
+                           "No such Snapshot: %s" % snap_id)
+        # Nothing need to done internally for revert.
+        return None
+
+    def fs_child_dependency(self, fs_id, files, flags=0):
+        """
+        Return True when the file system (files empty/None) or any of the
+        named files has a snapshot depending on it, else False.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if 'snaps' not in self.fs_dict[fs_id].keys():
+            return False
+        if files is None or len(files) == 0:
+            # Fixed: was '>= 0', which is always true and reported a
+            # dependency even when the snapshot list was empty.
+            if len(self.fs_dict[fs_id]['snaps']) > 0:
+                return True
+        else:
+            for req_file in files:
+                for snap_id in self.fs_dict[fs_id]['snaps']:
+                    if len(self.snap_dict[snap_id]['files']) == 0:
+                        # We are snapshoting all files
+                        return True
+                    if req_file in self.snap_dict[snap_id]['files']:
+                        return True
+        return False
+
+    def fs_child_dependency_rm(self, fs_id, files, flags=0):
+        """
+        Drop snapshot dependencies for the whole fs (files empty/None) or
+        only for the named files; snapshots left with no files are
+        deleted entirely. Returns None.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        if 'snaps' not in self.fs_dict[fs_id].keys():
+            return None
+        if files is None or len(files) == 0:
+            # Fixed: was '>= 0' (always true); only act when snapshots
+            # actually exist.
+            if len(self.fs_dict[fs_id]['snaps']) > 0:
+                snap_ids = self.fs_dict[fs_id]['snaps']
+                for snap_id in snap_ids:
+                    del self.snap_dict[snap_id]
+                del self.fs_dict[fs_id]['snaps']
+        else:
+            for req_file in files:
+                snap_ids_to_rm = []
+                for snap_id in self.fs_dict[fs_id]['snaps']:
+                    if len(self.snap_dict[snap_id]['files']) == 0:
+                        # BUG: if certain snapshot is against all files,
+                        # what should we do if user request remove
+                        # dependency on certain files.
+                        # Currently, we do nothing
+                        return None
+                    if req_file in self.snap_dict[snap_id]['files']:
+                        new_files = []
+                        for old_file in self.snap_dict[snap_id]['files']:
+                            if old_file != req_file:
+                                new_files.extend([old_file])
+                        if len(new_files) == 0:
+                            # all files has been removed from snapshot list.
+                            snap_ids_to_rm.extend([snap_id])
+                        else:
+                            self.snap_dict[snap_id]['files'] = new_files
+                for snap_id in snap_ids_to_rm:
+                    del self.snap_dict[snap_id]
+
+                new_snap_ids = []
+                for cur_snap_id in self.fs_dict[fs_id]['snaps']:
+                    if cur_snap_id not in snap_ids_to_rm:
+                        new_snap_ids.extend([cur_snap_id])
+                if len(new_snap_ids) == 0:
+                    del self.fs_dict[fs_id]['snaps']
+                else:
+                    self.fs_dict[fs_id]['snaps'] = new_snap_ids
+        return None
+
+    def exports(self, flags=0):
+        # All NFS export dicts currently defined.
+        return self.exp_dict.values()
+
+    def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+                  anon_uid, anon_gid, auth_type, options, flags=0):
+        """
+        Create an NFS export for a file system; when exp_path is None a
+        path is derived from the generated export id. Returns the new
+        export dict.
+        """
+        if fs_id not in self.fs_dict.keys():
+            # Fixed: was ErrorNumber.INVALID_INIT for a file system error.
+            raise LsmError(ErrorNumber.INVALID_FS,
+                           "No such File System: %s" % fs_id)
+        sim_exp = dict()
+        sim_exp['exp_id'] = "EXP_ID_%s" % SimData._random_vpd(4)
+        sim_exp['fs_id'] = fs_id
+        if exp_path is None:
+            sim_exp['exp_path'] = "/%s" % sim_exp['exp_id']
+        else:
+            sim_exp['exp_path'] = exp_path
+        sim_exp['auth_type'] = auth_type
+        sim_exp['root_hosts'] = root_hosts
+        sim_exp['rw_hosts'] = rw_hosts
+        sim_exp['ro_hosts'] = ro_hosts
+        sim_exp['anon_uid'] = anon_uid
+        sim_exp['anon_gid'] = anon_gid
+        sim_exp['options'] = options
+        self.exp_dict[sim_exp['exp_id']] = sim_exp
+        return sim_exp
+
+    def fs_unexport(self, exp_id, flags=0):
+        # Remove an NFS export by id.
+        # NOTE(review): assumes ErrorNumber.INVALID_NFS exists -- confirm
+        # against the ErrorNumber definitions.
+        if exp_id not in self.exp_dict.keys():
+            raise LsmError(ErrorNumber.INVALID_NFS,
+                           "No such NFS Export: %s" % exp_id)
+        del self.exp_dict[exp_id]
+        return None
+
+    def pool_create(self,
+                    system_id,
+                    pool_name='',
+                    raid_type=Pool.RAID_TYPE_UNKNOWN,
+                    member_type=Pool.MEMBER_TYPE_UNKNOWN,
+                    member_ids=None,
+                    member_count=0,
+                    size_bytes=0,
+                    thinp_type=Pool.THINP_TYPE_UNKNOWN,
+                    flags=0):
+        # Unfinished stub: only generates a default pool name and then
+        # returns without creating anything.
+        if pool_name == '':
+            pool_name = 'POOL %s' % SimData._random_vpd(4)
+
+        ## Coding
+        return
diff --git a/lsm/lsm/simulator.py b/lsm/lsm/simulator.py
index ca9d3c4..9a7a75c 100644
--- a/lsm/lsm/simulator.py
+++ b/lsm/lsm/simulator.py
@@ -26,270 +26,17 @@ from data import Pool, Initiator, Volume, BlockRange, System, AccessGroup, \
Snapshot, NfsExport, FileSystem, Capabilities, Disk, OptionalData
from iplugin import INfs, IStorageAreaNetwork
from version import VERSION
+from simarray import SimArray, SimJob

-SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
- tempfile.gettempdir() + '/lsm_sim_data')
-duration = os.getenv("LSM_SIM_TIME", 1)
-
-# Bump this when the sim data layout changes on disk
-SIM_DATA_VERSION = 1
-
-
-class SimJob(object):
- """
- Simulates a longer running job, uses actual wall time. If test cases
- take too long we can reduce time by shortening time duration.
- """
-
- def __calc_progress(self):
- if self.percent < 100:
- end = self.start + self.duration
- now = time.time()
- if now >= end:
- self.percent = 100
- self.status = JobStatus.COMPLETE
- else:
- diff = now - self.start
- self.percent = int(100 * (diff / self.duration))
-
- def __init__(self, item_to_return):
- self.status = JobStatus.INPROGRESS
- self.percent = 0
- self.__item = item_to_return
- self.start = time.time()
- self.duration = float(random.randint(0, int(duration)))
-
- def progress(self):
- """
- Returns a tuple (status, percent, volume)
- """
- self.__calc_progress()
- return self.status, self.percent, self.item
-
- @property
- def item(self):
- if self.percent >= 100:
- return self.__item
- return None
-
- @item.setter
- def item(self, value):
- self.__item = value
-
-
-def _signature(obj):
- """
- Generate some kind of signature for this object, not sure this is ideal.
-
- Hopefully this will save some debug time.
- """
- sig = ''
- keys = obj.__dict__.keys()
- keys.sort()
-
- for k in keys:
- sig = md5(sig + k)
- return sig
-
-
-def _state_signature():
- rc = ''
- objects = [Pool('', '', 0, 0, ''), Volume('', '', '', 1, 1, 0, '', ''),
- AccessGroup('', '', ['']), Initiator('', 0, ''),
- System('', '', 0), FileSystem('', '', 0, 0, '', ''),
- BlockRange(0, 100, 50), Capabilities(),
- NfsExport('', '', '', '', '', '', '', '', '', '', ),
- Snapshot('', '', 10)]
-
- for o in objects:
- rc = md5(rc + _signature(o))
-
- return rc
-
-
-class SimState(object):
- def __init__(self):
- self.version = SIM_DATA_VERSION
- self.sys_info = System('sim-01', 'LSM simulated storage plug-in',
- System.STATUS_OK)
- p1 = Pool('POO1', 'Pool 1', 2 ** 64, 2 ** 64, self.sys_info.id)
- p2 = Pool('POO2', 'Pool 2', 2 ** 64, 2 ** 64, self.sys_info.id)
- p3 = Pool('POO3', 'Pool 3', 2 ** 64, 2 ** 64, self.sys_info.id)
- p4 = Pool('POO4', 'lsm_test_aggr', 2 ** 64, 2 ** 64, self.sys_info.id)
-
- self.block_size = 512
-
- pm1 = {'pool': p1, 'volumes': {}}
- pm2 = {'pool': p2, 'volumes': {}}
- pm3 = {'pool': p3, 'volumes': {}}
- pm4 = {'pool': p4, 'volumes': {}}
-
- self.pools = {p1.id: pm1, p2.id: pm2, p3.id: pm3, p4.id: pm4}
- self.volumes = {}
- self.vol_num = 1
- self.access_groups = {}
-
- self.fs = {}
- self.fs_num = 1
-
- self.tmo = 30000
- self.jobs = {}
- self.job_num = 1
-
- #These express relationships between initiators and volumes. This
- #is done because if you delete either part of the relationship
- #you need to delete the association between them. Holding this stuff
- #in a db would be easier :-)
- self.group_grants = {} # {access group id : {volume id: access }}
-
- #Create a signature
- self.signature = _state_signature()
-
-
-class StorageSimulator(INfs, IStorageAreaNetwork):
+class SimPlugin(INfs, IStorageAreaNetwork):
"""
Simple class that implements enough to allow the framework to be exercised.
"""
-
- @staticmethod
- def __random_vpd(l=16):
- """
- Generate a random 16 digit number as hex
- """
- vpd = []
- for i in range(0, l):
- vpd.append(str('%02X' % (random.randint(0, 255))))
- return "".join(vpd)
-
- def __block_rounding(self, size_bytes):
- """
- Round the requested size to block size.
- """
- return (size_bytes / self.s.block_size) * self.s.block_size
-
- def __create_job(self, returned_item):
- if True:
- #if random.randint(0,5) == 1:
- self.s.job_num += 1
- job = "JOB_" + str(self.s.job_num)
- self.s.jobs[job] = SimJob(returned_item)
- return job, None
- else:
- return None, returned_item
-
- def _version_error(self):
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Stored simulator state incompatible with "
- "simulator, please move or delete %s" %
- self.file)
-
- def _load(self):
- tmp = None
- if os.path.exists(self.file):
- with open(self.file, 'rb') as f:
- tmp = pickle.load(f)
-
- # Going forward we could get smarter about handling this for
- # changes that aren't invasive, but we at least need to check
- # to make sure that the data will work and not cause any
- # undo confusion.
- try:
- if tmp.version != SIM_DATA_VERSION or \
- tmp.signature != _state_signature():
- self._version_error()
- except AttributeError:
- self._version_error()
-
- return tmp
-
- def _save(self):
- f = open(self.file, 'wb')
- pickle.dump(self.s, f)
- f.close()
-
- #If we run via the daemon the file will be owned by libstoragemgmt
- #and if we run sim_lsmplugin stand alone we will be unable to
- #change the permissions.
- try:
- os.chmod(self.file, 0666)
- except OSError:
- pass
-
- def _load_state(self):
- prev = self._load()
- if prev:
- return prev
- return SimState()
-
- @staticmethod
- def _check_sl(string_list):
- """
- String list should be an empty list or a list with items
- """
- if string_list is not None and isinstance(string_list, list):
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_SL, 'Invalid string list')
-
def __init__(self):
-
- self.file = SIM_DATA_FILE
- self.s = self._load_state()
self.uri = None
self.password = None
self.tmo = 0

- def _allocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
-
- rounded_size = self.__block_rounding(size_bytes)
-
- if p.free_space >= rounded_size:
- p.free_space -= rounded_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- return rounded_size
-
- def _deallocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
- p.free_space += size_bytes
-
- @staticmethod
- def _ag_id(name):
- return md5(name)
-
- def _new_access_group(self, name, h):
- return AccessGroup(StorageSimulator._ag_id(name), name,
- [i.id for i in h['initiators']], self.s.sys_info.id)
-
- def _create_vol(self, pool, name, size_bytes):
- actual_size = self._allocate_from_pool(pool.id, size_bytes)
-
- nv = Volume('Vol' + str(self.s.vol_num), name,
- StorageSimulator.__random_vpd(), self.s.block_size,
- (actual_size / self.s.block_size), Volume.STATUS_OK,
- self.s.sys_info.id,
- pool.id)
- self.s.volumes[nv.id] = {'pool': pool, 'volume': nv}
- self.s.vol_num += 1
- return self.__create_job(nv)
-
- def _create_fs(self, pool, name, size_bytes):
- if pool.id in self.s.pools:
- p = self.s.pools[pool.id]['pool']
- actual_size = self._allocate_from_pool(p.id, size_bytes)
-
- new_fs = FileSystem('FS' + str(self.s.fs_num), name, actual_size,
- actual_size, p.id, self.s.sys_info.id)
-
- self.s.fs[new_fs.id] = {'pool': p, 'fs': new_fs, 'ss': {},
- 'exports': {}}
- self.s.fs_num += 1
- return self.__create_job(new_fs)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Pool not found')
-
def startup(self, uri, password, timeout, flags=0):
self.uri = uri
self.password = password
@@ -299,17 +46,38 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
qp = uri_parse(uri)
if 'parameters' in qp and 'statefile' in qp['parameters'] \
and qp['parameters']['statefile'] is not None:
- self.file = qp['parameters']['statefile']
- self._load_state()
+ self.sim_array = SimArray(qp['parameters']['statefile'])
+ else:
+ self.sim_array = SimArray()

return None

+ def shutdown(self, flags=0):
+ self.sim_array.save_state()
+
+ def job_status(self, job_id, flags=0):
+ return self.sim_array.job_status(job_id, flags)
+
+ def job_free(self, job_id, flags=0):
+ return self.sim_array.job_free(job_id, flags)
+
+ @staticmethod
+ def _sim_data_2_lsm(sim_data):
+ """
+ Fake converter. SimArray already do SimData to LSM data convert.
+ We move data convert to SimArray to make this sample plugin looks
+ clean.
+ But in real world, data converting is often handled by plugin itself
+ rather than array.
+ """
+ return sim_data
+
def set_time_out(self, ms, flags=0):
- self.tmo = ms
+ self.sim_array.set_time_out(ms, flags)
return None

def get_time_out(self, flags=0):
- return self.tmo
+ return self.sim_array.get_time_out(flags)

def capabilities(self, system, flags=0):
rc = Capabilities()
@@ -319,544 +87,204 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
def plugin_info(self, flags=0):
return "Storage simulator", VERSION

- def shutdown(self, flags=0):
- self._save()
-
def systems(self, flags=0):
- return [self.s.sys_info]
-
- def job_status(self, job_id, flags=0):
- if job_id in self.s.jobs:
- return self.s.jobs[job_id].progress()
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def job_free(self, job_id, flags=0):
- if job_id in self.s.jobs:
- del self.s.jobs[job_id]
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def volumes(self, flags=0):
- return [e['volume'] for e in self.s.volumes.itervalues()]
-
- def _get_volume(self, volume_id):
- for v in self.s.volumes.itervalues():
- if v['volume'].id == volume_id:
- return v['volume']
- return None
+ sim_syss = self.sim_array.systems()
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_syss]

def pools(self, flags=0):
- return [e['pool'] for e in self.s.pools.itervalues()]
-
- def _volume_accessible(self, access_group_id, volume):
+ sim_pools = self.sim_array.pools()
+ return [SimPlugin._sim_data_2_lsm(p) for p in sim_pools]

- if access_group_id in self.s.group_grants:
- ag = self.s.group_grants[access_group_id]
-
- if volume.id in ag:
- return True
-
- return False
-
- def _initiators(self, volume_filter=None):
- rc = []
- if len(self.s.access_groups):
- for k, v in self.s.access_groups.items():
- if volume_filter:
- ag = self._new_access_group(k, v)
- if self._volume_accessible(ag.id, volume_filter):
- rc.extend(v['initiators'])
- else:
- rc.extend(v['initiators'])
-
- #We can have multiples as the same initiator can be in multiple access
- #groups
- remove_dupes = {}
- for x in rc:
- remove_dupes[x.id] = x
-
- return list(remove_dupes.values())
+ def volumes(self, flags=0):
+ sim_vols = self.sim_array.volumes()
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- def initiators(self, flags=0):
- return self._initiators()
+ def disks(self, flags=0):
+ sim_disks = self.sim_array.disks()
+ return [SimPlugin._sim_data_2_lsm(d) for d in sim_disks]

def volume_create(self, pool, volume_name, size_bytes, provisioning,
flags=0):
- assert provisioning is not None
- return self._create_vol(pool, volume_name, size_bytes)
+ sim_vol = self.sim_array.volume_create(
+ pool.id, volume_name, size_bytes, provisioning, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)

def volume_delete(self, volume, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
- self._deallocate_from_pool(p.id, v.size_bytes)
- del self.s.volumes[volume.id]
-
- for (k, v) in self.s.group_grants.items():
- if volume.id in v:
- del self.s.group_grants[k][volume.id]
-
- #We only return null or job id.
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
+ return self.sim_array.volume_delete(volume.id, flags)

- def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
- assert rep_type is not None
+ def volume_resize(self, volume, new_size_bytes, flags=0):
+ sim_vol = self.sim_array.volume_resize(
+ volume.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)

- p_id = None
+ def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
+ dst_pool_id = None

if pool is not None:
- p_id = pool.id
- else:
- p_id = volume_src.pool_id
-
- if p_id in self.s.pools and volume_src.id in self.s.volumes:
- p = self.s.pools[p_id]['pool']
- v = self.s.volumes[volume_src.id]['volume']
-
- return self._create_vol(p, name, v.size_bytes)
+ dst_pool_id = pool.id
else:
- if pool.id not in self.s.pools:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Incorrect pool')
-
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- 'Volume not present')
- return None
+ dst_pool_id = volume_src.pool_id
+ return self.sim_array.volume_replicate(
+ dst_pool_id, rep_type, volume_src.id, name, flags)

def volume_replicate_range_block_size(self, system, flags=0):
- return self.s.block_size
+ return self.sim_array.volume_replicate_range_block_size(
+ system.id, flags)

def volume_replicate_range(self, rep_type, volume_src, volume_dest,
ranges, flags=0):
-
- if rep_type not in (Volume.REPLICATE_SNAPSHOT,
- Volume.REPLICATE_CLONE,
- Volume.REPLICATE_COPY,
- Volume.REPLICATE_MIRROR_ASYNC,
- Volume.REPLICATE_MIRROR_SYNC):
- raise LsmError(ErrorNumber.UNSUPPORTED_REPLICATION_TYPE,
- "Rep_type invalid")
-
- if ranges:
- if isinstance(ranges, list):
- for r in ranges:
- if isinstance(r, BlockRange):
- #We could do some overlap range testing etc. here.
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Range element not BlockRange")
-
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Ranges not a list")
-
- #Make sure all the arguments are validated
- if volume_src.id in self.s.volumes \
- and volume_dest.id in self.s.volumes:
- return None
- else:
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_src not found")
- if volume_dest.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_dest not found")
+ return self.sim_array.volume_replicate_range(
+ rep_type, volume_src.id, volume_dest.id, ranges, flags)

def volume_online(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
+ return self.sim_array.volume_online(volume.id, flags)

def volume_offline(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
-
- def volume_resize(self, volume, new_size_bytes, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
-
- current_size = v.size_bytes
- new_size = self.__block_rounding(new_size_bytes)
-
- if new_size == current_size:
- raise LsmError(ErrorNumber.SIZE_SAME,
- 'Volume same size')
-
- if new_size < current_size \
- or p.free_space >= (new_size - current_size):
- p.free_space -= (new_size - current_size)
- v.num_of_blocks = new_size / self.s.block_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- return self.__create_job(v)
-
- def access_group_grant(self, group, volume, access, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id not in self.s.group_grants:
- self.s.group_grants[group.id] = {volume.id: access}
- elif volume.id not in self.s.group_grants[group.id]:
- self.s.group_grants[group.id][volume.id] = access
- else:
- raise LsmError(ErrorNumber.IS_MAPPED, 'Existing access present')
-
- def access_group_revoke(self, group, volume, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id in self.s.group_grants \
- and volume.id in self.s.group_grants[group.id]:
- del self.s.group_grants[group.id][volume.id]
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- 'No volume access to revoke')
+ return self.sim_array.volume_online(volume.id, flags)

def access_group_list(self, flags=0):
- rc = []
- for (k, v) in self.s.access_groups.items():
- rc.append(self._new_access_group(k, v))
- return rc
-
- def _get_access_group(self, ag_id):
- groups = self.access_group_list()
- for g in groups:
- if g.id == ag_id:
- return g
- return None
+ sim_ags = self.sim_array.ags()
+ return [SimPlugin._sim_data_2_lsm(a) for a in sim_ags]

def access_group_create(self, name, initiator_id, id_type, system_id,
flags=0):
- if name not in self.s.access_groups:
- self.s.access_groups[name] = \
- {'initiators': [Initiator(initiator_id, id_type, 'UNA')],
- 'access': {}}
- return self._new_access_group(name, self.s.access_groups[name])
- else:
- raise LsmError(ErrorNumber.EXISTS_ACCESS_GROUP,
- "Access group with name exists")
+ sim_ag = self.sim_array.access_group_create(name, initiator_id,
+ id_type, system_id, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)

def access_group_del(self, group, flags=0):
- if group.name in self.s.access_groups:
- del self.s.access_groups[group.name]
-
- if group.id in self.s.group_grants:
- del self.s.group_grants[group.id]
-
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del(group.id, flags)

def access_group_add_initiator(self, group, initiator_id, id_type,
flags=0):
- if group.name in self.s.access_groups:
- self.s.access_groups[group.name]['initiators']. \
- append(Initiator(initiator_id, id_type, 'UNA'))
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ sim_ag = self.sim_array.access_group_add_initiator(
+ group.id, initiator_id, id_type, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)

def access_group_del_initiator(self, group, initiator_id, flags=0):
- if group.name in self.s.access_groups:
- for i in self.s.access_groups[group.name]['initiators']:
- if i.id == initiator_id:
- self.s.access_groups[group.name]['initiators']. \
- remove(i)
- return None
-
- raise LsmError(ErrorNumber.INITIATOR_NOT_IN_ACCESS_GROUP,
- "Initiator not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del_initiator(
+ group.id, initiator_id, flags)

- def volumes_accessible_by_access_group(self, group, flags=0):
- rc = []
- if group.name in self.s.access_groups:
- if group.id in self.s.group_grants:
- for (k, v) in self.s.group_grants[group.id].items():
- rc.append(self._get_volume(k))
+ def access_group_grant(self, group, volume, access, flags=0):
+ return self.sim_array.access_group_grant(
+ group.id, volume.id, access, flags)

- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ def access_group_revoke(self, group, volume, flags=0):
+ return self.sim_array.access_group_revoke(
+ group.id, volume.id, flags)

- def access_groups_granted_to_volume(self, volume, flags=0):
- rc = []
+ def volumes_accessible_by_access_group(self, group, flags=0):
+ sim_vols = self.sim_array.volumes_accessible_by_access_group(
+ group.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- for (k, v) in self.s.group_grants.items():
- if volume.id in self.s.group_grants[k]:
- rc.append(self._get_access_group(k))
- return rc
+ def access_groups_granted_to_volume(self, volume, flags=0):
+ sim_vols = self.sim_array.access_groups_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

- def iscsi_chap_auth(self, initiator, in_user, in_password, out_user,
- out_password, flags=0):
- if initiator is None:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- 'Initiator is required')
+ def initiators(self, flags=0):
+ return self.sim_array.inits(flags)

def initiator_grant(self, initiator_id, initiator_type, volume, access,
flags=0):
- name = initiator_id + volume.id
- group = None
-
- try:
- group = self.access_group_create(name, initiator_id,
- initiator_type,
- volume.system_id)
- result = self.access_group_grant(group, volume, access)
-
- except Exception as e:
- if group:
- self.access_group_del(group)
- raise e
-
- return result
+ return self.sim_array.initiator_grant(
+ initiator_id, initiator_type, volume.id, access, flags)

def initiator_revoke(self, initiator, volume, flags=0):
- name = initiator.id + volume.id
-
- if any(x.id for x in self.initiators()):
- if volume.id in self.s.volumes:
- ag = self._new_access_group(name, self.s.access_groups[name])
-
- if ag:
- self.access_group_del(ag)
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- "No mapping of initiator "
- "and volume")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "Volume not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_INITIATOR,
- "Initiator not found")
-
- return None
+ return self.sim_array.initiator_revoke(initiator.id, volume.id, flags)

def volumes_accessible_by_initiator(self, initiator, flags=0):
- rc = []
- volumes = {}
-
- #Go through each access group, for each one see if our initiator
- #is one of them.
- for ag_name, ag_info in self.s.access_groups.items():
- # Check to see if the initiator is in the group.
- if initiator.id in [i.id for i in ag_info['initiators']]:
- # Look up the privileges for this group, if any
- ag_id = StorageSimulator._ag_id(ag_name)
- if ag_id in self.s.group_grants:
- # Loop through the volumes granted to this AG
- for volume_id in self.s.group_grants[ag_id].keys():
- volumes[volume_id] = None
-
- # We very well may have duplicates, thus the reason we enter the
- # volume id into the hash with no value, we are weeding out dupes
- for vol_id in volumes.keys():
- rc.append(self._get_volume(vol_id))
-
- return rc
+ sim_vols = self.sim_array.volumes_accessible_by_initiator(
+ initiator.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]

def initiators_granted_to_volume(self, volume, flags=0):
- return self._initiators(volume)
+ sim_inits = self.sim_array.initiators_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, initiator, in_user, in_password,
+ out_user, out_password, flags=0):
+ return self.sim_array.iscsi_chap_auth(
+ initiator.id, in_user, in_password, out_user, out_password, flags)

def volume_child_dependency(self, volume, flags=0):
- return False
+ return self.sim_array.volume_child_dependency(volume.id, flags)

def volume_child_dependency_rm(self, volume, flags=0):
- return None
+ return self.sim_array.volume_child_dependency_rm(volume.id, flags)

def fs(self, flags=0):
- return [e['fs'] for e in self.s.fs.itervalues()]
-
- def fs_delete(self, fs, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- self._deallocate_from_pool(p.id, f.total_space)
- del self.s.fs[fs.id]
+ sim_fss = self.sim_array.fs()
+ return [SimPlugin._sim_data_2_lsm(f) for f in sim_fss]

- #TODO: Check for exports and remove them.
+ def fs_create(self, pool, name, size_bytes, flags=0):
+ sim_fs = self.sim_array.fs_create(pool.id, name, size_bytes)
+ return SimPlugin._sim_data_2_lsm(sim_fs)

- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ def fs_delete(self, fs, flags=0):
+ return self.sim_array.fs_delete(fs.id, flags)

def fs_resize(self, fs, new_size_bytes, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- #TODO Check to make sure we have enough space before proceeding
- self._deallocate_from_pool(p.id, f.total_space)
- f.total_space = self._allocate_from_pool(p.id, new_size_bytes)
- f.free_space = f.total_space
- return self.__create_job(f)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def fs_create(self, pool, name, size_bytes, flags=0):
- return self._create_fs(pool, name, size_bytes)
+ sim_fs = self.sim_array.fs_resize(
+ fs.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_fs)

def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0):
- #TODO If snapshot is not None, then check for existence.
-
- if src_fs.id in self.s.fs:
- f = self.s.fs[src_fs.id]['fs']
- p = self.s.fs[src_fs.id]['pool']
- return self._create_fs(p, dest_fs_name, f.total_space)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, None, flags)
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, snapshot.id, flags)

def file_clone(self, fs, src_file_name, dest_file_name, snapshot=None,
flags=0):
- #TODO If snapshot is not None, then check for existence.
- if fs.id in self.s.fs:
- if src_file_name is not None and dest_file_name is not None:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Invalid src/destination file names")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, None, flags)
+
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, snapshot.id, flags)

def fs_snapshots(self, fs, flags=0):
- if fs.id in self.s.fs:
- rc = [e for e in self.s.fs[fs.id]['ss'].itervalues()]
- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_snaps = self.sim_array.fs_snapshots(fs.id, flags)
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_snaps]

def fs_snapshot_create(self, fs, snapshot_name, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- for e in self.s.fs[fs.id]['ss'].itervalues():
- if e.name == snapshot_name:
- raise LsmError(ErrorNumber.EXISTS_NAME,
- 'Snapshot name exists')
-
- s = Snapshot(md5(snapshot_name), snapshot_name, time.time())
- self.s.fs[fs.id]['ss'][s.id] = s
- return self.__create_job(s)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_create(
+ fs.id, snapshot_name, files, flags)

def fs_snapshot_delete(self, fs, snapshot, flags=0):
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- del self.s.fs[fs.id]['ss'][snapshot.id]
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_delete(
+ fs.id, snapshot.id, flags)

def fs_snapshot_revert(self, fs, snapshot, files, restore_files,
all_files=False, flags=0):
-
- StorageSimulator._check_sl(files)
- StorageSimulator._check_sl(files)
-
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_revert(
+ fs.id, snapshot.id, files, restore_files, all_files, flags)

def fs_child_dependency(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return False
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency(fs.id, files, flags)

def fs_child_dependency_rm(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency_rm(fs.id, files, flags)

def export_auth(self, flags=0):
+ # The API should change some day
return ["simple"]

def exports(self, flags=0):
- rc = []
- for fs in self.s.fs.itervalues():
- for exp in fs['exports'].values():
- rc.append(exp)
- return rc
+ sim_exps = self.sim_array.exports(flags)
+ return [SimPlugin._sim_data_2_lsm(e) for e in sim_exps]

def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list,
anon_uid, anon_gid, auth_type, options, flags=0):
-
- if fs_id in self.s.fs:
- if export_path is None:
- export_path = "/mnt/lsm/sim/%s" % self.s.fs[fs_id]['fs'].name
-
- export_id = md5(export_path)
-
- export = NfsExport(export_id, fs_id, export_path, auth_type,
- root_list, rw_list, ro_list, anon_uid, anon_gid,
- options)
-
- self.s.fs[fs_id]['exports'][export_id] = export
- return export
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_exp = self.sim_array.fs_export(
+ fs_id, export_path, root_list, rw_list, ro_list,
+ anon_uid, anon_gid, auth_type, options, flags=0)
+ return SimPlugin._sim_data_2_lsm(sim_exp)

def export_remove(self, export, flags=0):
- fs_id = export.fs_id
-
- if fs_id in self.s.fs:
- if export.id in self.s.fs[fs_id]['exports']:
- del self.s.fs[fs_id]['exports'][export.id]
- else:
- raise LsmError(ErrorNumber.FS_NOT_EXPORTED, "FS not exported")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def disks(self, flags=0):
+ return self.sim_array.fs_unexport(export.id, flags)

- rc = []
- # TODO Make these persistent and make it fit into the total model
-
- for i in range(0, 10):
- name = "Sim disk %d" % i
- optionals = None
-
- if flags == Disk.RETRIEVE_FULL_INFO:
- optionals = OptionalData()
- optionals.set('sn', self.__random_vpd(8))
-
- rc.append(Disk(md5(name), name, Disk.DISK_TYPE_HYBRID, 512,
- 1893933056, Disk.STATUS_OK,
- self.s.sys_info.id, optionals))
-
- return rc
diff --git a/lsm/sim_lsmplugin b/lsm/sim_lsmplugin
index a4439a5..d2bee09 100755
--- a/lsm/sim_lsmplugin
+++ b/lsm/sim_lsmplugin
@@ -22,10 +22,10 @@ import syslog

try:
from lsm.pluginrunner import PluginRunner
- from lsm.simulator import StorageSimulator
+ from lsm.simulator import SimPlugin

if __name__ == '__main__':
- PluginRunner(StorageSimulator, sys.argv).run()
+ PluginRunner(SimPlugin, sys.argv).run()
except Exception as e:
#This should be quite rare, but when it does happen this is pretty
#key in understanding what happened, especially when it happens when
--
1.8.3.1
Tony Asleson
2014-02-12 19:50:57 UTC
Permalink
Patch pushed!

Thanks,
Tony
Post by Gris Ge
* simarray.py: Storage array simulator.
SimArray -- Converting SimData into LSM class.
SimData -- Handling storage resources management.
* simulator.py: Sample plugin code for plugin developer.
SimPlugin -- Provide plugin API to LSM
* with this change, 'simulator.py' could be a good sample plugin with
minimal non-essential code. We need more documentation to explain every call.
* SimData acts as a storage array to provide more accurate and flexible
resource management.
* Provides all features of the previous simulator.py.
* Passed the 'make check', 'make distcheck' and 'rpmbuild'.
* Fixed the complaint message shown when an old-version state file is found.
* Fixed the returns of initiator_grant() and etc.
* Fixed Makefile and rpm spec file for the new simarray.py files.
---
libstoragemgmt.spec.in | 1 +
lsm/Makefile.am | 1 +
lsm/lsm/__init__.py | 3 +-
lsm/lsm/simarray.py | 1309 ++++++++++++++++++++++++++++++++++++++++++++++++
lsm/lsm/simulator.py | 850 +++++--------------------------
lsm/sim_lsmplugin | 4 +-
6 files changed, 1454 insertions(+), 714 deletions(-)
create mode 100644 lsm/lsm/simarray.py
diff --git a/libstoragemgmt.spec.in b/libstoragemgmt.spec.in
index 6e64d8f..eacb3fd 100644
--- a/libstoragemgmt.spec.in
+++ b/libstoragemgmt.spec.in
@@ -246,6 +246,7 @@ fi
%{python_sitelib}/lsm/iplugin.*
%{python_sitelib}/lsm/pluginrunner.*
%{python_sitelib}/lsm/simulator.*
+%{python_sitelib}/lsm/simarray.*
%{python_sitelib}/lsm/transport.*
%{python_sitelib}/lsm/version.*
%{_bindir}/sim_lsmplugin
diff --git a/lsm/Makefile.am b/lsm/Makefile.am
index 3a405d5..f4dde10 100644
--- a/lsm/Makefile.am
+++ b/lsm/Makefile.am
@@ -24,6 +24,7 @@ lsm_PYTHON = lsm/__init__.py \
lsm/ontap.py \
lsm/pluginrunner.py \
lsm/simulator.py \
+ lsm/simarray.py \
lsm/smis.py \
lsm/smisproxy.py \
lsm/transport.py \
diff --git a/lsm/lsm/__init__.py b/lsm/lsm/__init__.py
index 3407382..71bb7e2 100644
--- a/lsm/lsm/__init__.py
+++ b/lsm/lsm/__init__.py
@@ -17,6 +17,7 @@ from data import DataEncoder, DataDecoder, IData, Initiator, Volume, Pool, \
from iplugin import IPlugin, IStorageAreaNetwork, INetworkAttachedStorage, INfs
from pluginrunner import PluginRunner
-from simulator import StorageSimulator, SimJob, SimState
+from simulator import SimPlugin
+from simarray import SimData, SimJob, SimArray
from transport import Transport
from version import VERSION
diff --git a/lsm/lsm/simarray.py b/lsm/lsm/simarray.py
new file mode 100644
index 0000000..8a19fd2
--- /dev/null
+++ b/lsm/lsm/simarray.py
@@ -0,0 +1,1309 @@
+# Copyright (C) 2011-2013 Red Hat, Inc.
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: tasleson
+
+# TODO: 1. Introduce constant check by using state_to_str() converting.
+# 2. Snapshot should consume space in pool.
+
+import random
+import pickle
+import tempfile
+import os
+import time
+
+from common import md5, LsmError, ErrorNumber, size_human_2_size_bytes, \
+ JobStatus
+from data import System, Volume, Disk, Pool, FileSystem, AccessGroup, \
+ Initiator, BlockRange, Snapshot, NfsExport
+
+ """
+ Simulates a longer running job, uses actual wall time. If test cases
+ take too long we can reduce time by shortening time duration.
+ """
+
+ end = self.start + self.duration
+ now = time.time()
+ self.percent = 100
+ self.status = JobStatus.COMPLETE
+ diff = now - self.start
+ self.percent = int(100 * (diff / self.duration))
+
+ duration = os.getenv("LSM_SIM_TIME", 1)
+ self.status = JobStatus.INPROGRESS
+ self.percent = 0
+ self.__item = item_to_return
+ self.start = time.time()
+ self.duration = float(random.randint(0, int(duration)))
+
+ """
+ Returns a tuple (status, percent, data)
+ """
+ self._calc_progress()
+ return self.status, self.percent, self.item
+
+ return self.__item
+ return None
+
+ self.__item = value
+
+
+ SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
+ tempfile.gettempdir() + '/lsm_sim_data')
+
+ raise LsmError(ErrorNumber.INVALID_ARGUMENT,
+ "Stored simulator state incompatible with "
+ "simulator, please move or delete %s" %
+ dump_file)
+
+ self.dump_file = SimArray.SIM_DATA_FILE
+ self.dump_file = dump_file
+
+ self.data = pickle.load(f)
+
+ # Going forward we could get smarter about handling this for
+ # changes that aren't invasive, but we at least need to check
+ # to make sure that the data will work and not cause any
+ # undue confusion.
+ if self.data.version != SimData.SIM_DATA_VERSION or \
+ SimArray._version_error(self.dump_file)
+ SimArray._version_error(self.dump_file)
+
+ self.data = SimData()
+
+ fh_dump_file = open(self.dump_file, 'wb')
+ pickle.dump(self.data, fh_dump_file)
+ fh_dump_file.close()
+
+ return self.data.job_status(job_id, flags=0)
+
+ return self.data.job_free(job_id, flags=0)
+
+ return self.data.set_time_out(ms, flags)
+
+ return self.data.get_time_out(flags)
+
+ return self.data.systems()
+
+ return Volume(sim_vol['vol_id'], sim_vol['name'], sim_vol['vpd83'],
+ SimData.SIM_DATA_BLK_SIZE,
+ int(sim_vol['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Volume.STATUS_OK, sim_vol['sys_id'],
+ sim_vol['pool_id'])
+
+ sim_vols = self.data.volumes()
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ rc = []
+ sim_pools = self.data.pools()
+ pool = Pool(sim_pool['pool_id'], sim_pool['name'],
+ sim_pool['total_space'], sim_pool['free_space'],
+ sim_pool['sys_id'])
+ rc.extend([pool])
+ return rc
+
+ rc = []
+ sim_disks = self.data.disks()
+ disk = Disk(sim_disk['disk_id'], sim_disk['name'],
+ sim_disk['disk_type'], SimData.SIM_DATA_BLK_SIZE,
+ int(sim_disk['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Disk.STATUS_OK, sim_disk['sys_id'])
+ rc.extend([disk])
+ return rc
+
+ sim_vol = self.data.volume_create(
+ pool_id, vol_name, size_bytes, thinp, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ self.data.volume_delete(vol_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ sim_vol = self.data.volume_resize(vol_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ sim_vol = self.data.volume_replicate(
+ dst_pool_id, rep_type, src_vol_id, new_vol_name, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ return self.data.volume_replicate_range_block_size(sys_id, flags)
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ return self.data.job_create(
+ self.data.volume_replicate_range(
+ rep_type, src_vol_id, dst_vol_id, ranges, flags))[0]
+
+ return self.data.volume_online(vol_id, flags)
+
+ return self.data.volume_offline(vol_id, flags)
+
+ return self.data.volume_child_dependency(vol_id, flags)
+
+ return self.data.job_create(
+ self.data.volume_child_dependency_rm(vol_id, flags))[0]
+
+ return FileSystem(sim_fs['fs_id'], sim_fs['name'],
+ sim_fs['total_space'], sim_fs['free_space'],
+ sim_fs['pool_id'], sim_fs['sys_id'])
+
+ sim_fss = self.data.fs()
+ return [SimArray._sim_fs_2_lsm(f) for f in sim_fss]
+
+ sim_fs = self.data.fs_create(pool_id, fs_name, size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ self.data.fs_delete(fs_id, flags=0)
+ return self.data.job_create(None)[0]
+
+ sim_fs = self.data.fs_resize(fs_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ sim_fs = self.data.fs_clone(src_fs_id, dst_fs_name, snap_id, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ return self.data.job_create(
+ self.data.file_clone(
+ fs_id, src_fs_name, dst_fs_name, snap_id, flags))[0]
+
+ return Snapshot(sim_snap['snap_id'], sim_snap['name'],
+ sim_snap['timestamp'])
+
+ sim_snaps = self.data.fs_snapshots(fs_id, flags)
+ return [SimArray._sim_snap_2_lsm(s) for s in sim_snaps]
+
+ sim_snap = self.data.fs_snapshot_create(fs_id, snap_name, files,
+ flags)
+ return self.data.job_create(SimArray._sim_snap_2_lsm(sim_snap))
+
+ return self.data.job_create(
+ self.data.fs_snapshot_delete(fs_id, snap_id, flags))[0]
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ return self.data.job_create(
+ self.data.fs_snapshot_revert(
+ fs_id, snap_id, files, restore_files,
+ flag_all_files, flags))[0]
+
+ return self.data.fs_child_dependency(fs_id, files, flags)
+
+ return self.data.job_create(
+ self.data.fs_child_dependency_rm(fs_id, files, flags))[0]
+
+ return NfsExport(
+ sim_exp['exp_id'], sim_exp['fs_id'], sim_exp['exp_path'],
+ sim_exp['auth_type'], sim_exp['root_hosts'], sim_exp['rw_hosts'],
+ sim_exp['ro_hosts'], sim_exp['anon_uid'], sim_exp['anon_gid'],
+ sim_exp['options'])
+
+ sim_exps = self.data.exports(flags)
+ return [SimArray._sim_exp_2_lsm(e) for e in sim_exps]
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ sim_exp = self.data.fs_export(
+ fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags)
+ return SimArray._sim_exp_2_lsm(sim_exp)
+
+ return self.data.fs_unexport(exp_id, flags)
+
+ return AccessGroup(sim_ag['ag_id'], sim_ag['name'],
+ sim_ag['init_ids'], sim_ag['sys_id'])
+
+ sim_ags = self.data.ags()
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ sim_ag = self.data.access_group_create(
+ name, init_id, init_type, sys_id, flags)
+ return SimArray._sim_ag_2_lsm(sim_ag)
+
+ return self.data.access_group_del(ag_id, flags)
+
+ return self.data.access_group_add_initiator(
+ ag_id, init_id, init_type, flags)
+
+ return self.data.access_group_del_initiator(ag_id, init_id, flags)
+
+ return self.data.access_group_grant(ag_id, vol_id, access, flags)
+
+ return self.data.access_group_revoke(ag_id, vol_id, flags)
+
+ sim_vols = self.data.volumes_accessible_by_access_group(ag_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ sim_ags = self.data.access_groups_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ return Initiator(sim_init['init_id'], sim_init['init_type'],
+ sim_init['name'])
+
+ sim_inits = self.data.inits()
+ return [SimArray._sim_init_2_lsm(a) for a in sim_inits]
+
+ return self.data.initiator_grant(
+ init_id, init_type, vol_id, access, flags)
+
+ return self.data.initiator_revoke(init_id, vol_id, flags)
+
+ sim_vols = self.data.volumes_accessible_by_initiator(init_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ sim_inits = self.data.initiators_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_init_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ return self.data.iscsi_chap_auth(init_id, in_user, in_pass, out_user,
+ out_pass, flags)
+
+
+ """
+ * we don't store the same data twice
+ * we don't store data which could be calculated from other data
+
+ self.vol_dict = {
+ Volume.id = sim_vol,
+ }
+
+ sim_vol = {
+ 'vol_id': "VOL_ID_%s" % SimData._random_vpd(4),
+ 'vpd83': SimData._random_vpd(),
+ 'name': vol_name,
+ 'total_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': owner_pool_id,
+ 'consume_size': size_bytes,
+ 'replicate': {
+ dst_vol_id = [
+ {
+ 'src_start_blk': src_start_blk,
+ 'dst_start_blk': dst_start_blk,
+ 'blk_count': blk_count,
+ 'rep_type': Volume.REPLICATE_XXXX,
+ },
+ ],
+ },
+ 'mask': {
+ ag_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ },
+ 'mask_init': {
+ init_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ }
+ }
+
+ self.init_dict = {
+ Initiator.id = sim_init,
+ }
+ sim_init = {
+ 'init_id': Initiator.id,
+ 'init_type': Initiator.TYPE_XXXX,
+ 'name': SimData.SIM_DATA_INIT_NAME,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ self.ag_dict ={
+ AccessGroup.id = sim_ag,
+ }
+ sim_ag = {
+ 'init_ids': [init_id,],
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'name': name,
+ 'ag_id': "AG_ID_%s" % SimData._random_vpd(4),
+ }
+
+ self.fs_dict = {
+ FileSystem.id = sim_fs,
+ }
+ sim_fs = {
+ 'fs_id': "FS_ID_%s" % SimData._random_vpd(4),
+ 'name': fs_name,
+ 'total_space': size_bytes,
+ 'free_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': pool_id,
+ 'consume_size': size_bytes,
+ 'clone': {
+ dst_fs_id: {
+ 'snap_id': snap_id, # None if no snapshot
+ 'files': [ file_path, ] # [] if all files cloned.
+ },
+ },
+ 'snaps' = [snap_id, ],
+ }
+ self.snap_dict = {
+ Snapshot.id: sim_snap,
+ }
+ sim_snap = {
+ 'snap_id': "SNAP_ID_%s" % SimData._random_vpd(4),
+ 'name': snap_name,
+ 'fs_id': fs_id,
+ 'files': [file_path, ],
+ 'timestamp': time.time(),
+ }
+ self.exp_dict = {
+ Export.id: sim_exp,
+ }
+ sim_exp = {
+ 'exp_id': "EXP_ID_%s" % SimData._random_vpd(4),
+ 'fs_id': fs_id,
+ 'exp_path': exp_path,
+ 'auth_type': auth_type,
+ 'root_hosts': [root_host, ],
+ 'rw_hosts': [rw_host, ],
+ 'ro_hosts': [ro_host, ],
+ 'anon_uid': anon_uid,
+ 'anon_gid': anon_gid,
+ 'options': [option, ],
+ }
+ """
+ SIM_DATA_BLK_SIZE = 512
+ SIM_DATA_VERSION = "2.0"
+ SIM_DATA_SYS_ID = 'sim-01'
+ SIM_DATA_INIT_NAME = 'NULL'
+ SIM_DATA_TMO = 30000 # ms
+
+ return 'LSM_SIMULATOR_DATA_%s' % md5(SimData.SIM_DATA_VERSION)
+
+ self.tmo = SimData.SIM_DATA_TMO
+ self.version = SimData.SIM_DATA_VERSION
+ self.signature = SimData._state_signature()
+ self.job_num = 0
+ self.job_dict = {
+ # id: SimJob
+ }
+ self.syss = [System(SimData.SIM_DATA_SYS_ID,
+ 'LSM simulated storage plug-in',
+ System.STATUS_OK)]
+ pool_size_200g = size_human_2_size_bytes('200GiB')
+ self.pool_dict = {
+ 'POO1': {
+ 'pool_id': 'POO1',
+ 'name': 'Pool 1',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_000', 'DISK_ID_001'],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'POO2': {
+ 'pool_id': 'POO2',
+ 'name': 'Pool 2',
+ 'total_space': pool_size_200g,
+ 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_ids': ['POO1'],
+ 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ # lsm_test_aggr pool is required by test/runtest.sh
+ 'lsm_test_aggr': {
+ 'pool_id': 'lsm_test_aggr',
+ 'name': 'lsm_test_aggr',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_002', 'DISK_ID_003'],
+ 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.vol_dict = {
+ }
+ self.fs_dict = {
+ }
+ self.snap_dict = {
+ }
+ self.exp_dict = {
+ }
+ disk_size_2t = size_human_2_size_bytes('2TiB')
+ self.disk_dict = {
+ 'DISK_ID_000': {
+ 'disk_id': 'DISK_ID_000',
+ 'name': 'SATA Disk 000',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_001': {
+ 'disk_id': 'DISK_ID_001',
+ 'name': 'SATA Disk 001',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_002': {
+ 'disk_id': 'DISK_ID_002',
+ 'name': 'SAS Disk 002',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_003': {
+ 'disk_id': 'DISK_ID_003',
+ 'name': 'SAS Disk 003',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.ag_dict = {
+ }
+ self.init_dict = {
+ }
+ # Create some volumes, fs and etc
+ self.volume_create(
+ 'POO1', 'Volume 000', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+ self.volume_create(
+ 'POO1', 'Volume 001', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+
+ self.pool_dict['POO3']= {
+ 'pool_id': 'POO3',
+ 'name': 'Pool 3',
+ 'member_type': Pool.MEMBER_TYPE_VOLUME,
+ 'member_ids': [
+ self.vol_dict.values()[0]['vol_id'],
+ self.vol_dict.values()[1]['vol_id'],
+ ],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ return
+
+ """
+ Calculate out the free size of certain pool.
+ """
+ free_space = self._pool_total_space(pool_id)
+ continue
+ return 0
+ free_space -= sim_vol['consume_size']
+ continue
+ return 0
+ free_space -= sim_fs['consume_size']
+ return free_space
+
+ """
+ Generate a random 16 digit number as hex
+ """
+ vpd = []
+ vpd.append(str('%02X' % (random.randint(0, 255))))
+ return "".join(vpd)
+
+ """
+ Find out the correct size of RAID pool
+ """
+ member_type = self.pool_dict[pool_id]['member_type']
+ return self.pool_dict[pool_id]['total_space']
+
+ all_size = 0
+ item_size = 0 # disk size, used by RAID 3/4/5/6
+ member_ids = self.pool_dict[pool_id]['member_ids']
+ raid_type = self.pool_dict[pool_id]['raid_type']
+ member_count = len(member_ids)
+
+ all_size += self.disk_dict[member_id]['total_space']
+ item_size = self.disk_dict[member_id]['total_space']
+
+ all_size += self.vol_dict[member_id]['total_space']
+ item_size = self.vol_dict[member_id]['total_space']
+
+ return int(all_size)
+ return int(all_size)
+ elif raid_type == Pool.RAID_TYPE_RAID1 or \
+ return int(all_size/2)
+ elif raid_type == Pool.RAID_TYPE_RAID3 or \
+ raid_type == Pool.RAID_TYPE_RAID4 or \
+ raid_type == Pool.RAID_TYPE_RAID5 or \
+ return int(all_size - item_size)
+ elif raid_type == Pool.RAID_TYPE_RAID6 or \
+ return int(all_size - item_size - item_size)
+ return int((all_size - item_size)/2)
+ return int((all_size - item_size - item_size)/2)
+ return 0
+
+ return (size_bytes / SimData.SIM_DATA_BLK_SIZE + 1) * \
+ SimData.SIM_DATA_BLK_SIZE
+
+ self.job_num += 1
+ job_id = "JOB_%s" % self.job_num
+ self.job_dict[job_id] = SimJob(returned_item)
+ return job_id, None
+ return None, returned_item
+
+ return self.job_dict[job_id].progress()
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ del(self.job_dict[job_id])
+ return
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ self.tmo = ms
+ return None
+
+ return self.tmo
+
+ return self.syss
+
+ rc = []
+ sim_pool['total_space'] = \
+ self._pool_total_space(sim_pool['pool_id'])
+ sim_pool['free_space'] = \
+ self._pool_free_space(sim_pool['pool_id'])
+ rc.extend([sim_pool])
+ return rc
+
+ return self.vol_dict.values()
+
+ return self.disk_dict.values()
+
+ return self.ag_dict.values()
+
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['thinp'] = thinp
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+ return sim_vol
+
+ del(self.vol_dict[vol_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ pool_id = self.vol_dict[vol_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.vol_dict[vol_id]['total_space'] = new_size_bytes
+ self.vol_dict[vol_id]['consume_size'] = new_size_bytes
+ return self.vol_dict[vol_id]
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+ size_bytes = self.vol_dict[src_vol_id]['total_space']
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(dst_pool_id)
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = new_vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = dst_pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+
+ dst_vol_id = vol_id
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ sim_rep = {
+ 'rep_type': rep_type,
+ 'src_start_blk': 0,
+ 'dst_start_blk': 0,
+ 'blk_count': size_bytes,
+ }
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ [sim_rep])
+
+ return sim_vol
+
+ return SimData.SIM_DATA_BLK_SIZE
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % src_vol_id)
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % dst_vol_id)
+
+ sim_reps = []
+ sim_rep = dict()
+ sim_rep['rep_type'] = rep_type
+ sim_rep['src_start_blk'] = rep_range.src_block
+ sim_rep['dst_start_blk'] = rep_range.dest_block
+ sim_rep['blk_count'] = rep_range.block_count
+ sim_reps.extend([sim_rep])
+
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ [sim_reps])
+
+ return None
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does have an indication of whether a volume
+ # is offline or online; meanwhile, the command line does not support
+ # volume_online() yet
+ return None
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does have an indication of whether a volume
+ # is offline or online; meanwhile, the command line does not support
+ # volume_online() yet
+ return None
+
+ """
+ If volume is a src or dst of a replication, we return True.
+ """
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ return True
+ return True
+ return False
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ del self.vol_dict[vol_id]['replicate']
+
+ del sim_vol['replicate'][vol_id]
+ return None
+
+ return self.ag_dict.values()
+
+ sim_ag = dict()
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+
+ sim_ag['init_ids'] = [init_id]
+ sim_ag['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_ag['name'] = name
+ sim_ag['ag_id'] = "AG_ID_%s" % SimData._random_vpd(4)
+ self.ag_dict[sim_ag['ag_id']] = sim_ag
+ return sim_ag
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ del(self.ag_dict[ag_id])
+ return None
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ return self.ag_dict[ag_id]
+
+ self.ag_dict[ag_id]['init_ids'].extend([init_id])
+
+ return None
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ return None
+
+ new_init_ids = []
+ new_init_ids.extend([cur_init_id])
+ del(self.ag_dict[ag_id]['init_ids'])
+ self.ag_dict[ag_id]['init_ids'] = new_init_ids
+ return None
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ self.vol_dict[vol_id]['mask'] = dict()
+
+ self.vol_dict[vol_id]['mask'][ag_id] = access
+ return None
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ return None
+
+ return None
+
+ del(self.vol_dict[vol_id]['mask'][ag_id])
+ return None
+
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ rc = []
+ continue
+ rc.extend([sim_vol])
+ return rc
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ sim_ags = []
+ ag_ids = self.vol_dict[vol_id]['mask'].keys()
+ sim_ags.extend([self.ag_dict[ag_id]])
+ return sim_ags
+
+ return self.init_dict.values()
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ self.vol_dict[vol_id]['mask_init'] = dict()
+
+ self.vol_dict[vol_id]['mask_init'][init_id] = access
+ return None
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+
+ del self.vol_dict[vol_id]['mask_init'][init_id]
+
+ return None
+
+ """
+ Find out the access groups defined initiator belong to.
+ Will return a list of access group id or []
+ """
+ rc = []
+ rc.extend([sim_ag['ag_id']])
+ return rc
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+ rc_dedup_dict = dict()
+ ag_ids = self._ag_ids_of_init(init_id)
+ sim_vols = self.volumes_accessible_by_access_group(ag_id)
+ rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+
+ rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+ return rc_dedup_dict.values()
+
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ rc_dedup_dict = dict()
+ sim_ags = self.access_groups_granted_to_volume(vol_id, flags)
+ rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+ rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+ return rc_dedup_dict.values()
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+ raise LsmError(ErrorNumber.UNSUPPORTED_INITIATOR_TYPE,
+ "Initiator %s is not an iSCSI IQN" % init_id)
+ # No iscsi chap query API yet
+ return None
+
+ return self.fs_dict.values()
+
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_fs = dict()
+ fs_id = "FS_ID_%s" % SimData._random_vpd(4)
+ sim_fs['fs_id'] = fs_id
+ sim_fs['name'] = fs_name
+ sim_fs['total_space'] = size_bytes
+ sim_fs['free_space'] = size_bytes
+ sim_fs['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_fs['pool_id'] = pool_id
+ sim_fs['consume_size'] = size_bytes
+ self.fs_dict[fs_id] = sim_fs
+ return sim_fs
+
+ del(self.fs_dict[fs_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ pool_id = self.fs_dict[fs_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.fs_dict[fs_id]['total_space'] = new_size_bytes
+ self.fs_dict[fs_id]['free_space'] = new_size_bytes
+ self.fs_dict[fs_id]['consume_size'] = new_size_bytes
+ return self.fs_dict[fs_id]
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such File System: %s" % fs_id)
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % src_fs_id)
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ src_sim_fs = self.fs_dict[src_fs_id]
+ dst_sim_fs = self.fs_create(
+ src_sim_fs['pool_id'], dst_fs_name, src_sim_fs['total_space'], 0)
+ src_sim_fs['clone'] = dict()
+ src_sim_fs['clone'][dst_sim_fs['fs_id']] = {
+ 'snap_id': snap_id,
+ }
+ return dst_sim_fs
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # TODO: No file clone query API yet, no need to do anything internally
+ return None
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ rc = []
+ rc.extend([self.snap_dict[snap_id]])
+ return rc
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ self.fs_dict[fs_id]['snaps'] = []
+
+ snap_id = "SNAP_ID_%s" % SimData._random_vpd(4)
+ sim_snap = dict()
+ sim_snap['snap_id'] = snap_id
+ sim_snap['name'] = snap_name
+ sim_snap['files'] = []
+ sim_snap['files'] = files
+ sim_snap['timestamp'] = time.time()
+ self.snap_dict[snap_id] = sim_snap
+ self.fs_dict[fs_id]['snaps'].extend([snap_id])
+ return sim_snap
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ del self.snap_dict[snap_id]
+ new_snap_ids = []
+ new_snap_ids.extend([old_snap_id])
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # Nothing needs to be done internally for a revert.
+ return None
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ return False
+ return True
+ # We are snapshotting all files
+ return True
+ return True
+ return False
+
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ return None
+ snap_ids = self.fs_dict[fs_id]['snaps']
+ del self.snap_dict[snap_id]
+ del self.fs_dict[fs_id]['snaps']
+ snap_ids_to_rm = []
+ # BUG: if a snapshot was taken against all files,
+ # what should we do when the user requests removing
+ # the dependency on only certain files?
+ # Currently, we do nothing
+ return None
+ new_files = []
+ new_files.extend([old_file])
+ # all files have been removed from the snapshot list.
+ snap_ids_to_rm.extend([snap_id])
+ self.snap_dict[snap_id]['files'] = new_files
+ del self.snap_dict[snap_id]
+
+ new_snap_ids = []
+ new_snap_ids.extend([cur_snap_id])
+ del self.fs_dict[fs_id]['snaps']
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+ return self.exp_dict.values()
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such File System: %s" % fs_id)
+ sim_exp = dict()
+ sim_exp['exp_id'] = "EXP_ID_%s" % SimData._random_vpd(4)
+ sim_exp['fs_id'] = fs_id
+ sim_exp['exp_path'] = "/%s" % sim_exp['exp_id']
+ sim_exp['exp_path'] = exp_path
+ sim_exp['auth_type'] = auth_type
+ sim_exp['root_hosts'] = root_hosts
+ sim_exp['rw_hosts'] = rw_hosts
+ sim_exp['ro_hosts'] = ro_hosts
+ sim_exp['anon_uid'] = anon_uid
+ sim_exp['anon_gid'] = anon_gid
+ sim_exp['options'] = options
+ self.exp_dict[sim_exp['exp_id']] = sim_exp
+ return sim_exp
+
+ raise LsmError(ErrorNumber.INVALID_NFS,
+ "No such NFS Export: %s" % exp_id)
+ del self.exp_dict[exp_id]
+ return None
+
+ def pool_create(self,
+ system_id,
+ pool_name='',
+ raid_type=Pool.RAID_TYPE_UNKNOWN,
+ member_type=Pool.MEMBER_TYPE_UNKNOWN,
+ member_ids=None,
+ member_count=0,
+ size_bytes=0,
+ thinp_type=Pool.THINP_TYPE_UNKNOWN,
+ pool_name = 'POOL %s' % SimData._random_vpd(4)
+
+ ## Coding
+ return
diff --git a/lsm/lsm/simulator.py b/lsm/lsm/simulator.py
index ca9d3c4..9a7a75c 100644
--- a/lsm/lsm/simulator.py
+++ b/lsm/lsm/simulator.py
@@ -26,270 +26,17 @@ from data import Pool, Initiator, Volume, BlockRange, System, AccessGroup, \
Snapshot, NfsExport, FileSystem, Capabilities, Disk, OptionalData
from iplugin import INfs, IStorageAreaNetwork
from version import VERSION
+from simarray import SimArray, SimJob
-SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
- tempfile.gettempdir() + '/lsm_sim_data')
-duration = os.getenv("LSM_SIM_TIME", 1)
-
-# Bump this when the sim data layout changes on disk
-SIM_DATA_VERSION = 1
-
-
- """
- Simulates a longer running job, uses actual wall time. If test cases
- take too long we can reduce time by shortening time duration.
- """
-
- end = self.start + self.duration
- now = time.time()
- self.percent = 100
- self.status = JobStatus.COMPLETE
- diff = now - self.start
- self.percent = int(100 * (diff / self.duration))
-
- self.status = JobStatus.INPROGRESS
- self.percent = 0
- self.__item = item_to_return
- self.start = time.time()
- self.duration = float(random.randint(0, int(duration)))
-
- """
- Returns a tuple (status, percent, volume)
- """
- self.__calc_progress()
- return self.status, self.percent, self.item
-
- return self.__item
- return None
-
- self.__item = value
-
-
- """
- Generate some kind of signature for this object, not sure this is ideal.
-
- Hopefully this will save some debug time.
- """
- sig = ''
- keys = obj.__dict__.keys()
- keys.sort()
-
- sig = md5(sig + k)
- return sig
-
-
- rc = ''
- objects = [Pool('', '', 0, 0, ''), Volume('', '', '', 1, 1, 0, '', ''),
- AccessGroup('', '', ['']), Initiator('', 0, ''),
- System('', '', 0), FileSystem('', '', 0, 0, '', ''),
- BlockRange(0, 100, 50), Capabilities(),
- NfsExport('', '', '', '', '', '', '', '', '', '', ),
- Snapshot('', '', 10)]
-
- rc = md5(rc + _signature(o))
-
- return rc
-
-
- self.version = SIM_DATA_VERSION
- self.sys_info = System('sim-01', 'LSM simulated storage plug-in',
- System.STATUS_OK)
- p1 = Pool('POO1', 'Pool 1', 2 ** 64, 2 ** 64, self.sys_info.id)
- p2 = Pool('POO2', 'Pool 2', 2 ** 64, 2 ** 64, self.sys_info.id)
- p3 = Pool('POO3', 'Pool 3', 2 ** 64, 2 ** 64, self.sys_info.id)
- p4 = Pool('POO4', 'lsm_test_aggr', 2 ** 64, 2 ** 64, self.sys_info.id)
-
- self.block_size = 512
-
- pm1 = {'pool': p1, 'volumes': {}}
- pm2 = {'pool': p2, 'volumes': {}}
- pm3 = {'pool': p3, 'volumes': {}}
- pm4 = {'pool': p4, 'volumes': {}}
-
- self.pools = {p1.id: pm1, p2.id: pm2, p3.id: pm3, p4.id: pm4}
- self.volumes = {}
- self.vol_num = 1
- self.access_groups = {}
-
- self.fs = {}
- self.fs_num = 1
-
- self.tmo = 30000
- self.jobs = {}
- self.job_num = 1
-
- #These express relationships between initiators and volumes. This
- #is done because if you delete either part of the relationship
- #you need to delete the association between them. Holding this stuff
- #in a db would be easier :-)
- self.group_grants = {} # {access group id : {volume id: access }}
-
- #Create a signature
- self.signature = _state_signature()
-
-
"""
Simple class that implements enough to allow the framework to be exercised.
"""
-
- """
- Generate a random 16 digit number as hex
- """
- vpd = []
- vpd.append(str('%02X' % (random.randint(0, 255))))
- return "".join(vpd)
-
- """
- Round the requested size to block size.
- """
- return (size_bytes / self.s.block_size) * self.s.block_size
-
- self.s.job_num += 1
- job = "JOB_" + str(self.s.job_num)
- self.s.jobs[job] = SimJob(returned_item)
- return job, None
- return None, returned_item
-
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Stored simulator state incompatible with "
- "simulator, please move or delete %s" %
- self.file)
-
- tmp = None
- tmp = pickle.load(f)
-
- # Going forward we could get smarter about handling this for
- # changes that aren't invasive, but we at least need to check
- # to make sure that the data will work and not cause any
- # undo confusion.
- if tmp.version != SIM_DATA_VERSION or \
- self._version_error()
- self._version_error()
-
- return tmp
-
- f = open(self.file, 'wb')
- pickle.dump(self.s, f)
- f.close()
-
- #If we run via the daemon the file will be owned by libstoragemgmt
- #and if we run sim_lsmplugin stand alone we will be unable to
- #change the permissions.
- os.chmod(self.file, 0666)
- pass
-
- prev = self._load()
- return prev
- return SimState()
-
- """
- String list should be an empty list or a list with items
- """
- pass
- raise LsmError(ErrorNumber.INVALID_SL, 'Invalid string list')
-
-
- self.file = SIM_DATA_FILE
- self.s = self._load_state()
self.uri = None
self.password = None
self.tmo = 0
- p = self.s.pools[pool_id]['pool']
-
- rounded_size = self.__block_rounding(size_bytes)
-
- p.free_space -= rounded_size
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- return rounded_size
-
- p = self.s.pools[pool_id]['pool']
- p.free_space += size_bytes
-
- return md5(name)
-
- return AccessGroup(StorageSimulator._ag_id(name), name,
- [i.id for i in h['initiators']], self.s.sys_info.id)
-
- actual_size = self._allocate_from_pool(pool.id, size_bytes)
-
- nv = Volume('Vol' + str(self.s.vol_num), name,
- StorageSimulator.__random_vpd(), self.s.block_size,
- (actual_size / self.s.block_size), Volume.STATUS_OK,
- self.s.sys_info.id,
- pool.id)
- self.s.volumes[nv.id] = {'pool': pool, 'volume': nv}
- self.s.vol_num += 1
- return self.__create_job(nv)
-
- p = self.s.pools[pool.id]['pool']
- actual_size = self._allocate_from_pool(p.id, size_bytes)
-
- new_fs = FileSystem('FS' + str(self.s.fs_num), name, actual_size,
- actual_size, p.id, self.s.sys_info.id)
-
- self.s.fs[new_fs.id] = {'pool': p, 'fs': new_fs, 'ss': {},
- 'exports': {}}
- self.s.fs_num += 1
- return self.__create_job(new_fs)
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Pool not found')
-
self.uri = uri
self.password = password
qp = uri_parse(uri)
if 'parameters' in qp and 'statefile' in qp['parameters'] \
- self.file = qp['parameters']['statefile']
- self._load_state()
+ self.sim_array = SimArray(qp['parameters']['statefile'])
+ self.sim_array = SimArray()
return None
+ self.sim_array.save_state()
+
+ return self.sim_array.job_status(job_id, flags)
+
+ return self.sim_array.job_free(job_id, flags)
+
+ """
+ Fake converter. SimArray already do SimData to LSM data convert.
+ We move data convert to SimArray to make this sample plugin looks
+ clean.
+ But in real world, data converting is often handled by plugin itself
+ rather than array.
+ """
+ return sim_data
+
- self.tmo = ms
+ self.sim_array.set_time_out(ms, flags)
return None
- return self.tmo
+ return self.sim_array.get_time_out(flags)
rc = Capabilities()
return "Storage simulator", VERSION
- self._save()
-
- return [self.s.sys_info]
-
- return self.s.jobs[job_id].progress()
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- del self.s.jobs[job_id]
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- return [e['volume'] for e in self.s.volumes.itervalues()]
-
- return v['volume']
- return None
+ sim_syss = self.sim_array.systems()
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_syss]
- return [e['pool'] for e in self.s.pools.itervalues()]
-
+ sim_pools = self.sim_array.pools()
+ return [SimPlugin._sim_data_2_lsm(p) for p in sim_pools]
- ag = self.s.group_grants[access_group_id]
-
- return True
-
- return False
-
- rc = []
- ag = self._new_access_group(k, v)
- rc.extend(v['initiators'])
- rc.extend(v['initiators'])
-
- #We can have multiples as the same initiator can be in multiple access
- #groups
- remove_dupes = {}
- remove_dupes[x.id] = x
-
- return list(remove_dupes.values())
+ sim_vols = self.sim_array.volumes()
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- return self._initiators()
+ sim_disks = self.sim_array.disks()
+ return [SimPlugin._sim_data_2_lsm(d) for d in sim_disks]
def volume_create(self, pool, volume_name, size_bytes, provisioning,
- assert provisioning is not None
- return self._create_vol(pool, volume_name, size_bytes)
+ sim_vol = self.sim_array.volume_create(
+ pool.id, volume_name, size_bytes, provisioning, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
- self._deallocate_from_pool(p.id, v.size_bytes)
- del self.s.volumes[volume.id]
-
- del self.s.group_grants[k][volume.id]
-
- #We only return null or job id.
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
+ return self.sim_array.volume_delete(volume.id, flags)
- assert rep_type is not None
+ sim_vol = self.sim_array.volume_resize(
+ volume.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)
- p_id = None
+ dst_pool_id = None
- p_id = pool.id
- p_id = volume_src.pool_id
-
- p = self.s.pools[p_id]['pool']
- v = self.s.volumes[volume_src.id]['volume']
-
- return self._create_vol(p, name, v.size_bytes)
+ dst_pool_id = pool.id
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Incorrect pool')
-
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- 'Volume not present')
- return None
+ dst_pool_id = volume_src.pool_id
+ return self.sim_array.volume_replicate(
+ dst_pool_id, rep_type, volume_src.id, name, flags)
- return self.s.block_size
+ return self.sim_array.volume_replicate_range_block_size(
+ system.id, flags)
def volume_replicate_range(self, rep_type, volume_src, volume_dest,
-
- if rep_type not in (Volume.REPLICATE_SNAPSHOT,
- Volume.REPLICATE_CLONE,
- Volume.REPLICATE_COPY,
- Volume.REPLICATE_MIRROR_ASYNC,
- raise LsmError(ErrorNumber.UNSUPPORTED_REPLICATION_TYPE,
- "Rep_type invalid")
-
- #We could do some overlap range testing etc. here.
- pass
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Range element not BlockRange")
-
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Ranges not a list")
-
- #Make sure all the arguments are validated
- if volume_src.id in self.s.volumes \
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_src not found")
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_dest not found")
+ return self.sim_array.volume_replicate_range(
+ rep_type, volume_src.id, volume_dest.id, ranges, flags)
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
+ return self.sim_array.volume_online(volume.id, flags)
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
-
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
-
- current_size = v.size_bytes
- new_size = self.__block_rounding(new_size_bytes)
-
- raise LsmError(ErrorNumber.SIZE_SAME,
- 'Volume same size')
-
- if new_size < current_size \
- p.free_space -= (new_size - current_size)
- v.num_of_blocks = new_size / self.s.block_size
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- return self.__create_job(v)
-
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- self.s.group_grants[group.id] = {volume.id: access}
- self.s.group_grants[group.id][volume.id] = access
- raise LsmError(ErrorNumber.IS_MAPPED, 'Existing access present')
-
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id in self.s.group_grants \
- del self.s.group_grants[group.id][volume.id]
- raise LsmError(ErrorNumber.NO_MAPPING,
- 'No volume access to revoke')
+ return self.sim_array.volume_online(volume.id, flags)
- rc = []
- rc.append(self._new_access_group(k, v))
- return rc
-
- groups = self.access_group_list()
- return g
- return None
+ sim_ags = self.sim_array.ags()
+ return [SimPlugin._sim_data_2_lsm(a) for a in sim_ags]
def access_group_create(self, name, initiator_id, id_type, system_id,
- self.s.access_groups[name] = \
- {'initiators': [Initiator(initiator_id, id_type, 'UNA')],
- 'access': {}}
- return self._new_access_group(name, self.s.access_groups[name])
- raise LsmError(ErrorNumber.EXISTS_ACCESS_GROUP,
- "Access group with name exists")
+ sim_ag = self.sim_array.access_group_create(name, initiator_id,
+ id_type, system_id, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)
- del self.s.access_groups[group.name]
-
- del self.s.group_grants[group.id]
-
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del(group.id, flags)
def access_group_add_initiator(self, group, initiator_id, id_type,
- self.s.access_groups[group.name]['initiators']. \
- append(Initiator(initiator_id, id_type, 'UNA'))
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ sim_ag = self.sim_array.access_group_add_initiator(
+ group.id, initiator_id, id_type, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)
- self.s.access_groups[group.name]['initiators']. \
- remove(i)
- return None
-
- raise LsmError(ErrorNumber.INITIATOR_NOT_IN_ACCESS_GROUP,
- "Initiator not found")
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del_initiator(
+ group.id, initiator_id, flags)
- rc = []
- rc.append(self._get_volume(k))
+ return self.sim_array.access_group_grant(
+ group.id, volume.id, access, flags)
- return rc
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_revoke(
+ group.id, volume.id, flags)
- rc = []
+ sim_vols = self.sim_array.volumes_accessible_by_access_group(
+ group.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- rc.append(self._get_access_group(k))
- return rc
+ sim_vols = self.sim_array.access_groups_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- def iscsi_chap_auth(self, initiator, in_user, in_password, out_user,
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- 'Initiator is required')
+ return self.sim_array.inits(flags)
def initiator_grant(self, initiator_id, initiator_type, volume, access,
- name = initiator_id + volume.id
- group = None
-
- group = self.access_group_create(name, initiator_id,
- initiator_type,
- volume.system_id)
- result = self.access_group_grant(group, volume, access)
-
- self.access_group_del(group)
- raise e
-
- return result
+ return self.sim_array.initiator_grant(
+ initiator_id, initiator_type, volume.id, access, flags)
- name = initiator.id + volume.id
-
- ag = self._new_access_group(name, self.s.access_groups[name])
-
- self.access_group_del(ag)
- raise LsmError(ErrorNumber.NO_MAPPING,
- "No mapping of initiator "
- "and volume")
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "Volume not found")
- raise LsmError(ErrorNumber.NOT_FOUND_INITIATOR,
- "Initiator not found")
-
- return None
+ return self.sim_array.initiator_revoke(initiator.id, volume.id, flags)
- rc = []
- volumes = {}
-
- #Go through each access group, for each one see if our initiator
- #is one of them.
- # Check to see if the initiator is in the group.
- # Look up the privileges for this group, if any
- ag_id = StorageSimulator._ag_id(ag_name)
- # Loop through the volumes granted to this AG
- volumes[volume_id] = None
-
- # We very well may have duplicates, thus the reason we enter the
- # volume id into the hash with no value, we are weeding out dupes
- rc.append(self._get_volume(vol_id))
-
- return rc
+ sim_vols = self.sim_array.volumes_accessible_by_initiator(
+ initiator.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- return self._initiators(volume)
+ sim_inits = self.sim_array.initiators_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, initiator, in_user, in_password,
+ return self.sim_array.iscsi_chap_auth(
+ initiator.id, in_user, in_password, out_user, out_password, flags)
- return False
+ return self.sim_array.volume_child_dependency(volume.id, flags)
- return None
+ return self.sim_array.volume_child_dependency_rm(volume.id, flags)
- return [e['fs'] for e in self.s.fs.itervalues()]
-
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- self._deallocate_from_pool(p.id, f.total_space)
- del self.s.fs[fs.id]
+ sim_fss = self.sim_array.fs()
+ return [SimPlugin._sim_data_2_lsm(f) for f in sim_fss]
- #TODO: Check for exports and remove them.
+ sim_fs = self.sim_array.fs_create(pool.id, name, size_bytes)
+ return SimPlugin._sim_data_2_lsm(sim_fs)
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_delete(fs.id, flags)
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- #TODO Check to make sure we have enough space before proceeding
- self._deallocate_from_pool(p.id, f.total_space)
- f.total_space = self._allocate_from_pool(p.id, new_size_bytes)
- f.free_space = f.total_space
- return self.__create_job(f)
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- return self._create_fs(pool, name, size_bytes)
+ sim_fs = self.sim_array.fs_resize(
+ fs.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_fs)
- #TODO If snapshot is not None, then check for existence.
-
- f = self.s.fs[src_fs.id]['fs']
- p = self.s.fs[src_fs.id]['pool']
- return self._create_fs(p, dest_fs_name, f.total_space)
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, None, flags)
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, snapshot.id, flags)
def file_clone(self, fs, src_file_name, dest_file_name, snapshot=None,
- #TODO If snapshot is not None, then check for existence.
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Invalid src/destination file names")
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, None, flags)
+
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, snapshot.id, flags)
- rc = [e for e in self.s.fs[fs.id]['ss'].itervalues()]
- return rc
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_snaps = self.sim_array.fs_snapshots(fs.id, flags)
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_snaps]
- StorageSimulator._check_sl(files)
- raise LsmError(ErrorNumber.EXISTS_NAME,
- 'Snapshot name exists')
-
- s = Snapshot(md5(snapshot_name), snapshot_name, time.time())
- self.s.fs[fs.id]['ss'][s.id] = s
- return self.__create_job(s)
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_create(
+ fs.id, snapshot_name, files, flags)
- del self.s.fs[fs.id]['ss'][snapshot.id]
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_delete(
+ fs.id, snapshot.id, flags)
def fs_snapshot_revert(self, fs, snapshot, files, restore_files,
-
- StorageSimulator._check_sl(files)
- StorageSimulator._check_sl(files)
-
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_revert(
+ fs.id, snapshot.id, files, restore_files, all_files, flags)
- StorageSimulator._check_sl(files)
- return False
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency(fs.id, files, flags)
- StorageSimulator._check_sl(files)
- return self.__create_job(None)[0]
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency_rm(fs.id, files, flags)
+ # The API should change some day
return ["simple"]
- rc = []
- rc.append(exp)
- return rc
+ sim_exps = self.sim_array.exports(flags)
+ return [SimPlugin._sim_data_2_lsm(e) for e in sim_exps]
def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list,
-
- export_path = "/mnt/lsm/sim/%s" % self.s.fs[fs_id]['fs'].name
-
- export_id = md5(export_path)
-
- export = NfsExport(export_id, fs_id, export_path, auth_type,
- root_list, rw_list, ro_list, anon_uid, anon_gid,
- options)
-
- self.s.fs[fs_id]['exports'][export_id] = export
- return export
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_exp = self.sim_array.fs_export(
+ fs_id, export_path, root_list, rw_list, ro_list,
+ anon_uid, anon_gid, auth_type, options, flags=0)
+ return SimPlugin._sim_data_2_lsm(sim_exp)
- fs_id = export.fs_id
-
- del self.s.fs[fs_id]['exports'][export.id]
- raise LsmError(ErrorNumber.FS_NOT_EXPORTED, "FS not exported")
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
+ return self.sim_array.fs_unexport(export.id, flags)
- rc = []
- # TODO Make these persistent and make it fit into the total model
-
- name = "Sim disk %d" % i
- optionals = None
-
- optionals = OptionalData()
- optionals.set('sn', self.__random_vpd(8))
-
- rc.append(Disk(md5(name), name, Disk.DISK_TYPE_HYBRID, 512,
- 1893933056, Disk.STATUS_OK,
- self.s.sys_info.id, optionals))
-
- return rc
diff --git a/lsm/sim_lsmplugin b/lsm/sim_lsmplugin
index a4439a5..d2bee09 100755
--- a/lsm/sim_lsmplugin
+++ b/lsm/sim_lsmplugin
@@ -22,10 +22,10 @@ import syslog
from lsm.pluginrunner import PluginRunner
- from lsm.simulator import StorageSimulator
+ from lsm.simulator import SimPlugin
- PluginRunner(StorageSimulator, sys.argv).run()
+ PluginRunner(SimPlugin, sys.argv).run()
#This should be quite rare, but when it does happen this is pretty
#key in understanding what happened, especially when it happens when
Tony Asleson
2014-02-12 19:23:20 UTC
Permalink
Post by Gris Ge
'make check' will fail in rare chances(1 out of 20) at 'test_smoke_test'
due to the incorrect initiator got after simulator plugin initiation.
I've never seen this failure, probably as I usually run with CK_FORK=no,
nice catch and root cause determination!
Post by Gris Ge
Every test case will be forked within 1 second which cause them to use the
same random seed which get the same random number.
When 'test_smoke_test' and 'test_initiator_methods' are sharing the
same state files, the initiator count might not be 0.
The random seed is different now after adding with their PID.
Your fix introduces a problem. You moved the srandom out of the
conditional, so every time the function is called it re-seeds, thus
causing the sequence to be the same every time it is used. I amended
the patch and moved it back to its original location to ensure we get a
random character sequence.

Regards,
Tony


Example:

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Fill `buff` with a NUL-terminated string of (len - 1) random lowercase
 * ASCII letters.
 *
 * The seed is captured once per process (static), but the generator is
 * deliberately re-seeded with seed + PID on every call so that processes
 * forked within the same time(NULL) second still diverge — the point
 * under discussion in this thread.  The trade-off (repeated calls in one
 * process repeat the same string) is discussed below in the thread.
 *
 * Does nothing when `buff` is NULL or `len` < 2.
 *
 * Fix: the original seeded with srandom() but drew values with rand().
 * POSIX does not guarantee rand() and random() share state (glibc happens
 * to implement them on one state), so draw from random() to make the
 * seeding portably govern the output.
 */
void generateRandom(char *buff, uint32_t len)
{
uint32_t i = 0;
static int seed = 0;

if( !seed ) {
seed = time(NULL);
}
srandom(seed + getpid());

if( buff && (len > 1) ) {
for(i = 0; i < (len - 1); ++i) {
buff[i] = 'a' + random()%26;  /* was 97 + rand()%26; 97 == 'a' */
}
buff[len-1] = '\0';
}
}

/*
 * Demo driver: print one random string for every length from 2 up to
 * (but not including) the scratch-buffer size, exercising generateRandom
 * with progressively longer requests.
 */
int main(void)
{
    char scratch[33];
    size_t want;

    for (want = 2; want < sizeof(scratch); ++want) {
        generateRandom(scratch, want);
        puts(scratch);
    }

    return 0;
}

a
aj
ajl
ajlc
ajlcy
ajlcyq
ajlcyqa
ajlcyqaq
ajlcyqaqd
ajlcyqaqdu
ajlcyqaqdug
ajlcyqaqdugo
ajlcyqaqdugou
ajlcyqaqdugoua
ajlcyqaqdugouaq
ajlcyqaqdugouaqg
ajlcyqaqdugouaqgd
ajlcyqaqdugouaqgdk
ajlcyqaqdugouaqgdkr
ajlcyqaqdugouaqgdkri
ajlcyqaqdugouaqgdkrio
ajlcyqaqdugouaqgdkrioj
ajlcyqaqdugouaqgdkriojt
ajlcyqaqdugouaqgdkriojtj
ajlcyqaqdugouaqgdkriojtje
ajlcyqaqdugouaqgdkriojtjet
ajlcyqaqdugouaqgdkriojtjety
ajlcyqaqdugouaqgdkriojtjetys
ajlcyqaqdugouaqgdkriojtjetysr
ajlcyqaqdugouaqgdkriojtjetysru
ajlcyqaqdugouaqgdkriojtjetysruh
Gris Ge
2014-02-12 22:51:44 UTC
Permalink
Post by Tony Asleson
Your fix introduces a problem. You moved the srandom out of the
conditional, so every time the function is called it re-seeds thus
causing the sequence to be the same every time it is used. I ammended
patch and moved it back to original location to ensure we get a random
character sequence.
Regards,
Tony
Thanks, 're-seed' is not needed.

I am considering documenting this in a blog post after I get every
detail cleared up.

My understanding is that each fork starts a new sequence here, as the
forks were created before setup() was called. Every fork is using the
same sequence -- 0.

Besides the duplicate-work concern, why should we move the 're-seed'
back to its original location?

Attached is my test code for this matter, if that helps.
--
Gris Ge
Tony Asleson
2014-02-13 18:37:44 UTC
Permalink
Post by Gris Ge
Beside of duplicate works concern, why we should move 're-seed' in
original location?
The intention of the function generateRandom was to generate a random
character string each and every time it is called. There should be no
repetitions or duplications.

The function has a static variable seed. Each time we fork the value is
copied in the new process address space. We fork before we call the
function so yes the seed value is zero. As you found out if we fork
fast enough we get:

pid= 7194, id= testing a, value= xtozomiffugayzyrxlrbvezgtmdsxli
pid= 7194, id= testing a, value= uhzvxlgcscisajstgdmjbqijjumetam
pid= 7194, id= testing a, value= nhljeypiqtqktbdohibqltyvfujjnlx
pid= 7195, id= testing b, value= xtozomiffugayzyrxlrbvezgtmdsxli
pid= 7195, id= testing b, value= uhzvxlgcscisajstgdmjbqijjumetam
pid= 7195, id= testing b, value= nhljeypiqtqktbdohibqltyvfujjnlx
pid= 7196, id= testing c, value= xtozomiffugayzyrxlrbvezgtmdsxli
pid= 7196, id= testing c, value= uhzvxlgcscisajstgdmjbqijjumetam
pid= 7196, id= testing c, value= nhljeypiqtqktbdohibqltyvfujjnlx
pid= 7197, id= testing d, value= xtozomiffugayzyrxlrbvezgtmdsxli
pid= 7197, id= testing d, value= uhzvxlgcscisajstgdmjbqijjumetam
pid= 7197, id= testing d, value= nhljeypiqtqktbdohibqltyvfujjnlx
pid= 7198, id= testing e, value= xtozomiffugayzyrxlrbvezgtmdsxli
pid= 7198, id= testing e, value= uhzvxlgcscisajstgdmjbqijjumetam
pid= 7198, id= testing e, value= nhljeypiqtqktbdohibqltyvfujjnlx

Your fix which includes re-seeding using the process id every time
results in:

pid= 7199, id= testing a, value= zmrwnwkojpbfinzhcaoxcajupyhxsgw
pid= 7199, id= testing a, value= zmrwnwkojpbfinzhcaoxcajupyhxsgw
pid= 7199, id= testing a, value= zmrwnwkojpbfinzhcaoxcajupyhxsgw
pid= 7200, id= testing b, value= wqgdkmbtfobjwhjqmjuabacnvrtwlwr
pid= 7200, id= testing b, value= wqgdkmbtfobjwhjqmjuabacnvrtwlwr
pid= 7200, id= testing b, value= wqgdkmbtfobjwhjqmjuabacnvrtwlwr
pid= 7201, id= testing c, value= gefpdawkxsbwcvmxkwqdzixxavjfcas
pid= 7201, id= testing c, value= gefpdawkxsbwcvmxkwqdzixxavjfcas
pid= 7201, id= testing c, value= gefpdawkxsbwcvmxkwqdzixxavjfcas
pid= 7203, id= testing d, value= uhwfwvrzyvkpzxaogzlixusjdpatntg
pid= 7203, id= testing d, value= uhwfwvrzyvkpzxaogzlixusjdpatntg
pid= 7203, id= testing d, value= uhwfwvrzyvkpzxaogzlixusjdpatntg
pid= 7204, id= testing e, value= brqtiwozhysdbplekeadazwxngclnxj
pid= 7204, id= testing e, value= brqtiwozhysdbplekeadazwxngclnxj
pid= 7204, id= testing e, value= brqtiwozhysdbplekeadazwxngclnxj

This corrects the problem you were specifically running into, but
doesn't produce the expected output of a different random string on
each call. This will cause problems if the call is used more than once
in the same process expecting different unique values.

The code currently checked in results in:

pid= 7930, id= testing a, value= mzflxcspurfobtlpawabnarrvpejure
pid= 7930, id= testing a, value= irjtqnlgjdnyggjvihxmvyfotuserki
pid= 7930, id= testing a, value= zdtttigzrlmrrvcnfkmrfkwtfrojkat
pid= 7931, id= testing b, value= zqatjqfvehdpicsyzwgkzqrywwazlpw
pid= 7931, id= testing b, value= nfygoqlmutpldredtajfbbwzatbbgrz
pid= 7931, id= testing b, value= vwxbnppzkkqxnkbtddekeggfibhjkai
pid= 7932, id= testing c, value= jmoabazkervpbwyngcltfefibfxedkz
pid= 7932, id= testing c, value= oypqbrqnxjjmmhkanoniurncutayymz
pid= 7932, id= testing c, value= mnqdoivcfenuqwesjtfupyjrsettcis
pid= 7933, id= testing d, value= wrheeviraayaazxnuyltjzxzpxgkjav
pid= 7933, id= testing d, value= gtckzztqztpbwqalnzygkxglodsbnsw
pid= 7933, id= testing d, value= tlyfnxydyuucqncbabzglyoxpsrqhlm
pid= 7934, id= testing e, value= mxgbhcxbqvxbnyzymhfiecfrnhebfct
pid= 7934, id= testing e, value= tzzuieujwriyfjxfxekfkmlbbuhczjy
pid= 7934, id= testing e, value= skzpsdleacmyjvvovbaalmlophvughu

Which meets the expectation of a function that is called to generate
random strings.

However, if the function is called before we start forking then the code
that is checked in breaks, so I have updated the code again to address
that issue.

Example code attached.

Regards,
Tony

Tony Asleson
2014-02-12 19:51:22 UTC
Permalink
Amended patch pushed!

Thanks,
Tony
Post by Gris Ge
'make check' will fail in rare chances(1 out of 20) at 'test_smoke_test'
due to the incorrect initiator got after simulator plugin initiation.
Every test case will be forked within 1 second which cause them to use the
same random seed which get the same random number.
When 'test_smoke_test' and 'test_initiator_methods' are sharing the
same state files, the initiator count might not be 0.
The random seed is different now after adding with their PID.
---
test/tester.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/tester.c b/test/tester.c
index 4e8de1f..d409aa7 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -43,8 +43,8 @@ void generateRandom(char *buff, uint32_t len)
if( !seed ) {
seed = time(NULL);
- srandom(seed);
}
+ srandom(seed + getpid());
if( buff && (len > 1) ) {
for(i = 0; i < (len - 1); ++i) {
Tony Asleson
2014-02-11 22:50:48 UTC
Permalink
Post by Gris Ge
initiator_grant() to support ASYNC job as their comments said.
Looking at the C code it appears that the comment is incorrect. The
following functions do not return job ids, but the comments incorrectly
say they do or don't. I fear these are copy-and-paste errors. In
addition, the tests must be lacking in this area, as these checks are
done at run-time and we should have been getting exceptions if we had a
mismatch.

initiator_grant
initiator_revoke
access_group_grant
access_group_revoke
access_group_del
access_group_add_initiator
access_group_del_initiator

Regards,
Tony
Loading...