* New design of the simulator (see the sketch below):
* simarray.py: Storage array simulator.
SimArray -- Converts SimData into LSM classes.
SimData -- Handles storage resource management.
* simulator.py: Sample plugin code for plugin developers.
SimPlugin -- Provides the plugin API to LSM.
* With this change, 'simulator.py' becomes a good sample plugin with little
non-essential code. More documentation is still needed to explain every call.
* SimData acts as a storage array, providing more accurate and flexible
resource management.
* Provides all the features of the previous simulator.py.
* Passed 'make check', 'make distcheck' and 'rpmbuild'.
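As a rough illustration of the new layering (a simplified sketch only -- the
class name MiniPlugin and its methods are made up for demonstration; the real
code is in the patch below):

    # SimPlugin (plugin API) -> SimArray (LSM objects) -> SimData (raw state)
    from lsm import SimArray

    class MiniPlugin(object):                # simplified stand-in for SimPlugin
        def startup(self, statefile=None):
            # SimArray loads (or creates) the pickled SimData state
            self.sim_array = SimArray(statefile)

        def volumes(self):
            # SimArray already returns lsm.Volume objects built from SimData dicts
            return self.sim_array.volumes()

        def shutdown(self):
            self.sim_array.save_state()      # write SimData back to disk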
Changes since V1:
* Fixed the complaint message shown when an old-version state file is found
(see the example below).
* Fixed the return values of initiator_grant() and other calls.
* Fixed the Makefile and rpm spec file for the new simarray.py file.
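For example (an illustrative sketch only; the state file path below is
hypothetical), the persisted state that the version check guards can be
exercised directly through SimArray:

    from lsm import SimArray

    sim = SimArray('/tmp/lsm_sim_demo_state')  # default path comes from LSM_SIM_DATA
    print([p.name for p in sim.pools()])       # lsm.Pool objects built from SimData
    sim.save_state()                           # pickled to disk; a state file from an
                                               # older version now raises a clear LsmError
                                               # asking to move or delete it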
Signed-off-by: Gris Ge <***@redhat.com>
---
libstoragemgmt.spec.in | 1 +
lsm/Makefile.am | 1 +
lsm/lsm/__init__.py | 3 +-
lsm/lsm/simarray.py | 1309 ++++++++++++++++++++++++++++++++++++++++++++++++
lsm/lsm/simulator.py | 850 +++++--------------------------
lsm/sim_lsmplugin | 4 +-
6 files changed, 1454 insertions(+), 714 deletions(-)
create mode 100644 lsm/lsm/simarray.py
diff --git a/libstoragemgmt.spec.in b/libstoragemgmt.spec.in
index 6e64d8f..eacb3fd 100644
--- a/libstoragemgmt.spec.in
+++ b/libstoragemgmt.spec.in
@@ -246,6 +246,7 @@ fi
%{python_sitelib}/lsm/iplugin.*
%{python_sitelib}/lsm/pluginrunner.*
%{python_sitelib}/lsm/simulator.*
+%{python_sitelib}/lsm/simarray.*
%{python_sitelib}/lsm/transport.*
%{python_sitelib}/lsm/version.*
%{_bindir}/sim_lsmplugin
diff --git a/lsm/Makefile.am b/lsm/Makefile.am
index 3a405d5..f4dde10 100644
--- a/lsm/Makefile.am
+++ b/lsm/Makefile.am
@@ -24,6 +24,7 @@ lsm_PYTHON = lsm/__init__.py \
lsm/ontap.py \
lsm/pluginrunner.py \
lsm/simulator.py \
+ lsm/simarray.py \
lsm/smis.py \
lsm/smisproxy.py \
lsm/transport.py \
diff --git a/lsm/lsm/__init__.py b/lsm/lsm/__init__.py
index 3407382..71bb7e2 100644
--- a/lsm/lsm/__init__.py
+++ b/lsm/lsm/__init__.py
@@ -17,6 +17,7 @@ from data import DataEncoder, DataDecoder, IData, Initiator, Volume, Pool, \
from iplugin import IPlugin, IStorageAreaNetwork, INetworkAttachedStorage, INfs
from pluginrunner import PluginRunner
-from simulator import StorageSimulator, SimJob, SimState
+from simulator import SimPlugin
+from simarray import SimData, SimJob, SimArray
from transport import Transport
from version import VERSION
diff --git a/lsm/lsm/simarray.py b/lsm/lsm/simarray.py
new file mode 100644
index 0000000..8a19fd2
--- /dev/null
+++ b/lsm/lsm/simarray.py
@@ -0,0 +1,1309 @@
+# Copyright (C) 2011-2013 Red Hat, Inc.
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Author: tasleson
+# Gris Ge <***@redhat.com>
+
+# TODO: 1. Introduce constant check by using state_to_str() converting.
+# 2. Snapshot should consume space in pool.
+
+import random
+import pickle
+import tempfile
+import os
+import time
+
+from common import md5, LsmError, ErrorNumber, size_human_2_size_bytes, \
+ JobStatus
+from data import System, Volume, Disk, Pool, FileSystem, AccessGroup, \
+ Initiator, BlockRange, Snapshot, NfsExport
+
+class SimJob(object):
+ """
+ Simulates a longer running job, uses actual wall time. If test cases
+ take too long we can reduce time by shortening time duration.
+ """
+
+ def _calc_progress(self):
+ if self.percent < 100:
+ end = self.start + self.duration
+ now = time.time()
+ if now >= end:
+ self.percent = 100
+ self.status = JobStatus.COMPLETE
+ else:
+ diff = now - self.start
+ self.percent = int(100 * (diff / self.duration))
+
+ def __init__(self, item_to_return):
+ duration = os.getenv("LSM_SIM_TIME", 1)
+ self.status = JobStatus.INPROGRESS
+ self.percent = 0
+ self.__item = item_to_return
+ self.start = time.time()
+ self.duration = float(random.randint(0, int(duration)))
+
+ def progress(self):
+ """
+ Returns a tuple (status, percent, data)
+ """
+ self._calc_progress()
+ return self.status, self.percent, self.item
+
+ @property
+ def item(self):
+ if self.percent >= 100:
+ return self.__item
+ return None
+
+ @item.setter
+ def item(self, value):
+ self.__item = value
+
+
+class SimArray(object):
+ SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
+ tempfile.gettempdir() + '/lsm_sim_data')
+
+ @staticmethod
+ def _version_error(dump_file):
+ raise LsmError(ErrorNumber.INVALID_ARGUMENT,
+ "Stored simulator state incompatible with "
+ "simulator, please move or delete %s" %
+ dump_file)
+
+ def __init__(self, dump_file=None):
+ if dump_file is None:
+ self.dump_file = SimArray.SIM_DATA_FILE
+ else:
+ self.dump_file = dump_file
+
+ if os.path.exists(self.dump_file):
+ try:
+ with open(self.dump_file, 'rb') as f:
+ self.data = pickle.load(f)
+
+ # Going forward we could get smarter about handling this for
+ # changes that aren't invasive, but we at least need to check
+ # to make sure that the data will work and not cause any
+ # undue confusion.
+ if self.data.version != SimData.SIM_DATA_VERSION or \
+ self.data.signature != SimData._state_signature():
+ SimArray._version_error(self.dump_file)
+ except AttributeError:
+ SimArray._version_error(self.dump_file)
+
+ else:
+ self.data = SimData()
+
+ def save_state(self):
+ fh_dump_file = open(self.dump_file, 'wb')
+ pickle.dump(self.data, fh_dump_file)
+ fh_dump_file.close()
+
+ def job_status(self, job_id, flags=0):
+ return self.data.job_status(job_id, flags)
+
+ def job_free(self, job_id, flags=0):
+ return self.data.job_free(job_id, flags)
+
+ def set_time_out(self, ms, flags=0):
+ return self.data.set_time_out(ms, flags)
+
+ def get_time_out(self, flags=0):
+ return self.data.get_time_out(flags)
+
+ def systems(self):
+ return self.data.systems()
+
+ @staticmethod
+ def _sim_vol_2_lsm(sim_vol):
+ return Volume(sim_vol['vol_id'], sim_vol['name'], sim_vol['vpd83'],
+ SimData.SIM_DATA_BLK_SIZE,
+ int(sim_vol['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Volume.STATUS_OK, sim_vol['sys_id'],
+ sim_vol['pool_id'])
+
+ def volumes(self):
+ sim_vols = self.data.volumes()
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def pools(self):
+ rc = []
+ sim_pools = self.data.pools()
+ for sim_pool in sim_pools:
+ pool = Pool(sim_pool['pool_id'], sim_pool['name'],
+ sim_pool['total_space'], sim_pool['free_space'],
+ sim_pool['sys_id'])
+ rc.extend([pool])
+ return rc
+
+ def disks(self):
+ rc = []
+ sim_disks = self.data.disks()
+ for sim_disk in sim_disks:
+ disk = Disk(sim_disk['disk_id'], sim_disk['name'],
+ sim_disk['disk_type'], SimData.SIM_DATA_BLK_SIZE,
+ int(sim_disk['total_space']/SimData.SIM_DATA_BLK_SIZE),
+ Disk.STATUS_OK, sim_disk['sys_id'])
+ rc.extend([disk])
+ return rc
+
+ def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+ sim_vol = self.data.volume_create(
+ pool_id, vol_name, size_bytes, thinp, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_delete(self, vol_id, flags=0):
+ self.data.volume_delete(vol_id, flags)
+ return self.data.job_create(None)[0]
+
+ def volume_resize(self, vol_id, new_size_bytes, flags=0):
+ sim_vol = self.data.volume_resize(vol_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ flags=0):
+ sim_vol = self.data.volume_replicate(
+ dst_pool_id, rep_type, src_vol_id, new_vol_name, flags)
+ return self.data.job_create(SimArray._sim_vol_2_lsm(sim_vol))
+
+ def volume_replicate_range_block_size(self, sys_id, flags=0):
+ return self.data.volume_replicate_range_block_size(sys_id, flags)
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ flags=0):
+ return self.data.job_create(
+ self.data.volume_replicate_range(
+ rep_type, src_vol_id, dst_vol_id, ranges, flags))[0]
+
+ def volume_online(self, vol_id, flags=0):
+ return self.data.volume_online(vol_id, flags)
+
+ def volume_offline(self, vol_id, flags=0):
+ return self.data.volume_offline(vol_id, flags)
+
+ def volume_child_dependency(self, vol_id, flags=0):
+ return self.data.volume_child_dependency(vol_id, flags)
+
+ def volume_child_dependency_rm(self, vol_id, flags=0):
+ return self.data.job_create(
+ self.data.volume_child_dependency_rm(vol_id, flags))[0]
+
+ @staticmethod
+ def _sim_fs_2_lsm(sim_fs):
+ return FileSystem(sim_fs['fs_id'], sim_fs['name'],
+ sim_fs['total_space'], sim_fs['free_space'],
+ sim_fs['pool_id'], sim_fs['sys_id'])
+
+ def fs(self):
+ sim_fss = self.data.fs()
+ return [SimArray._sim_fs_2_lsm(f) for f in sim_fss]
+
+ def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+ sim_fs = self.data.fs_create(pool_id, fs_name, size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_delete(self, fs_id, flags=0):
+ self.data.fs_delete(fs_id, flags)
+ return self.data.job_create(None)[0]
+
+ def fs_resize(self, fs_id, new_size_bytes, flags=0):
+ sim_fs = self.data.fs_resize(fs_id, new_size_bytes, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+ sim_fs = self.data.fs_clone(src_fs_id, dst_fs_name, snap_id, flags)
+ return self.data.job_create(SimArray._sim_fs_2_lsm(sim_fs))
+
+ def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.file_clone(
+ fs_id, src_fs_name, dst_fs_name, snap_id, flags))[0]
+
+ @staticmethod
+ def _sim_snap_2_lsm(sim_snap):
+ return Snapshot(sim_snap['snap_id'], sim_snap['name'],
+ sim_snap['timestamp'])
+
+ def fs_snapshots(self, fs_id, flags=0):
+ sim_snaps = self.data.fs_snapshots(fs_id, flags)
+ return [SimArray._sim_snap_2_lsm(s) for s in sim_snaps]
+
+ def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+ sim_snap = self.data.fs_snapshot_create(fs_id, snap_name, files,
+ flags)
+ return self.data.job_create(SimArray._sim_snap_2_lsm(sim_snap))
+
+ def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+ return self.data.job_create(
+ self.data.fs_snapshot_delete(fs_id, snap_id, flags))[0]
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ flag_all_files, flags):
+ return self.data.job_create(
+ self.data.fs_snapshot_revert(
+ fs_id, snap_id, files, restore_files,
+ flag_all_files, flags))[0]
+
+ def fs_child_dependency(self, fs_id, files, flags=0):
+ return self.data.fs_child_dependency(fs_id, files, flags)
+
+ def fs_child_dependency_rm(self, fs_id, files, flags=0):
+ return self.data.job_create(
+ self.data.fs_child_dependency_rm(fs_id, files, flags))[0]
+
+ @staticmethod
+ def _sim_exp_2_lsm(sim_exp):
+ return NfsExport(
+ sim_exp['exp_id'], sim_exp['fs_id'], sim_exp['exp_path'],
+ sim_exp['auth_type'], sim_exp['root_hosts'], sim_exp['rw_hosts'],
+ sim_exp['ro_hosts'], sim_exp['anon_uid'], sim_exp['anon_gid'],
+ sim_exp['options'])
+
+ def exports(self, flags=0):
+ sim_exps = self.data.exports(flags)
+ return [SimArray._sim_exp_2_lsm(e) for e in sim_exps]
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags=0):
+ sim_exp = self.data.fs_export(
+ fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags)
+ return SimArray._sim_exp_2_lsm(sim_exp)
+
+ def fs_unexport(self, exp_id, flags=0):
+ return self.data.fs_unexport(exp_id, flags)
+
+ @staticmethod
+ def _sim_ag_2_lsm(sim_ag):
+ return AccessGroup(sim_ag['ag_id'], sim_ag['name'],
+ sim_ag['init_ids'], sim_ag['sys_id'])
+
+ def ags(self):
+ sim_ags = self.data.ags()
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+ sim_ag = self.data.access_group_create(
+ name, init_id, init_type, sys_id, flags)
+ return SimArray._sim_ag_2_lsm(sim_ag)
+
+ def access_group_del(self, ag_id, flags=0):
+ return self.data.access_group_del(ag_id, flags)
+
+ def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+ return self.data.access_group_add_initiator(
+ ag_id, init_id, init_type, flags)
+
+ def access_group_del_initiator(self, ag_id, init_id, flags=0):
+ return self.data.access_group_del_initiator(ag_id, init_id, flags)
+
+ def access_group_grant(self, ag_id, vol_id, access, flags=0):
+ return self.data.access_group_grant(ag_id, vol_id, access, flags)
+
+ def access_group_revoke(self, ag_id, vol_id, flags=0):
+ return self.data.access_group_revoke(ag_id, vol_id, flags)
+
+ def volumes_accessible_by_access_group(self, ag_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_access_group(ag_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def access_groups_granted_to_volume(self, vol_id, flags=0):
+ sim_ags = self.data.access_groups_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_ag_2_lsm(a) for a in sim_ags]
+
+ @staticmethod
+ def _sim_init_2_lsm(sim_init):
+ return Initiator(sim_init['init_id'], sim_init['init_type'],
+ sim_init['name'])
+
+ def inits(self, flags=0):
+ sim_inits = self.data.inits()
+ return [SimArray._sim_init_2_lsm(a) for a in sim_inits]
+
+ def initiator_grant(self, init_id, init_type, vol_id, access, flags=0):
+ return self.data.initiator_grant(
+ init_id, init_type, vol_id, access, flags)
+
+ def initiator_revoke(self, init_id, vol_id, flags=0):
+ return self.data.initiator_revoke(init_id, vol_id, flags)
+
+ def volumes_accessible_by_initiator(self, init_id, flags=0):
+ sim_vols = self.data.volumes_accessible_by_initiator(init_id, flags)
+ return [SimArray._sim_vol_2_lsm(v) for v in sim_vols]
+
+ def initiators_granted_to_volume(self, vol_id, flags=0):
+ sim_inits = self.data.initiators_granted_to_volume(vol_id, flags)
+ return [SimArray._sim_init_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ flags=0):
+ return self.data.iscsi_chap_auth(init_id, in_user, in_pass, out_user,
+ out_pass, flags)
+
+
+class SimData(object):
+ """
+ Rules here are:
+ * we don't store the same data twice
+ * we don't store data which can be calculated from other data
+
+ self.vol_dict = {
+ Volume.id = sim_vol,
+ }
+
+ sim_vol = {
+ 'vol_id': "VOL_ID_%s" % SimData._random_vpd(4),
+ 'vpd83': SimData._random_vpd(),
+ 'name': vol_name,
+ 'total_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': owner_pool_id,
+ 'consume_size': size_bytes,
+ 'replicate': {
+ dst_vol_id = [
+ {
+ 'src_start_blk': src_start_blk,
+ 'dst_start_blk': dst_start_blk,
+ 'blk_count': blk_count,
+ 'rep_type': Volume.REPLICATE_XXXX,
+ },
+ ],
+ },
+ 'mask': {
+ ag_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ },
+ 'mask_init': {
+ init_id = Volume.ACCESS_READ_WRITE|Volume.ACCESS_READ_ONLY,
+ }
+ }
+
+ self.init_dict = {
+ Initiator.id = sim_init,
+ }
+ sim_init = {
+ 'init_id': Initiator.id,
+ 'init_type': Initiator.TYPE_XXXX,
+ 'name': SimData.SIM_DATA_INIT_NAME,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ self.ag_dict ={
+ AccessGroup.id = sim_ag,
+ }
+ sim_ag = {
+ 'init_ids': [init_id,],
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'name': name,
+ 'ag_id': "AG_ID_%s" % SimData._random_vpd(4),
+ }
+
+ self.fs_dict = {
+ FileSystem.id = sim_fs,
+ }
+ sim_fs = {
+ 'fs_id': "FS_ID_%s" % SimData._random_vpd(4),
+ 'name': fs_name,
+ 'total_space': size_bytes,
+ 'free_space': size_bytes,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ 'pool_id': pool_id,
+ 'consume_size': size_bytes,
+ 'clone': {
+ dst_fs_id: {
+ 'snap_id': snap_id, # None if no snapshot
+ 'files': [ file_path, ] # [] if all files cloned.
+ },
+ },
+ 'snaps': [snap_id, ],
+ }
+ self.snap_dict = {
+ Snapshot.id: sim_snap,
+ }
+ sim_snap = {
+ 'snap_id': "SNAP_ID_%s" % SimData._random_vpd(4),
+ 'name': snap_name,
+ 'fs_id': fs_id,
+ 'files': [file_path, ],
+ 'timestamp': time.time(),
+ }
+ self.exp_dict = {
+ Export.id: sim_exp,
+ }
+ sim_exp = {
+ 'exp_id': "EXP_ID_%s" % SimData._random_vpd(4),
+ 'fs_id': fs_id,
+ 'exp_path': exp_path,
+ 'auth_type': auth_type,
+ 'root_hosts': [root_host, ],
+ 'rw_hosts': [rw_host, ],
+ 'ro_hosts': [ro_host, ],
+ 'anon_uid': anon_uid,
+ 'anon_gid': anon_gid,
+ 'options': [option, ],
+ }
+ """
+ SIM_DATA_BLK_SIZE = 512
+ SIM_DATA_VERSION = "2.0"
+ SIM_DATA_SYS_ID = 'sim-01'
+ SIM_DATA_INIT_NAME = 'NULL'
+ SIM_DATA_TMO = 30000 # ms
+
+ @staticmethod
+ def _state_signature():
+ return 'LSM_SIMULATOR_DATA_%s' % md5(SimData.SIM_DATA_VERSION)
+
+ def __init__(self):
+ self.tmo = SimData.SIM_DATA_TMO
+ self.version = SimData.SIM_DATA_VERSION
+ self.signature = SimData._state_signature()
+ self.job_num = 0
+ self.job_dict = {
+ # id: SimJob
+ }
+ self.syss = [System(SimData.SIM_DATA_SYS_ID,
+ 'LSM simulated storage plug-in',
+ System.STATUS_OK)]
+ pool_size_200g = size_human_2_size_bytes('200GiB')
+ self.pool_dict = {
+ 'POO1': {
+ 'pool_id': 'POO1',
+ 'name': 'Pool 1',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_000', 'DISK_ID_001'],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'POO2': {
+ 'pool_id': 'POO2',
+ 'name': 'Pool 2',
+ 'total_space': pool_size_200g,
+ 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_ids': ['POO1'],
+ 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ # lsm_test_aggr pool is required by test/runtest.sh
+ 'lsm_test_aggr': {
+ 'pool_id': 'lsm_test_aggr',
+ 'name': 'lsm_test_aggr',
+ 'member_type': Pool.MEMBER_TYPE_DISK,
+ 'member_ids': ['DISK_ID_002', 'DISK_ID_003'],
+ 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.vol_dict = {
+ }
+ self.fs_dict = {
+ }
+ self.snap_dict = {
+ }
+ self.exp_dict = {
+ }
+ disk_size_2t = size_human_2_size_bytes('2TiB')
+ self.disk_dict = {
+ 'DISK_ID_000': {
+ 'disk_id': 'DISK_ID_000',
+ 'name': 'SATA Disk 000',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_001': {
+ 'disk_id': 'DISK_ID_001',
+ 'name': 'SATA Disk 001',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SATA,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_002': {
+ 'disk_id': 'DISK_ID_002',
+ 'name': 'SAS Disk 002',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ 'DISK_ID_003': {
+ 'disk_id': 'DISK_ID_003',
+ 'name': 'SAS Disk 003',
+ 'total_space': disk_size_2t,
+ 'disk_type': Disk.DISK_TYPE_SAS,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ },
+ }
+ self.ag_dict = {
+ }
+ self.init_dict = {
+ }
+ # Create some volumes, fs and etc
+ self.volume_create(
+ 'POO1', 'Volume 000', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+ self.volume_create(
+ 'POO1', 'Volume 001', size_human_2_size_bytes('200GiB'),
+ Volume.PROVISION_DEFAULT)
+
+ self.pool_dict['POO3'] = {
+ 'pool_id': 'POO3',
+ 'name': 'Pool 3',
+ 'member_type': Pool.MEMBER_TYPE_VOLUME,
+ 'member_ids': [
+ self.vol_dict.values()[0]['vol_id'],
+ self.vol_dict.values()[1]['vol_id'],
+ ],
+ 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'sys_id': SimData.SIM_DATA_SYS_ID,
+ }
+
+ return
+
+ def _pool_free_space(self, pool_id):
+ """
+ Calculate the free space of the given pool.
+ """
+ free_space = self._pool_total_space(pool_id)
+ for sim_vol in self.vol_dict.values():
+ if sim_vol['pool_id'] != pool_id:
+ continue
+ if free_space <= sim_vol['consume_size']:
+ return 0
+ free_space -= sim_vol['consume_size']
+ for sim_fs in self.fs_dict.values():
+ if sim_fs['pool_id'] != pool_id:
+ continue
+ if free_space <= sim_fs['consume_size']:
+ return 0
+ free_space -= sim_fs['consume_size']
+ return free_space
+
+ @staticmethod
+ def _random_vpd(l=16):
+ """
+ Generate a random hex string of l bytes (2*l hex digits)
+ """
+ vpd = []
+ for i in range(0, l):
+ vpd.append(str('%02X' % (random.randint(0, 255))))
+ return "".join(vpd)
+
+ def _pool_total_space(self, pool_id):
+ """
+ Calculate the total space of a pool based on its RAID type
+ """
+ member_type = self.pool_dict[pool_id]['member_type']
+ if member_type == Pool.MEMBER_TYPE_POOL:
+ return self.pool_dict[pool_id]['total_space']
+
+ all_size = 0
+ item_size = 0 # disk size, used by RAID 3/4/5/6
+ member_ids = self.pool_dict[pool_id]['member_ids']
+ raid_type = self.pool_dict[pool_id]['raid_type']
+ member_count = len(member_ids)
+
+ if member_type == Pool.MEMBER_TYPE_DISK:
+ for member_id in member_ids:
+ all_size += self.disk_dict[member_id]['total_space']
+ item_size = self.disk_dict[member_id]['total_space']
+
+ elif member_type == Pool.MEMBER_TYPE_VOLUME:
+ for member_id in member_ids:
+ all_size += self.vol_dict[member_id]['total_space']
+ item_size = self.vol_dict[member_id]['total_space']
+
+ if raid_type == Pool.RAID_TYPE_JBOD:
+ return int(all_size)
+ elif raid_type == Pool.RAID_TYPE_RAID0:
+ return int(all_size)
+ elif raid_type == Pool.RAID_TYPE_RAID1 or \
+ raid_type == Pool.RAID_TYPE_RAID10:
+ return int(all_size/2)
+ elif raid_type == Pool.RAID_TYPE_RAID3 or \
+ raid_type == Pool.RAID_TYPE_RAID4 or \
+ raid_type == Pool.RAID_TYPE_RAID5 or \
+ raid_type == Pool.RAID_TYPE_RAID50:
+ return int(all_size - item_size)
+ elif raid_type == Pool.RAID_TYPE_RAID6 or \
+ raid_type == Pool.RAID_TYPE_RAID60:
+ return int(all_size - item_size - item_size)
+ elif raid_type == Pool.RAID_TYPE_RAID51:
+ return int((all_size - item_size)/2)
+ elif raid_type == Pool.RAID_TYPE_RAID61:
+ return int((all_size - item_size - item_size)/2)
+ return 0
+
+ @staticmethod
+ def _block_rounding(size_bytes):
+ return ((size_bytes + SimData.SIM_DATA_BLK_SIZE - 1) /
+ SimData.SIM_DATA_BLK_SIZE) * SimData.SIM_DATA_BLK_SIZE
+
+ def job_create(self, returned_item):
+ if True:
+ #if random.randint(0,5) == 1:
+ self.job_num += 1
+ job_id = "JOB_%s" % self.job_num
+ self.job_dict[job_id] = SimJob(returned_item)
+ return job_id, None
+ else:
+ return None, returned_item
+
+ def job_status(self, job_id, flags=0):
+ if job_id in self.job_dict.keys():
+ return self.job_dict[job_id].progress()
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ def job_free(self, job_id, flags=0):
+ if job_id in self.job_dict.keys():
+ del(self.job_dict[job_id])
+ return
+ raise LsmError(ErrorNumber.NOT_FOUND_JOB,
+ 'Non-existent job: %s' % job_id)
+
+ def set_time_out(self, ms, flags=0):
+ self.tmo = ms
+ return None
+
+ def get_time_out(self, flags=0):
+ return self.tmo
+
+ def systems(self):
+ return self.syss
+
+ def pools(self):
+ rc = []
+ for sim_pool in self.pool_dict.values():
+ sim_pool['total_space'] = \
+ self._pool_total_space(sim_pool['pool_id'])
+ sim_pool['free_space'] = \
+ self._pool_free_space(sim_pool['pool_id'])
+ rc.extend([sim_pool])
+ return rc
+
+ def volumes(self):
+ return self.vol_dict.values()
+
+ def disks(self):
+ return self.disk_dict.values()
+
+ def access_group_list(self):
+ return self.ag_dict.values()
+
+ def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0):
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['thinp'] = thinp
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+ return sim_vol
+
+ def volume_delete(self, vol_id, flags=0):
+ if vol_id in self.vol_dict.keys():
+ del(self.vol_dict[vol_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ def volume_resize(self, vol_id, new_size_bytes, flags=0):
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ if vol_id in self.vol_dict.keys():
+ pool_id = self.vol_dict[vol_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < new_size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.vol_dict[vol_id]['total_space'] = new_size_bytes
+ self.vol_dict[vol_id]['consume_size'] = new_size_bytes
+ return self.vol_dict[vol_id]
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % vol_id)
+
+ def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name,
+ flags=0):
+ if src_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such volume: %s" % src_vol_id)
+ size_bytes = self.vol_dict[src_vol_id]['total_space']
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(dst_pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_vol = dict()
+ vol_id = "VOL_ID_%s" % SimData._random_vpd(4)
+ sim_vol['vol_id'] = vol_id
+ sim_vol['vpd83'] = SimData._random_vpd()
+ sim_vol['name'] = new_vol_name
+ sim_vol['total_space'] = size_bytes
+ sim_vol['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_vol['pool_id'] = dst_pool_id
+ sim_vol['consume_size'] = size_bytes
+ self.vol_dict[vol_id] = sim_vol
+
+ dst_vol_id = vol_id
+ if 'replicate' not in self.vol_dict[src_vol_id].keys():
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ sim_rep = {
+ 'rep_type': rep_type,
+ 'src_start_blk': 0,
+ 'dst_start_blk': 0,
+ 'blk_count': size_bytes,
+ }
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ [sim_rep])
+
+ return sim_vol
+
+ def volume_replicate_range_block_size(self, sys_id, flags=0):
+ return SimData.SIM_DATA_BLK_SIZE
+
+ def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges,
+ flags=0):
+ if src_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % src_vol_id)
+
+ if dst_vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % dst_vol_id)
+
+ sim_reps = []
+ for rep_range in ranges:
+ sim_rep = dict()
+ sim_rep['rep_type'] = rep_type
+ sim_rep['src_start_blk'] = rep_range.src_block
+ sim_rep['dst_start_blk'] = rep_range.dest_block
+ sim_rep['blk_count'] = rep_range.block_count
+ sim_reps.extend([sim_rep])
+
+ if 'replicate' not in self.vol_dict[src_vol_id].keys():
+ self.vol_dict[src_vol_id]['replicate'] = dict()
+
+ if dst_vol_id not in self.vol_dict[src_vol_id]['replicate'].keys():
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id] = list()
+
+ self.vol_dict[src_vol_id]['replicate'][dst_vol_id].extend(
+ sim_reps)
+
+ return None
+
+ def volume_online(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does carry an indication of volume online/offline
+ # state, but the command line does not support volume_online() yet.
+ return None
+
+ def volume_offline(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ # TODO: Volume.STATUS_XXX does carry an indication of volume online/offline
+ # state, but the command line does not support volume_online() yet.
+ return None
+
+ def volume_child_dependency(self, vol_id, flags=0):
+ """
+ If volume is a src or dst of a replication, we return True.
+ """
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ self.vol_dict[vol_id]['replicate']:
+ return True
+ for sim_vol in self.vol_dict.values():
+ if 'replicate' in sim_vol.keys():
+ if vol_id in sim_vol['replicate'].keys():
+ return True
+ return False
+
+ def volume_child_dependency_rm(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'replicate' in self.vol_dict[vol_id].keys() and \
+ self.vol_dict[vol_id]['replicate']:
+ del self.vol_dict[vol_id]['replicate']
+
+ for sim_vol in self.vol_dict.values():
+ if 'replicate' in sim_vol.keys():
+ if vol_id in sim_vol['replicate'].keys():
+ del sim_vol['replicate'][vol_id]
+ return None
+
+ def ags(self, flags=0):
+ return self.ag_dict.values()
+
+ def access_group_create(self, name, init_id, init_type, sys_id, flags=0):
+ sim_ag = dict()
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+
+ sim_ag['init_ids'] = [init_id]
+ sim_ag['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_ag['name'] = name
+ sim_ag['ag_id'] = "AG_ID_%s" % SimData._random_vpd(4)
+ self.ag_dict[sim_ag['ag_id']] = sim_ag
+ return sim_ag
+
+ def access_group_del(self, ag_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ del(self.ag_dict[ag_id])
+ return None
+
+ def access_group_add_initiator(self, ag_id, init_id, init_type, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found")
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ if init_id in self.ag_dict[ag_id]['init_ids']:
+ return self.ag_dict[ag_id]
+
+ self.ag_dict[ag_id]['init_ids'].extend([init_id])
+
+ return None
+
+ def access_group_del_initiator(self, ag_id, init_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if init_id not in self.init_dict.keys():
+ return None
+
+ if init_id in self.ag_dict[ag_id]['init_ids']:
+ new_init_ids = []
+ for cur_init_id in self.ag_dict[ag_id]['init_ids']:
+ if cur_init_id != init_id:
+ new_init_ids.extend([cur_init_id])
+ del(self.ag_dict[ag_id]['init_ids'])
+ self.ag_dict[ag_id]['init_ids'] = new_init_ids
+ return None
+
+ def access_group_grant(self, ag_id, vol_id, access, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'mask' not in self.vol_dict[vol_id].keys():
+ self.vol_dict[vol_id]['mask'] = dict()
+
+ self.vol_dict[vol_id]['mask'][ag_id] = access
+ return None
+
+ def access_group_revoke(self, ag_id, vol_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if 'mask' not in self.vol_dict[vol_id].keys():
+ return None
+
+ if ag_id not in self.vol_dict[vol_id]['mask'].keys():
+ return None
+
+ del(self.vol_dict[vol_id]['mask'][ag_id])
+ return None
+
+ def volumes_accessible_by_access_group(self, ag_id, flags=0):
+ if ag_id not in self.ag_dict.keys():
+ raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
+ "Access group not found: %s" % ag_id)
+ rc = []
+ for sim_vol in self.vol_dict.values():
+ if 'mask' not in sim_vol:
+ continue
+ if ag_id in sim_vol['mask'].keys():
+ rc.extend([sim_vol])
+ return rc
+
+ def access_groups_granted_to_volume(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ sim_ags = []
+ if 'mask' in self.vol_dict[vol_id].keys():
+ ag_ids = self.vol_dict[vol_id]['mask'].keys()
+ for ag_id in ag_ids:
+ sim_ags.extend([self.ag_dict[ag_id]])
+ return sim_ags
+
+ def inits(self, flags=0):
+ return self.init_dict.values()
+
+ def initiator_grant(self, init_id, init_type, vol_id, access, flags):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if init_id not in self.init_dict.keys():
+ sim_init = dict()
+ sim_init['init_id'] = init_id
+ sim_init['init_type'] = init_type
+ sim_init['name'] = SimData.SIM_DATA_INIT_NAME
+ sim_init['sys_id'] = SimData.SIM_DATA_SYS_ID
+ self.init_dict[init_id] = sim_init
+ if 'mask_init' not in self.vol_dict[vol_id].keys():
+ self.vol_dict[vol_id]['mask_init'] = dict()
+
+ self.vol_dict[vol_id]['mask_init'][init_id] = access
+ return None
+
+ def initiator_revoke(self, init_id, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ if init_id not in self.init_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+
+ if 'mask_init' in self.vol_dict[vol_id].keys():
+ if init_id in self.vol_dict[vol_id]['mask_init'].keys():
+ del self.vol_dict[vol_id]['mask_init'][init_id]
+
+ return None
+
+ def _ag_ids_of_init(self, init_id):
+ """
+ Find the access groups the given initiator belongs to.
+ Returns a list of access group IDs, or [] if none.
+ """
+ rc = []
+ for sim_ag in self.ag_dict.values():
+ if init_id in sim_ag['init_ids']:
+ rc.extend([sim_ag['ag_id']])
+ return rc
+
+ def volumes_accessible_by_initiator(self, init_id, flags=0):
+ if init_id not in self.init_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+ rc_dedup_dict = dict()
+ ag_ids = self._ag_ids_of_init(init_id)
+ for ag_id in ag_ids:
+ sim_vols = self.volumes_accessible_by_access_group(ag_id)
+ for sim_vol in sim_vols:
+ rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+
+ for sim_vol in self.vol_dict.values():
+ if 'mask_init' in sim_vol:
+ if init_id in sim_vol['mask_init'].keys():
+ rc_dedup_dict[sim_vol['vol_id']] = sim_vol
+ return rc_dedup_dict.values()
+
+ def initiators_granted_to_volume(self, vol_id, flags=0):
+ if vol_id not in self.vol_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_VOLUME,
+ "No such Volume: %s" % vol_id)
+ rc_dedup_dict = dict()
+ sim_ags = self.access_groups_granted_to_volume(vol_id, flags)
+ for sim_ag in sim_ags:
+ for init_id in sim_ag['init_ids']:
+ rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+ if 'mask_init' in self.vol_dict[vol_id].keys():
+ for init_id in self.vol_dict[vol_id]['mask_init']:
+ rc_dedup_dict[init_id] = self.init_dict[init_id]
+
+ return rc_dedup_dict.values()
+
+ def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass,
+ flags=0):
+ if init_id not in self.init_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_INIT,
+ "No such Initiator: %s" % init_id)
+ if self.init_dict[init_id]['init_type'] != Initiator.TYPE_ISCSI:
+ raise LsmError(ErrorNumber.UNSUPPORTED_INITIATOR_TYPE,
+ "Initiator %s is not an iSCSI IQN" % init_id)
+ # No iscsi chap query API yet
+ return None
+
+ def fs(self):
+ return self.fs_dict.values()
+
+ def fs_create(self, pool_id, fs_name, size_bytes, flags=0):
+ size_bytes = SimData._block_rounding(size_bytes)
+ # check free size
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+ sim_fs = dict()
+ fs_id = "FS_ID_%s" % SimData._random_vpd(4)
+ sim_fs['fs_id'] = fs_id
+ sim_fs['name'] = fs_name
+ sim_fs['total_space'] = size_bytes
+ sim_fs['free_space'] = size_bytes
+ sim_fs['sys_id'] = SimData.SIM_DATA_SYS_ID
+ sim_fs['pool_id'] = pool_id
+ sim_fs['consume_size'] = size_bytes
+ self.fs_dict[fs_id] = sim_fs
+ return sim_fs
+
+ def fs_delete(self, fs_id, flags=0):
+ if fs_id in self.fs_dict.keys():
+ del(self.fs_dict[fs_id])
+ return
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+
+ def fs_resize(self, fs_id, new_size_bytes, flags=0):
+ new_size_bytes = SimData._block_rounding(new_size_bytes)
+ if fs_id in self.fs_dict.keys():
+ pool_id = self.fs_dict[fs_id]['pool_id']
+ free_space = self._pool_free_space(pool_id)
+ if (free_space < new_size_bytes):
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Insufficient space in pool")
+
+ self.fs_dict[fs_id]['total_space'] = new_size_bytes
+ self.fs_dict[fs_id]['free_space'] = new_size_bytes
+ self.fs_dict[fs_id]['consume_size'] = new_size_bytes
+ return self.fs_dict[fs_id]
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+
+ def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0):
+ if src_fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % src_fs_id)
+ if snap_id and snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ src_sim_fs = self.fs_dict[src_fs_id]
+ dst_sim_fs = self.fs_create(
+ src_sim_fs['pool_id'], dst_fs_name, src_sim_fs['total_space'], 0)
+ if 'clone' not in src_sim_fs.keys():
+ src_sim_fs['clone'] = dict()
+ src_sim_fs['clone'][dst_sim_fs['fs_id']] = {
+ 'snap_id': snap_id,
+ }
+ return dst_sim_fs
+
+ def file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if snap_id and snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # TODO: No file clone query API yet, no need to do anything internally
+ return None
+
+ def fs_snapshots(self, fs_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ rc = []
+ if 'snaps' in self.fs_dict[fs_id].keys():
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ rc.extend([self.snap_dict[snap_id]])
+ return rc
+
+ def fs_snapshot_create(self, fs_id, snap_name, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ self.fs_dict[fs_id]['snaps'] = []
+
+ snap_id = "SNAP_ID_%s" % SimData._random_vpd(4)
+ sim_snap = dict()
+ sim_snap['snap_id'] = snap_id
+ sim_snap['name'] = snap_name
+ if files is None:
+ sim_snap['files'] = []
+ else:
+ sim_snap['files'] = files
+ sim_snap['timestamp'] = time.time()
+ self.snap_dict[snap_id] = sim_snap
+ self.fs_dict[fs_id]['snaps'].extend([snap_id])
+ return sim_snap
+
+ def fs_snapshot_delete(self, fs_id, snap_id, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ del self.snap_dict[snap_id]
+ new_snap_ids = []
+ for old_snap_id in self.fs_dict[fs_id]['snaps']:
+ if old_snap_id != snap_id:
+ new_snap_ids.extend([old_snap_id])
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+ def fs_snapshot_revert(self, fs_id, snap_id, files, restore_files,
+ flag_all_files, flags):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if snap_id not in self.snap_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_SS,
+ "No such Snapshot: %s" % snap_id)
+ # Nothing needs to be done internally for a revert.
+ return None
+
+ def fs_child_dependency(self, fs_id, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ return False
+ if files is None or len(files) == 0:
+ if len(self.fs_dict[fs_id]['snaps']) > 0:
+ return True
+ else:
+ for req_file in files:
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ if len(self.snap_dict[snap_id]['files']) == 0:
+ # We are snapshotting all files
+ return True
+ if req_file in self.snap_dict[snap_id]['files']:
+ return True
+ return False
+
+ def fs_child_dependency_rm(self, fs_id, files, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ if 'snaps' not in self.fs_dict[fs_id].keys():
+ return None
+ if files is None or len(files) == 0:
+ if len(self.fs_dict[fs_id]['snaps']) > 0:
+ snap_ids = self.fs_dict[fs_id]['snaps']
+ for snap_id in snap_ids:
+ del self.snap_dict[snap_id]
+ del self.fs_dict[fs_id]['snaps']
+ else:
+ for req_file in files:
+ snap_ids_to_rm = []
+ for snap_id in self.fs_dict[fs_id]['snaps']:
+ if len(self.snap_dict[snap_id]['files']) == 0:
+ # BUG: if a certain snapshot covers all files,
+ # what should we do when the user requests removing
+ # the dependency on certain files?
+ # Currently, we do nothing.
+ return None
+ if req_file in self.snap_dict[snap_id]['files']:
+ new_files = []
+ for old_file in self.snap_dict[snap_id]['files']:
+ if old_file != req_file:
+ new_files.extend([old_file])
+ if len(new_files) == 0:
+ # all files have been removed from the snapshot's file list.
+ snap_ids_to_rm.extend([snap_id])
+ else:
+ self.snap_dict[snap_id]['files'] = new_files
+ for snap_id in snap_ids_to_rm:
+ del self.snap_dict[snap_id]
+
+ new_snap_ids = []
+ for cur_snap_id in self.fs_dict[fs_id]['snaps']:
+ if cur_snap_id not in snap_ids_to_rm:
+ new_snap_ids.extend([cur_snap_id])
+ if len(new_snap_ids) == 0:
+ del self.fs_dict[fs_id]['snaps']
+ else:
+ self.fs_dict[fs_id]['snaps'] = new_snap_ids
+ return None
+
+ def exports(self, flags=0):
+ return self.exp_dict.values()
+
+ def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts,
+ anon_uid, anon_gid, auth_type, options, flags=0):
+ if fs_id not in self.fs_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_FS,
+ "No such File System: %s" % fs_id)
+ sim_exp = dict()
+ sim_exp['exp_id'] = "EXP_ID_%s" % SimData._random_vpd(4)
+ sim_exp['fs_id'] = fs_id
+ if exp_path is None:
+ sim_exp['exp_path'] = "/%s" % sim_exp['exp_id']
+ else:
+ sim_exp['exp_path'] = exp_path
+ sim_exp['auth_type'] = auth_type
+ sim_exp['root_hosts'] = root_hosts
+ sim_exp['rw_hosts'] = rw_hosts
+ sim_exp['ro_hosts'] = ro_hosts
+ sim_exp['anon_uid'] = anon_uid
+ sim_exp['anon_gid'] = anon_gid
+ sim_exp['options'] = options
+ self.exp_dict[sim_exp['exp_id']] = sim_exp
+ return sim_exp
+
+ def fs_unexport(self, exp_id, flags=0):
+ if exp_id not in self.exp_dict.keys():
+ raise LsmError(ErrorNumber.INVALID_NFS,
+ "No such NFS Export: %s" % exp_id)
+ del self.exp_dict[exp_id]
+ return None
+
+ def pool_create(self,
+ system_id,
+ pool_name='',
+ raid_type=Pool.RAID_TYPE_UNKNOWN,
+ member_type=Pool.MEMBER_TYPE_UNKNOWN,
+ member_ids=None,
+ member_count=0,
+ size_bytes=0,
+ thinp_type=Pool.THINP_TYPE_UNKNOWN,
+ flags=0):
+ if pool_name == '':
+ pool_name = 'POOL %s' % SimData._random_vpd(4)
+
+ # TODO: pool creation is not implemented yet.
+ return
diff --git a/lsm/lsm/simulator.py b/lsm/lsm/simulator.py
index ca9d3c4..9a7a75c 100644
--- a/lsm/lsm/simulator.py
+++ b/lsm/lsm/simulator.py
@@ -26,270 +26,17 @@ from data import Pool, Initiator, Volume, BlockRange, System, AccessGroup, \
Snapshot, NfsExport, FileSystem, Capabilities, Disk, OptionalData
from iplugin import INfs, IStorageAreaNetwork
from version import VERSION
+from simarray import SimArray, SimJob
-SIM_DATA_FILE = os.getenv("LSM_SIM_DATA",
- tempfile.gettempdir() + '/lsm_sim_data')
-duration = os.getenv("LSM_SIM_TIME", 1)
-
-# Bump this when the sim data layout changes on disk
-SIM_DATA_VERSION = 1
-
-
-class SimJob(object):
- """
- Simulates a longer running job, uses actual wall time. If test cases
- take too long we can reduce time by shortening time duration.
- """
-
- def __calc_progress(self):
- if self.percent < 100:
- end = self.start + self.duration
- now = time.time()
- if now >= end:
- self.percent = 100
- self.status = JobStatus.COMPLETE
- else:
- diff = now - self.start
- self.percent = int(100 * (diff / self.duration))
-
- def __init__(self, item_to_return):
- self.status = JobStatus.INPROGRESS
- self.percent = 0
- self.__item = item_to_return
- self.start = time.time()
- self.duration = float(random.randint(0, int(duration)))
-
- def progress(self):
- """
- Returns a tuple (status, percent, volume)
- """
- self.__calc_progress()
- return self.status, self.percent, self.item
-
- @property
- def item(self):
- if self.percent >= 100:
- return self.__item
- return None
-
- @item.setter
- def item(self, value):
- self.__item = value
-
-
-def _signature(obj):
- """
- Generate some kind of signature for this object, not sure this is ideal.
-
- Hopefully this will save some debug time.
- """
- sig = ''
- keys = obj.__dict__.keys()
- keys.sort()
-
- for k in keys:
- sig = md5(sig + k)
- return sig
-
-
-def _state_signature():
- rc = ''
- objects = [Pool('', '', 0, 0, ''), Volume('', '', '', 1, 1, 0, '', ''),
- AccessGroup('', '', ['']), Initiator('', 0, ''),
- System('', '', 0), FileSystem('', '', 0, 0, '', ''),
- BlockRange(0, 100, 50), Capabilities(),
- NfsExport('', '', '', '', '', '', '', '', '', '', ),
- Snapshot('', '', 10)]
-
- for o in objects:
- rc = md5(rc + _signature(o))
-
- return rc
-
-
-class SimState(object):
- def __init__(self):
- self.version = SIM_DATA_VERSION
- self.sys_info = System('sim-01', 'LSM simulated storage plug-in',
- System.STATUS_OK)
- p1 = Pool('POO1', 'Pool 1', 2 ** 64, 2 ** 64, self.sys_info.id)
- p2 = Pool('POO2', 'Pool 2', 2 ** 64, 2 ** 64, self.sys_info.id)
- p3 = Pool('POO3', 'Pool 3', 2 ** 64, 2 ** 64, self.sys_info.id)
- p4 = Pool('POO4', 'lsm_test_aggr', 2 ** 64, 2 ** 64, self.sys_info.id)
-
- self.block_size = 512
-
- pm1 = {'pool': p1, 'volumes': {}}
- pm2 = {'pool': p2, 'volumes': {}}
- pm3 = {'pool': p3, 'volumes': {}}
- pm4 = {'pool': p4, 'volumes': {}}
-
- self.pools = {p1.id: pm1, p2.id: pm2, p3.id: pm3, p4.id: pm4}
- self.volumes = {}
- self.vol_num = 1
- self.access_groups = {}
-
- self.fs = {}
- self.fs_num = 1
-
- self.tmo = 30000
- self.jobs = {}
- self.job_num = 1
-
- #These express relationships between initiators and volumes. This
- #is done because if you delete either part of the relationship
- #you need to delete the association between them. Holding this stuff
- #in a db would be easier :-)
- self.group_grants = {} # {access group id : {volume id: access }}
-
- #Create a signature
- self.signature = _state_signature()
-
-
-class StorageSimulator(INfs, IStorageAreaNetwork):
+class SimPlugin(INfs, IStorageAreaNetwork):
"""
Simple class that implements enough to allow the framework to be exercised.
"""
-
- @staticmethod
- def __random_vpd(l=16):
- """
- Generate a random 16 digit number as hex
- """
- vpd = []
- for i in range(0, l):
- vpd.append(str('%02X' % (random.randint(0, 255))))
- return "".join(vpd)
-
- def __block_rounding(self, size_bytes):
- """
- Round the requested size to block size.
- """
- return (size_bytes / self.s.block_size) * self.s.block_size
-
- def __create_job(self, returned_item):
- if True:
- #if random.randint(0,5) == 1:
- self.s.job_num += 1
- job = "JOB_" + str(self.s.job_num)
- self.s.jobs[job] = SimJob(returned_item)
- return job, None
- else:
- return None, returned_item
-
- def _version_error(self):
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Stored simulator state incompatible with "
- "simulator, please move or delete %s" %
- self.file)
-
- def _load(self):
- tmp = None
- if os.path.exists(self.file):
- with open(self.file, 'rb') as f:
- tmp = pickle.load(f)
-
- # Going forward we could get smarter about handling this for
- # changes that aren't invasive, but we at least need to check
- # to make sure that the data will work and not cause any
- # undo confusion.
- try:
- if tmp.version != SIM_DATA_VERSION or \
- tmp.signature != _state_signature():
- self._version_error()
- except AttributeError:
- self._version_error()
-
- return tmp
-
- def _save(self):
- f = open(self.file, 'wb')
- pickle.dump(self.s, f)
- f.close()
-
- #If we run via the daemon the file will be owned by libstoragemgmt
- #and if we run sim_lsmplugin stand alone we will be unable to
- #change the permissions.
- try:
- os.chmod(self.file, 0666)
- except OSError:
- pass
-
- def _load_state(self):
- prev = self._load()
- if prev:
- return prev
- return SimState()
-
- @staticmethod
- def _check_sl(string_list):
- """
- String list should be an empty list or a list with items
- """
- if string_list is not None and isinstance(string_list, list):
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_SL, 'Invalid string list')
-
def __init__(self):
-
- self.file = SIM_DATA_FILE
- self.s = self._load_state()
self.uri = None
self.password = None
self.tmo = 0
- def _allocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
-
- rounded_size = self.__block_rounding(size_bytes)
-
- if p.free_space >= rounded_size:
- p.free_space -= rounded_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- return rounded_size
-
- def _deallocate_from_pool(self, pool_id, size_bytes):
- p = self.s.pools[pool_id]['pool']
- p.free_space += size_bytes
-
- @staticmethod
- def _ag_id(name):
- return md5(name)
-
- def _new_access_group(self, name, h):
- return AccessGroup(StorageSimulator._ag_id(name), name,
- [i.id for i in h['initiators']], self.s.sys_info.id)
-
- def _create_vol(self, pool, name, size_bytes):
- actual_size = self._allocate_from_pool(pool.id, size_bytes)
-
- nv = Volume('Vol' + str(self.s.vol_num), name,
- StorageSimulator.__random_vpd(), self.s.block_size,
- (actual_size / self.s.block_size), Volume.STATUS_OK,
- self.s.sys_info.id,
- pool.id)
- self.s.volumes[nv.id] = {'pool': pool, 'volume': nv}
- self.s.vol_num += 1
- return self.__create_job(nv)
-
- def _create_fs(self, pool, name, size_bytes):
- if pool.id in self.s.pools:
- p = self.s.pools[pool.id]['pool']
- actual_size = self._allocate_from_pool(p.id, size_bytes)
-
- new_fs = FileSystem('FS' + str(self.s.fs_num), name, actual_size,
- actual_size, p.id, self.s.sys_info.id)
-
- self.s.fs[new_fs.id] = {'pool': p, 'fs': new_fs, 'ss': {},
- 'exports': {}}
- self.s.fs_num += 1
- return self.__create_job(new_fs)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Pool not found')
-
def startup(self, uri, password, timeout, flags=0):
self.uri = uri
self.password = password
@@ -299,17 +46,38 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
qp = uri_parse(uri)
if 'parameters' in qp and 'statefile' in qp['parameters'] \
and qp['parameters']['statefile'] is not None:
- self.file = qp['parameters']['statefile']
- self._load_state()
+ self.sim_array = SimArray(qp['parameters']['statefile'])
+ else:
+ self.sim_array = SimArray()
return None
+ def shutdown(self, flags=0):
+ self.sim_array.save_state()
+
+ def job_status(self, job_id, flags=0):
+ return self.sim_array.job_status(job_id, flags)
+
+ def job_free(self, job_id, flags=0):
+ return self.sim_array.job_free(job_id, flags)
+
+ @staticmethod
+ def _sim_data_2_lsm(sim_data):
+ """
+ Fake converter. SimArray already converts SimData into LSM data.
+ The conversion is done in SimArray to keep this sample plugin
+ clean.
+ In the real world, however, data conversion is usually handled by the
+ plugin itself rather than by the array.
+ """
+ return sim_data
+
def set_time_out(self, ms, flags=0):
- self.tmo = ms
+ self.sim_array.set_time_out(ms, flags)
return None
def get_time_out(self, flags=0):
- return self.tmo
+ return self.sim_array.get_time_out(flags)
def capabilities(self, system, flags=0):
rc = Capabilities()
@@ -319,544 +87,204 @@ class StorageSimulator(INfs, IStorageAreaNetwork):
def plugin_info(self, flags=0):
return "Storage simulator", VERSION
- def shutdown(self, flags=0):
- self._save()
-
def systems(self, flags=0):
- return [self.s.sys_info]
-
- def job_status(self, job_id, flags=0):
- if job_id in self.s.jobs:
- return self.s.jobs[job_id].progress()
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def job_free(self, job_id, flags=0):
- if job_id in self.s.jobs:
- del self.s.jobs[job_id]
- return None
- raise LsmError(ErrorNumber.NOT_FOUND_JOB, 'Non-existent job')
-
- def volumes(self, flags=0):
- return [e['volume'] for e in self.s.volumes.itervalues()]
-
- def _get_volume(self, volume_id):
- for v in self.s.volumes.itervalues():
- if v['volume'].id == volume_id:
- return v['volume']
- return None
+ sim_syss = self.sim_array.systems()
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_syss]
def pools(self, flags=0):
- return [e['pool'] for e in self.s.pools.itervalues()]
-
- def _volume_accessible(self, access_group_id, volume):
+ sim_pools = self.sim_array.pools()
+ return [SimPlugin._sim_data_2_lsm(p) for p in sim_pools]
- if access_group_id in self.s.group_grants:
- ag = self.s.group_grants[access_group_id]
-
- if volume.id in ag:
- return True
-
- return False
-
- def _initiators(self, volume_filter=None):
- rc = []
- if len(self.s.access_groups):
- for k, v in self.s.access_groups.items():
- if volume_filter:
- ag = self._new_access_group(k, v)
- if self._volume_accessible(ag.id, volume_filter):
- rc.extend(v['initiators'])
- else:
- rc.extend(v['initiators'])
-
- #We can have multiples as the same initiator can be in multiple access
- #groups
- remove_dupes = {}
- for x in rc:
- remove_dupes[x.id] = x
-
- return list(remove_dupes.values())
+ def volumes(self, flags=0):
+ sim_vols = self.sim_array.volumes()
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- def initiators(self, flags=0):
- return self._initiators()
+ def disks(self, flags=0):
+ sim_disks = self.sim_array.disks()
+ return [SimPlugin._sim_data_2_lsm(d) for d in sim_disks]
def volume_create(self, pool, volume_name, size_bytes, provisioning,
flags=0):
- assert provisioning is not None
- return self._create_vol(pool, volume_name, size_bytes)
+ sim_vol = self.sim_array.volume_create(
+ pool.id, volume_name, size_bytes, provisioning, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)
def volume_delete(self, volume, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
- self._deallocate_from_pool(p.id, v.size_bytes)
- del self.s.volumes[volume.id]
-
- for (k, v) in self.s.group_grants.items():
- if volume.id in v:
- del self.s.group_grants[k][volume.id]
-
- #We only return null or job id.
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
+ return self.sim_array.volume_delete(volume.id, flags)
- def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
- assert rep_type is not None
+ def volume_resize(self, volume, new_size_bytes, flags=0):
+ sim_vol = self.sim_array.volume_resize(
+ volume.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_vol)
- p_id = None
+ def volume_replicate(self, pool, rep_type, volume_src, name, flags=0):
+ dst_pool_id = None
if pool is not None:
- p_id = pool.id
- else:
- p_id = volume_src.pool_id
-
- if p_id in self.s.pools and volume_src.id in self.s.volumes:
- p = self.s.pools[p_id]['pool']
- v = self.s.volumes[volume_src.id]['volume']
-
- return self._create_vol(p, name, v.size_bytes)
+ dst_pool_id = pool.id
else:
- if pool.id not in self.s.pools:
- raise LsmError(ErrorNumber.NOT_FOUND_POOL, 'Incorrect pool')
-
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- 'Volume not present')
- return None
+ dst_pool_id = volume_src.pool_id
+ return self.sim_array.volume_replicate(
+ dst_pool_id, rep_type, volume_src.id, name, flags)
def volume_replicate_range_block_size(self, system, flags=0):
- return self.s.block_size
+ return self.sim_array.volume_replicate_range_block_size(
+ system.id, flags)
def volume_replicate_range(self, rep_type, volume_src, volume_dest,
ranges, flags=0):
-
- if rep_type not in (Volume.REPLICATE_SNAPSHOT,
- Volume.REPLICATE_CLONE,
- Volume.REPLICATE_COPY,
- Volume.REPLICATE_MIRROR_ASYNC,
- Volume.REPLICATE_MIRROR_SYNC):
- raise LsmError(ErrorNumber.UNSUPPORTED_REPLICATION_TYPE,
- "Rep_type invalid")
-
- if ranges:
- if isinstance(ranges, list):
- for r in ranges:
- if isinstance(r, BlockRange):
- #We could do some overlap range testing etc. here.
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Range element not BlockRange")
-
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Ranges not a list")
-
- #Make sure all the arguments are validated
- if volume_src.id in self.s.volumes \
- and volume_dest.id in self.s.volumes:
- return None
- else:
- if volume_src.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_src not found")
- if volume_dest.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "volume_dest not found")
+ return self.sim_array.volume_replicate_range(
+ rep_type, volume_src.id, volume_dest.id, ranges, flags)
def volume_online(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
+ return self.sim_array.volume_online(volume.id, flags)
def volume_offline(self, volume, flags=0):
- if volume.id in self.s.volumes:
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not present')
-
- def volume_resize(self, volume, new_size_bytes, flags=0):
- if volume.id in self.s.volumes:
- v = self.s.volumes[volume.id]['volume']
- p = self.s.volumes[volume.id]['pool']
-
- current_size = v.size_bytes
- new_size = self.__block_rounding(new_size_bytes)
-
- if new_size == current_size:
- raise LsmError(ErrorNumber.SIZE_SAME,
- 'Volume same size')
-
- if new_size < current_size \
- or p.free_space >= (new_size - current_size):
- p.free_space -= (new_size - current_size)
- v.num_of_blocks = new_size / self.s.block_size
- else:
- raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
- 'Insufficient space in pool')
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- return self.__create_job(v)
-
- def access_group_grant(self, group, volume, access, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id not in self.s.group_grants:
- self.s.group_grants[group.id] = {volume.id: access}
- elif volume.id not in self.s.group_grants[group.id]:
- self.s.group_grants[group.id][volume.id] = access
- else:
- raise LsmError(ErrorNumber.IS_MAPPED, 'Existing access present')
-
- def access_group_revoke(self, group, volume, flags=0):
- if group.name not in self.s.access_groups:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not present")
-
- if volume.id not in self.s.volumes:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME, 'Volume not found')
-
- if group.id in self.s.group_grants \
- and volume.id in self.s.group_grants[group.id]:
- del self.s.group_grants[group.id][volume.id]
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- 'No volume access to revoke')
+        return self.sim_array.volume_offline(volume.id, flags)
def access_group_list(self, flags=0):
- rc = []
- for (k, v) in self.s.access_groups.items():
- rc.append(self._new_access_group(k, v))
- return rc
-
- def _get_access_group(self, ag_id):
- groups = self.access_group_list()
- for g in groups:
- if g.id == ag_id:
- return g
- return None
+ sim_ags = self.sim_array.ags()
+ return [SimPlugin._sim_data_2_lsm(a) for a in sim_ags]
def access_group_create(self, name, initiator_id, id_type, system_id,
flags=0):
- if name not in self.s.access_groups:
- self.s.access_groups[name] = \
- {'initiators': [Initiator(initiator_id, id_type, 'UNA')],
- 'access': {}}
- return self._new_access_group(name, self.s.access_groups[name])
- else:
- raise LsmError(ErrorNumber.EXISTS_ACCESS_GROUP,
- "Access group with name exists")
+ sim_ag = self.sim_array.access_group_create(name, initiator_id,
+ id_type, system_id, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)
def access_group_del(self, group, flags=0):
- if group.name in self.s.access_groups:
- del self.s.access_groups[group.name]
-
- if group.id in self.s.group_grants:
- del self.s.group_grants[group.id]
-
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del(group.id, flags)
def access_group_add_initiator(self, group, initiator_id, id_type,
flags=0):
- if group.name in self.s.access_groups:
- self.s.access_groups[group.name]['initiators']. \
- append(Initiator(initiator_id, id_type, 'UNA'))
- return None
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ sim_ag = self.sim_array.access_group_add_initiator(
+ group.id, initiator_id, id_type, flags)
+ return SimPlugin._sim_data_2_lsm(sim_ag)
def access_group_del_initiator(self, group, initiator_id, flags=0):
- if group.name in self.s.access_groups:
- for i in self.s.access_groups[group.name]['initiators']:
- if i.id == initiator_id:
- self.s.access_groups[group.name]['initiators']. \
- remove(i)
- return None
-
- raise LsmError(ErrorNumber.INITIATOR_NOT_IN_ACCESS_GROUP,
- "Initiator not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ return self.sim_array.access_group_del_initiator(
+ group.id, initiator_id, flags)
- def volumes_accessible_by_access_group(self, group, flags=0):
- rc = []
- if group.name in self.s.access_groups:
- if group.id in self.s.group_grants:
- for (k, v) in self.s.group_grants[group.id].items():
- rc.append(self._get_volume(k))
+ def access_group_grant(self, group, volume, access, flags=0):
+ return self.sim_array.access_group_grant(
+ group.id, volume.id, access, flags)
- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP,
- "Access group not found")
+ def access_group_revoke(self, group, volume, flags=0):
+ return self.sim_array.access_group_revoke(
+ group.id, volume.id, flags)
- def access_groups_granted_to_volume(self, volume, flags=0):
- rc = []
+ def volumes_accessible_by_access_group(self, group, flags=0):
+ sim_vols = self.sim_array.volumes_accessible_by_access_group(
+ group.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
- for (k, v) in self.s.group_grants.items():
- if volume.id in self.s.group_grants[k]:
- rc.append(self._get_access_group(k))
- return rc
+    def access_groups_granted_to_volume(self, volume, flags=0):
+        sim_ags = self.sim_array.access_groups_granted_to_volume(
+            volume.id, flags)
+        return [SimPlugin._sim_data_2_lsm(a) for a in sim_ags]
- def iscsi_chap_auth(self, initiator, in_user, in_password, out_user,
- out_password, flags=0):
- if initiator is None:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- 'Initiator is required')
+ def initiators(self, flags=0):
+ return self.sim_array.inits(flags)
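+    # Initiator grant/revoke are handled entirely by SimArray; no
+    # per-initiator access group is created at the plugin level any more.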
def initiator_grant(self, initiator_id, initiator_type, volume, access,
flags=0):
- name = initiator_id + volume.id
- group = None
-
- try:
- group = self.access_group_create(name, initiator_id,
- initiator_type,
- volume.system_id)
- result = self.access_group_grant(group, volume, access)
-
- except Exception as e:
- if group:
- self.access_group_del(group)
- raise e
-
- return result
+ return self.sim_array.initiator_grant(
+ initiator_id, initiator_type, volume.id, access, flags)
def initiator_revoke(self, initiator, volume, flags=0):
- name = initiator.id + volume.id
-
- if any(x.id for x in self.initiators()):
- if volume.id in self.s.volumes:
- ag = self._new_access_group(name, self.s.access_groups[name])
-
- if ag:
- self.access_group_del(ag)
- else:
- raise LsmError(ErrorNumber.NO_MAPPING,
- "No mapping of initiator "
- "and volume")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_VOLUME,
- "Volume not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_INITIATOR,
- "Initiator not found")
-
- return None
+ return self.sim_array.initiator_revoke(initiator.id, volume.id, flags)
def volumes_accessible_by_initiator(self, initiator, flags=0):
- rc = []
- volumes = {}
-
- #Go through each access group, for each one see if our initiator
- #is one of them.
- for ag_name, ag_info in self.s.access_groups.items():
- # Check to see if the initiator is in the group.
- if initiator.id in [i.id for i in ag_info['initiators']]:
- # Look up the privileges for this group, if any
- ag_id = StorageSimulator._ag_id(ag_name)
- if ag_id in self.s.group_grants:
- # Loop through the volumes granted to this AG
- for volume_id in self.s.group_grants[ag_id].keys():
- volumes[volume_id] = None
-
- # We very well may have duplicates, thus the reason we enter the
- # volume id into the hash with no value, we are weeding out dupes
- for vol_id in volumes.keys():
- rc.append(self._get_volume(vol_id))
-
- return rc
+ sim_vols = self.sim_array.volumes_accessible_by_initiator(
+ initiator.id, flags)
+ return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols]
def initiators_granted_to_volume(self, volume, flags=0):
- return self._initiators(volume)
+ sim_inits = self.sim_array.initiators_granted_to_volume(
+ volume.id, flags)
+ return [SimPlugin._sim_data_2_lsm(i) for i in sim_inits]
+
+ def iscsi_chap_auth(self, initiator, in_user, in_password,
+ out_user, out_password, flags=0):
+ return self.sim_array.iscsi_chap_auth(
+ initiator.id, in_user, in_password, out_user, out_password, flags)
def volume_child_dependency(self, volume, flags=0):
- return False
+ return self.sim_array.volume_child_dependency(volume.id, flags)
def volume_child_dependency_rm(self, volume, flags=0):
- return None
+ return self.sim_array.volume_child_dependency_rm(volume.id, flags)
def fs(self, flags=0):
- return [e['fs'] for e in self.s.fs.itervalues()]
-
- def fs_delete(self, fs, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- self._deallocate_from_pool(p.id, f.total_space)
- del self.s.fs[fs.id]
+ sim_fss = self.sim_array.fs()
+ return [SimPlugin._sim_data_2_lsm(f) for f in sim_fss]
- #TODO: Check for exports and remove them.
+ def fs_create(self, pool, name, size_bytes, flags=0):
+ sim_fs = self.sim_array.fs_create(pool.id, name, size_bytes)
+ return SimPlugin._sim_data_2_lsm(sim_fs)
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ def fs_delete(self, fs, flags=0):
+ return self.sim_array.fs_delete(fs.id, flags)
def fs_resize(self, fs, new_size_bytes, flags=0):
- if fs.id in self.s.fs:
- f = self.s.fs[fs.id]['fs']
- p = self.s.fs[fs.id]['pool']
-
- #TODO Check to make sure we have enough space before proceeding
- self._deallocate_from_pool(p.id, f.total_space)
- f.total_space = self._allocate_from_pool(p.id, new_size_bytes)
- f.free_space = f.total_space
- return self.__create_job(f)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def fs_create(self, pool, name, size_bytes, flags=0):
- return self._create_fs(pool, name, size_bytes)
+ sim_fs = self.sim_array.fs_resize(
+ fs.id, new_size_bytes, flags)
+ return SimPlugin._sim_data_2_lsm(sim_fs)
def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0):
- #TODO If snapshot is not None, then check for existence.
-
- if src_fs.id in self.s.fs:
- f = self.s.fs[src_fs.id]['fs']
- p = self.s.fs[src_fs.id]['pool']
- return self._create_fs(p, dest_fs_name, f.total_space)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, None, flags)
+ return self.sim_array.fs_clone(
+ src_fs.id, dest_fs_name, snapshot.id, flags)
def file_clone(self, fs, src_file_name, dest_file_name, snapshot=None,
flags=0):
- #TODO If snapshot is not None, then check for existence.
- if fs.id in self.s.fs:
- if src_file_name is not None and dest_file_name is not None:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.INVALID_VALUE,
- "Invalid src/destination file names")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ if snapshot is None:
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, None, flags)
+
+ return self.sim_array.file_clone(
+ fs.id, src_file_name, dest_file_name, snapshot.id, flags)
def fs_snapshots(self, fs, flags=0):
- if fs.id in self.s.fs:
- rc = [e for e in self.s.fs[fs.id]['ss'].itervalues()]
- return rc
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_snaps = self.sim_array.fs_snapshots(fs.id, flags)
+ return [SimPlugin._sim_data_2_lsm(s) for s in sim_snaps]
def fs_snapshot_create(self, fs, snapshot_name, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- for e in self.s.fs[fs.id]['ss'].itervalues():
- if e.name == snapshot_name:
- raise LsmError(ErrorNumber.EXISTS_NAME,
- 'Snapshot name exists')
-
- s = Snapshot(md5(snapshot_name), snapshot_name, time.time())
- self.s.fs[fs.id]['ss'][s.id] = s
- return self.__create_job(s)
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_create(
+ fs.id, snapshot_name, files, flags)
def fs_snapshot_delete(self, fs, snapshot, flags=0):
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- del self.s.fs[fs.id]['ss'][snapshot.id]
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_delete(
+ fs.id, snapshot.id, flags)
def fs_snapshot_revert(self, fs, snapshot, files, restore_files,
all_files=False, flags=0):
-
- StorageSimulator._check_sl(files)
- StorageSimulator._check_sl(files)
-
- if fs.id in self.s.fs:
- if snapshot.id in self.s.fs[fs.id]['ss']:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_SS, "Snapshot not found")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_snapshot_revert(
+ fs.id, snapshot.id, files, restore_files, all_files, flags)
def fs_child_dependency(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return False
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency(fs.id, files, flags)
def fs_child_dependency_rm(self, fs, files, flags=0):
- StorageSimulator._check_sl(files)
- if fs.id in self.s.fs:
- return self.__create_job(None)[0]
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ return self.sim_array.fs_child_dependency_rm(fs.id, files, flags)
def export_auth(self, flags=0):
+        # TODO: This API should change some day; the simulator only
+        #       supports "simple" auth for now.
return ["simple"]
def exports(self, flags=0):
- rc = []
- for fs in self.s.fs.itervalues():
- for exp in fs['exports'].values():
- rc.append(exp)
- return rc
+ sim_exps = self.sim_array.exports(flags)
+ return [SimPlugin._sim_data_2_lsm(e) for e in sim_exps]
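+    # NFS export naming differs between the plugin API and SimArray:
+    # export_fs() maps to SimArray.fs_export() and export_remove()
+    # maps to SimArray.fs_unexport().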
def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list,
anon_uid, anon_gid, auth_type, options, flags=0):
-
- if fs_id in self.s.fs:
- if export_path is None:
- export_path = "/mnt/lsm/sim/%s" % self.s.fs[fs_id]['fs'].name
-
- export_id = md5(export_path)
-
- export = NfsExport(export_id, fs_id, export_path, auth_type,
- root_list, rw_list, ro_list, anon_uid, anon_gid,
- options)
-
- self.s.fs[fs_id]['exports'][export_id] = export
- return export
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
+ sim_exp = self.sim_array.fs_export(
+ fs_id, export_path, root_list, rw_list, ro_list,
+            anon_uid, anon_gid, auth_type, options, flags)
+ return SimPlugin._sim_data_2_lsm(sim_exp)
def export_remove(self, export, flags=0):
- fs_id = export.fs_id
-
- if fs_id in self.s.fs:
- if export.id in self.s.fs[fs_id]['exports']:
- del self.s.fs[fs_id]['exports'][export.id]
- else:
- raise LsmError(ErrorNumber.FS_NOT_EXPORTED, "FS not exported")
- else:
- raise LsmError(ErrorNumber.NOT_FOUND_FS, 'Filesystem not found')
-
- def disks(self, flags=0):
+ return self.sim_array.fs_unexport(export.id, flags)
- rc = []
- # TODO Make these persistent and make it fit into the total model
-
- for i in range(0, 10):
- name = "Sim disk %d" % i
- optionals = None
-
- if flags == Disk.RETRIEVE_FULL_INFO:
- optionals = OptionalData()
- optionals.set('sn', self.__random_vpd(8))
-
- rc.append(Disk(md5(name), name, Disk.DISK_TYPE_HYBRID, 512,
- 1893933056, Disk.STATUS_OK,
- self.s.sys_info.id, optionals))
-
- return rc
diff --git a/lsm/sim_lsmplugin b/lsm/sim_lsmplugin
index a4439a5..d2bee09 100755
--- a/lsm/sim_lsmplugin
+++ b/lsm/sim_lsmplugin
@@ -22,10 +22,10 @@ import syslog
try:
from lsm.pluginrunner import PluginRunner
- from lsm.simulator import StorageSimulator
+ from lsm.simulator import SimPlugin
if __name__ == '__main__':
- PluginRunner(StorageSimulator, sys.argv).run()
+ PluginRunner(SimPlugin, sys.argv).run()
except Exception as e:
#This should be quite rare, but when it does happen this is pretty
#key in understanding what happened, especially when it happens when
--
1.8.3.1