Gris Ge
2014-08-11 13:28:09 UTC
Due to the complexity of pool creation on different arrays, we would like to
suggest that users create pools using vendor-specific tools.
* Removed these methods:
C:
lsm_pool_create()
lsm_pool_create_from_disks()
lsm_pool_create_from()
lsm_pool_create_from_pool()
Python:
pool_create()
pool_create_from_disks()
pool_create_from_pool()
pool_create_from_volumes()
pool_delete()
* Removed unused RAID_TYPE and MEMBER_TYPE.
* Removed related capabilities.
* Removed error number:
C:
LSM_ERR_NOT_FOUND_DISK
LSM_ERR_DISK_BUSY
Python:
ErrorNumber.NOT_FOUND_DISK
ErrorNumber.DISK_BUSY
* Plugins cleaned.
* The 'sim://' plugin uses RAID type and member type to calculate pool
size. We moved the pool constants into the PoolRAID class for future use.
* For the 'ontap://' plugin, its raid_type detection code has just been
commented out. We might use it in the future.
* lsmcli cleaned.
* Tests updated:
tester.c
plugin_test.py
* "make check" and "make distcheck" passed.
Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 87 ---
.../libstoragemgmt/libstoragemgmt_capabilities.h | 22 -
.../include/libstoragemgmt/libstoragemgmt_error.h | 2 -
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 68 --
.../include/libstoragemgmt/libstoragemgmt_types.h | 38 --
c_binding/lsm_mgmt.cpp | 217 -------
c_binding/lsm_plugin_ipc.cpp | 167 -----
doc/man/lsmcli.1.in | 90 ---
plugin/ontap/ontap.py | 24 +-
plugin/sim/simarray.py | 557 +++--------------
plugin/sim/simulator.py | 20 -
plugin/simc/simc_lsmplugin.c | 142 -----
plugin/smispy/smis.py | 694 ---------------------
python_binding/lsm/_client.py | 98 ---
python_binding/lsm/_common.py | 2 -
python_binding/lsm/_data.py | 169 -----
python_binding/lsm/_iplugin.py | 34 -
test/plugin_test.py | 10 +-
test/tester.c | 210 -------
tools/lsmcli/cmdline.py | 174 ------
tools/lsmcli/data_display.py | 78 ---
21 files changed, 104 insertions(+), 2799 deletions(-)
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 968bfdd..1587f0d 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -237,93 +237,6 @@ extern "C" {
uint32_t *count, lsm_flag flags);
/**
- * Create new pool allowing the array to make the most decisions.
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name Name of new pool
- * @param [in] size_bytes Size of new pool in bytes
- * @param [in] raid_type Optional. If defined, new pool should
- * using defined RAID type. When
- * member_type was set to LSM_POOL_MEMBER_TYPE_POOL,
- * only allowed raid_type is LSM_POOL_RAID_TYPE_UNKNOWN or
- * LSM_POOL_RAID_TYPE_NOT_APPLICABLE
- * @param [in] member_type Optional. If defined, new pool will be assembled
- * by defined member types. For example;
- * when member_type == LSM_POOL_MEMBER_TYPE_DISK_SAS,
- * new pool will be created from SAS disks
- * only.
- * @param [out] pool Newly created pool
- * @param [out] job Job ID of async. operation
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create(lsm_connect *conn,
- lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags);
-
- /**
- * Create a pool specifying specific disks to use.
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name The name of the new pool, will not fail
- * if request name cannot be fulfilled
- * @param [in] disks An array of disk pointers to create new
- * pool from.
- * The new pool could contain more disks
- * than requested due to internal needs,
- * but if possible should only contain
- * requested disks.
- * @param [in] num_disks Number of disks in disks array
- * @param [in] raid_type The RAID type for new pool
- * @param [out] pool Newly created pool
- * @param [out] job Job ID of async. operation
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create_from_disks(lsm_connect *conn,
- lsm_system *system, const char *pool_name,
- lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags);
-
- /**
- * Create new pool from an existing pool
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name The name of the new pool, will not fail
- * if request name cannot be fulfilled
- * @param [in] pool The pool to create new pool from
- * @param [in] size_bytes Desired size of new pool
- * @param [out] created_pool Newly created pool
- * @param [out] job Job ID of async.
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create_from_pool(lsm_connect *conn,
- lsm_system *system, const char *pool_name,
- lsm_pool *pool, uint64_t size_bytes,
- lsm_pool** created_pool, char **job, lsm_flag flags);
-
- /**
- * Deletes a pool
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] pool The pool to delete
- * @param [out] job_id Job id of job if async.
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_delete(lsm_connect *conn, lsm_pool *pool,
- char **job_id, lsm_flag flags);
-
-
- /**
* Volume management functions
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index bc45f7e..818c8b2 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -101,28 +101,6 @@ typedef enum {
LSM_CAP_EXPORT_REMOVE = 123, /**< Remove an export */
LSM_CAP_EXPORT_CUSTOM_PATH = 124, /**< Plug-in allows user to define custome export path */
- LSM_CAP_POOL_CREATE = 130, /**< Pool create support */
- LSM_CAP_POOL_CREATE_FROM_DISKS = 131, /**< Pool create from disks */
- LSM_CAP_POOL_CREATE_FROM_POOL = 133, /**< Pool create from pool */
-
- LSM_CAP_POOL_CREATE_DISK_RAID_0 = 140,
- LSM_CAP_POOL_CREATE_DISK_RAID_1 = 141,
- LSM_CAP_POOL_CREATE_DISK_RAID_JBOD = 142,
- LSM_CAP_POOL_CREATE_DISK_RAID_3 = 143,
- LSM_CAP_POOL_CREATE_DISK_RAID_4 = 144,
- LSM_CAP_POOL_CREATE_DISK_RAID_5 = 145,
- LSM_CAP_POOL_CREATE_DISK_RAID_6 = 146,
- LSM_CAP_POOL_CREATE_DISK_RAID_10 = 147,
- LSM_CAP_POOL_CREATE_DISK_RAID_50 = 148,
- LSM_CAP_POOL_CREATE_DISK_RAID_51 = 149,
- LSM_CAP_POOL_CREATE_DISK_RAID_60 = 150,
- LSM_CAP_POOL_CREATE_DISK_RAID_61 = 151,
- LSM_CAP_POOL_CREATE_DISK_RAID_15 = 152,
- LSM_CAP_POOL_CREATE_DISK_RAID_16 = 153,
- LSM_CAP_POOL_CREATE_DISK_RAID_NOT_APPLICABLE = 154,
-
- LSM_CAP_POOL_DELETE = 200, /**< Pool delete support */
-
LSM_CAP_POOLS_QUICK_SEARCH = 210, /**< Seach occurs on array */
LSM_CAP_VOLUMES_QUICK_SEARCH = 211, /**< Seach occurs on array */
LSM_CAP_DISKS_QUICK_SEARCH = 212, /**< Seach occurs on array */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
index 5ce1d2c..d381912 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
@@ -80,7 +80,6 @@ typedef enum {
LSM_ERR_NOT_FOUND_NFS_EXPORT = 206, /**< NFS export not found */
LSM_ERR_NOT_FOUND_INITIATOR = 207, /**< Initiator not found */
LSM_ERR_NOT_FOUND_SYSTEM = 208, /**< System not found */
- LSM_ERR_NOT_FOUND_DISK = 209, /**< Disk not found */
LSM_ERR_NOT_LICENSED = 226, /**< Need license for feature */
@@ -103,7 +102,6 @@ typedef enum {
LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**< Transport serialization error */
LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**< Parameter transported over IPC is invalid */
- LSM_ERR_DISK_BUSY = 500, /* Disk already in use */
LSM_ERR_VOLUME_BUSY = 501, /* Volume already in use */
ACCESS_GROUP_MASKED = 502,
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index 3296252..1da1aa3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -230,70 +230,6 @@ typedef int (*lsm_plug_target_port_list)( lsm_plugin_ptr c,
uint32_t *count, lsm_flag flags);
/**
- * Create a pool.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System
- * @param[in] pool_name Human name of pool
- * @param[in] size_bytes Desired size of pool
- * @param[in] raid_type Raid type for pool
- * @param[in] member_type Type of individual members eg. SAS/FC/SSD etc.
- * @param[out] pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create)( lsm_plugin_ptr c, lsm_system* system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type, lsm_pool_member_type member_type,
- lsm_pool **pool, char **job, lsm_flag flags);
-
-/**
- * Create a pool and specifying disks to use.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System
- * @param[in] pool_name Human name of pool
- * @param[in] disks Array of disk pointers to create pool from
- * @param[in] num_disks Number of disks
- * @param[in] raid_type Raid type for pool
- * @param[out] pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create_from_disks)( lsm_plugin_ptr c,
- lsm_system *system,
- const char *pool_name, lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type, lsm_pool **pool, char **job,
- lsm_flag flags);
-
-
-/**
- * Create a pool and specifying pool to use.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System id
- * @param[in] pool_name Human name of pool
- * @param[in] pool Pool to create pool from
- * @param[in] size_bytes Size of pool
- * @param[out] created_pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create_from_pool)( lsm_plugin_ptr c,
- lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags );
-
-
-/**
- * Delete a pool.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] pool Pool to delete
- * @param[out] job Job pointer if job is async
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_delete)( lsm_plugin_ptr c, lsm_pool *pool, char **job,
- lsm_flag flags);
-
-/**
* Creates a volume, callback function signature
* @param[in] c Valid lsm plug-in pointer
* @param[in] pool Pool to allocated storage from
@@ -822,10 +758,6 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
lsm_plug_disk_list disk_get; /**< retrieve disks */
- lsm_plug_pool_create pool_create; /**< Pool create */
- lsm_plug_pool_create_from_disks pool_create_from_disks; /**< Pool create from disks */
- lsm_plug_pool_create_from_pool pool_create_from_pool; /**< Pool creation from pool */
- lsm_plug_pool_delete pool_delete; /**< Delete a pool */
lsm_plug_volume_create vol_create; /**< creating a lun */
lsm_plug_volume_replicate vol_replicate; /**< replicating lun */
lsm_plug_volume_replicate_range_block_size vol_rep_range_bs; /**< volume replication range block size */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 4d3b6bd..82a23a8 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -254,44 +254,6 @@ typedef enum {
#define LSM_POOL_ELEMENT_TYPE_SYS_RESERVED 0x0000000000000400
typedef enum {
- LSM_POOL_MEMBER_TYPE_UNKNOWN = 0,
- LSM_POOL_MEMBER_TYPE_DISK = 1,
- LSM_POOL_MEMBER_TYPE_POOL = 2,
- LSM_POOL_MEMBER_TYPE_DISK_MIX = 10,
- LSM_POOL_MEMBER_TYPE_DISK_ATA = 11,
- LSM_POOL_MEMBER_TYPE_DISK_SATA = 12,
- LSM_POOL_MEMBER_TYPE_DISK_SAS = 13,
- LSM_POOL_MEMBER_TYPE_DISK_FC = 14,
- LSM_POOL_MEMBER_TYPE_DISK_SOP = 15,
- LSM_POOL_MEMBER_TYPE_DISK_SCSI = 16,
- LSM_POOL_MEMBER_TYPE_DISK_NL_SAS = 17,
- LSM_POOL_MEMBER_TYPE_DISK_HDD = 18,
- LSM_POOL_MEMBER_TYPE_DISK_SSD = 19,
- LSM_POOL_MEMBER_TYPE_DISK_HYBRID = 110,
- LSM_POOL_MEMBER_TYPE_DISK_LUN = 111
-} lsm_pool_member_type;
-
-typedef enum {
- LSM_POOL_RAID_TYPE_0 = 0,
- LSM_POOL_RAID_TYPE_1 = 1,
- LSM_POOL_RAID_TYPE_3 = 3,
- LSM_POOL_RAID_TYPE_4 = 4,
- LSM_POOL_RAID_TYPE_5 = 5,
- LSM_POOL_RAID_TYPE_6 = 6,
- LSM_POOL_RAID_TYPE_10 = 10,
- LSM_POOL_RAID_TYPE_15 = 15,
- LSM_POOL_RAID_TYPE_16 = 16,
- LSM_POOL_RAID_TYPE_50 = 50,
- LSM_POOL_RAID_TYPE_60 = 60,
- LSM_POOL_RAID_TYPE_51 = 51,
- LSM_POOL_RAID_TYPE_61 = 61,
- LSM_POOL_RAID_TYPE_JBOD = 20,
- LSM_POOL_RAID_TYPE_UNKNOWN = 21,
- LSM_POOL_RAID_TYPE_NOT_APPLICABLE = 22,
- LSM_POOL_RAID_TYPE_MIXED = 23
-} lsm_pool_raid_type;
-
-typedef enum {
LSM_PORT_TYPE_UNKNOWN = 0,
LSM_PORT_TYPE_OTHER = 1,
LSM_PORT_TYPE_FC = 2,
diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 5805cf9..9d86714 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -888,223 +888,6 @@ static void* parse_job_response(lsm_connect *c, Value response, int &rc,
return val;
}
-static int valid_pool_raid_type(lsm_pool_raid_type validate)
-{
- switch(validate) {
- case (LSM_POOL_RAID_TYPE_0):
- case (LSM_POOL_RAID_TYPE_1):
- case (LSM_POOL_RAID_TYPE_3):
- case (LSM_POOL_RAID_TYPE_5):
- case (LSM_POOL_RAID_TYPE_6):
- case (LSM_POOL_RAID_TYPE_10):
- case (LSM_POOL_RAID_TYPE_15):
- case (LSM_POOL_RAID_TYPE_16):
- case (LSM_POOL_RAID_TYPE_50):
- case (LSM_POOL_RAID_TYPE_60):
- case (LSM_POOL_RAID_TYPE_51):
- case (LSM_POOL_RAID_TYPE_61):
- case (LSM_POOL_RAID_TYPE_JBOD):
- case (LSM_POOL_RAID_TYPE_UNKNOWN):
- case (LSM_POOL_RAID_TYPE_NOT_APPLICABLE):
- case (LSM_POOL_RAID_TYPE_MIXED):
- break;
- default:
- return 0;
- }
- return 1;
-}
-
-static int valid_pool_member_type(lsm_pool_member_type validate)
-{
- switch(validate) {
- case (LSM_POOL_MEMBER_TYPE_UNKNOWN):
- case (LSM_POOL_MEMBER_TYPE_DISK):
- case (LSM_POOL_MEMBER_TYPE_POOL):
- case (LSM_POOL_MEMBER_TYPE_DISK_MIX):
- case (LSM_POOL_MEMBER_TYPE_DISK_ATA):
- case (LSM_POOL_MEMBER_TYPE_DISK_SATA):
- case (LSM_POOL_MEMBER_TYPE_DISK_SAS):
- case (LSM_POOL_MEMBER_TYPE_DISK_FC):
- case (LSM_POOL_MEMBER_TYPE_DISK_SOP):
- case (LSM_POOL_MEMBER_TYPE_DISK_SCSI):
- case (LSM_POOL_MEMBER_TYPE_DISK_NL_SAS):
- case (LSM_POOL_MEMBER_TYPE_DISK_HDD):
- case (LSM_POOL_MEMBER_TYPE_DISK_SSD):
- case (LSM_POOL_MEMBER_TYPE_DISK_HYBRID):
- case (LSM_POOL_MEMBER_TYPE_DISK_LUN):
- break;
- default:
- return 0;
- }
- return 1;
-}
-
-int lsm_pool_create(lsm_connect *c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags)
-{
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || CHECK_STR(pool_name) || !size_bytes ||
- CHECK_RP(pool)|| CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ||
- !valid_pool_raid_type(raid_type) ||
- !valid_pool_member_type(member_type)) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p["size_bytes"] = Value(size_bytes);
- p["raid_type"] = Value((int32_t)raid_type);
- p["member_type"] = Value((int32_t)member_type);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, "pool_create", parameters, response);
- if( LSM_ERR_OK == rc ) {
- *pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
-}
-
-
-static int lsm_pool_create_from(lsm_connect *c,
- lsm_system *system, const char *pool_name,
- std::vector<Value> &member_ids, lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags,
- const char *member_id, const char *method)
-{
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || CHECK_STR(pool_name) ||
- CHECK_RP(pool)|| CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ||
- !valid_pool_raid_type(raid_type) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p[member_id] = Value(member_ids);
- p["raid_type"] = Value((int32_t)raid_type);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, method, parameters, response);
- if( LSM_ERR_OK == rc ) {
- *pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
-}
-
-int LSM_DLL_EXPORT lsm_pool_create_from_disks(lsm_connect *c,
- lsm_system *system, const char *pool_name,
- lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags)
-{
- uint32_t i;
-
- CONN_SETUP(c);
-
- if( !disks || !num_disks ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- /* Create disks container */
- std::vector<Value> d;
- for( i = 0; i < num_disks; ++i ) {
- d.push_back(disk_to_value(disks[i]));
- }
-
- return lsm_pool_create_from(c, system, pool_name, d, raid_type, pool, job,
- flags, "disks", "pool_create_from_disks");
-
-}
-
-
- int lsm_pool_create_from_pool(lsm_connect *c, lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags)
- {
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || !LSM_IS_POOL(pool) || CHECK_STR(pool_name) ||
- !size_bytes || CHECK_RP(created_pool)|| CHECK_RP(job) ||
- LSM_FLAG_UNUSED_CHECK(flags) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p["size_bytes"] = Value(size_bytes);
- p["pool"] = pool_to_value(pool);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, "pool_create_from_pool", parameters, response);
- if( LSM_ERR_OK == rc ) {
- *created_pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
- }
-
-int lsm_pool_delete(lsm_connect *c, lsm_pool *pool, char **job, lsm_flag flags)
-{
- int rc;
- CONN_SETUP(c);
-
- if( !LSM_IS_POOL(pool) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- try {
-
- std::map<std::string, Value> p;
- p["pool"] = pool_to_value(pool);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- rc = rpc(c, "pool_delete", parameters, response);
- if( LSM_ERR_OK == rc ) {
- //We get a value back, either null or job id.
- if( Value::string_t == response.valueType() ) {
- *job = strdup(response.asString().c_str());
-
- if( *job ) {
- rc = LSM_ERR_JOB_STARTED;
- } else {
- rc = LSM_ERR_NO_MEMORY;
- }
- }
- }
- } catch( const ValueException &ve ) {
- rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
- ve.what());
- }
- return rc;
- }
-
int lsm_volume_create(lsm_connect *c, lsm_pool *pool, const char *volumeName,
uint64_t size, lsm_provision_type provisioning,
lsm_volume **newVolume, char **job, lsm_flag flags)
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index b174d09..eef07db 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -543,169 +543,6 @@ static int handle_target_ports(lsm_plugin_ptr p, Value ¶ms, Value &response)
return rc;
}
-static int handle_pool_create(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_size = params["size_bytes"];
- Value v_raid_t = params["raid_type"];
- Value v_member_t = params["member_type"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::numeric_t == v_size.valueType() &&
- Value::numeric_t == v_raid_t.valueType() &&
- Value::numeric_t == v_member_t.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- lsm_system *system = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- uint64_t size = v_size.asUint64_t();
- lsm_pool_raid_type raid_type = (lsm_pool_raid_type)v_raid_t.asInt32_t();
- lsm_pool_member_type member_type = (lsm_pool_member_type)v_member_t.asInt32_t();
- lsm_pool *pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create(p, system, pool_name, size, raid_type,
- member_type, &pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(pool);
- response = job_handle(p, job);
- lsm_pool_record_free(pool);
- lsm_system_record_free(system);
- free(job);
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_create_from_disks(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create_from_disks ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_disks = params["disks"];
- Value v_raid_t = params["raid_type"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::array_t == v_disks.valueType() &&
- Value::numeric_t == v_raid_t.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- /* Get the array of disks */
- lsm_disk **disks = NULL;
- uint32_t num_disks = 0;
- rc = value_array_to_disks(v_disks, &disks, &num_disks);
-
- if( LSM_ERR_OK == rc ) {
- lsm_system *sys = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- lsm_pool_raid_type raid_type = (lsm_pool_raid_type)v_raid_t.asInt32_t();
-
- lsm_pool *pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create_from_disks(p, sys, pool_name,
- disks, num_disks, raid_type,
- &pool, &job, LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(pool);
- response = job_handle(p, job);
- lsm_disk_record_array_free(disks, num_disks);
- lsm_pool_record_free(pool);
- lsm_system_record_free(sys);
- free(job);
- }
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_create_from_pool(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create_from_pool ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_pool = params["pool"];
- Value v_size = params["size_bytes"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::object_t == v_pool.valueType() &&
- Value::numeric_t == v_size.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- lsm_system *sys = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- lsm_pool *pool = value_to_pool(v_pool);
- uint64_t size = v_size.asUint64_t();
-
- lsm_pool *created_pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create_from_pool(p, sys, pool_name,
- pool, size, &created_pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(created_pool);
- response = job_handle(p, job);
- lsm_pool_record_free(created_pool);
- lsm_pool_record_free(pool);
- lsm_system_record_free(sys);
- free(job);
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_delete(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_delete ) {
- Value v_pool = params["pool"];
-
- if(Value::object_t == v_pool.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params) ) {
- lsm_pool *pool = value_to_pool(v_pool);
-
- if( pool ) {
- char *job = NULL;
-
- rc = p->san_ops->pool_delete(p, pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- response = Value(job);
- }
-
- lsm_pool_record_free(pool);
- free(job);
- } else {
- rc = LSM_ERR_NO_MEMORY;
- }
-
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
static int capabilities(lsm_plugin_ptr p, Value ¶ms, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2289,10 +2126,6 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("job_status", handle_job_status)
("plugin_info", handle_plugin_info)
("pools", handle_pools)
- ("pool_create", handle_pool_create)
- ("pool_create_from_disks", handle_pool_create_from_disks)
- ("pool_create_from_pool", handle_pool_create_from_pool)
- ("pool_delete", handle_pool_delete)
("target_ports", handle_target_ports)
("time_out_set", handle_set_time_out)
("plugin_unregister", handle_unregister)
diff --git a/doc/man/lsmcli.1.in b/doc/man/lsmcli.1.in
index d73467a..7d96fb5 100644
--- a/doc/man/lsmcli.1.in
+++ b/doc/man/lsmcli.1.in
@@ -567,96 +567,6 @@ Required. Repeatable. Destination file to clone (relative path).
For two or more files/paths:
'\fB--src fileA --src fileB --dst old_fileA --dst old_fileB\fR'.
-.SS pool-create
-Creates a storage pool. LibStorageMgmt will automatically choose the correct
-pool members to assemble new pool. This require POOL_CREATE capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--size\fR \fI<POOL_SIZE>\fR
-Required. The size of new pool. Due to data alignment or other issue, the
-size of new pool might larger than requested.
-See \fBSIZE OPTION\fR for allowed formats.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-.TP
-\fB--raid-type\fR \fI<RAID_TYPE>\fR
-Optional. The RAID type of new pool. Valid values are:
-\fBNOT_APPLICABLE\fR, \fBJBOD\fR, \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR,
-\fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID51\fR, \fBRAID60\fR,
-\fBRAID61\fR.
-.br
-The \fBNOT_APPLICABLE\fR means pool only contain 1 member.
-If not defined, will let array to determine the RAID type.
-.br
-When using with \fB--member-type POOL\fR, \fB--raid-type\fR should be unset or
-defined as \fBNOT_APPLICABLE\fR.
-.TP
-\fB--member-type\fR \fI<MEM_TYPE>\fR
-Optional. The type of resource to create new pool. Valid values are:
-\fBDISK\fR, \fBVOLUME\fR, \fBPOOL\fR, \fBDISK_ATA\fR, \fBDISK_SATA\fR,
-\fBDISK_SAS\fR, \fBDISK_FC\fR, \fBDISK_SOP\fR \fBDISK_SCSI\fR, \fBDISK_NL_SAS,
-\fBDISK_HDD\fR, \fBDISK_SSD\fR, \fBDISK_HYBRID\fR.
-.br
-The \fBDISK\fR member type means creating pool from disk(s). For \fBDISK_\fR
-prefixed types, they are used to request new pool creating from certain type
-of DISK.
-\fBDISK_SOP\fR indicate SCSI over PCI-E, normally a PCI-E based SSD.
-\fBDISK_HYBRID\fR indicate HDD and SSD hybrid(mixed) disk.
-.br
-The \fBVOLUME\fR member type means creating pool from volume(s).
-.br
-The \fBPOOL\fR member type means create sub-pool from another pool.
-
-.SS pool-create-from-disks
-Create a new pool by specifying which disks to use with which RAID type.
-This require POOL_CREATE_FROM_DISKS capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--raid-type\fR \fI<RAID_TYPE>\fR
-Required. The RAID type of new pool. Valid values are:
-\fBNOT_APPLICABLE\fR, \fBJBOD\fR, \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR,
-\fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID51\fR, \fBRAID60\fR,
-\fBRAID61\fR.
-.br
-The \fBNOT_APPLICABLE\fR means pool only contain 1 disks.
-For supported RAID types of certain array, please use \fBcapabilities\fR
-command for POOL_CREATE_DISK_RAID_XXX entries.
-.TP
-\fB--member-id\fR \fI<DISK_ID>\fR
-Required. Repeatable. The ID of disk to create new pool.
-For two or more members: '\fB--member-id DISK_ID_A --member DISK_ID_B\fR'.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-
-.SS pool-create-from-pool
-Create a new sub-pool from specified pool. This require POOL_CREATE_FROM_POOLS
-capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--size\fR \fI<POOL_SIZE>\fR
-Required. The spaces of new pool.
-See \fBSIZE OPTION\fR for allowed formats.
-.TP
-\fB--member-id\fR \fI<POOL_ID>\fR
-Required. The ID of pool to create new pool from.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-
-.SS pool-delete
-Deletes a storage pool.
-.TP 15
-\fB--pool\fR \fI<POOL_ID>\fR
-Required. The ID of pool to delete.
-
.IP
.SH ALIAS
.SS ls
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index 067fc3f..88c3ddd 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -312,18 +312,18 @@ class Ontap(IStorageAreaNetwork, INfs):
"""
return na_xxx['uuid']
- @staticmethod
- def _raid_type_of_na_aggr(na_aggr):
- na_raid_statuses = na_aggr['raid-status'].split(',')
- if 'raid0' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID0
- if 'raid4' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID4
- if 'raid_dp' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID6
- if 'mixed_raid_type' in na_raid_statuses:
- return Pool.RAID_TYPE_MIXED
- return Pool.RAID_TYPE_UNKNOWN
+# @staticmethod
+# def _raid_type_of_na_aggr(na_aggr):
+# na_raid_statuses = na_aggr['raid-status'].split(',')
+# if 'raid0' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID0
+# if 'raid4' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID4
+# if 'raid_dp' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID6
+# if 'mixed_raid_type' in na_raid_statuses:
+# return Pool.RAID_TYPE_MIXED
+# return Pool.RAID_TYPE_UNKNOWN
@staticmethod
def _status_of_na_aggr(na_aggr):
diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index c81c851..d0d291c 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -34,6 +34,69 @@ from lsm import (System, Volume, Disk, Pool, FileSystem, AccessGroup,
D_FMT = 5
+class PoolRAID(object):
+ RAID_TYPE_RAID0 = 0
+ RAID_TYPE_RAID1 = 1
+ RAID_TYPE_RAID3 = 3
+ RAID_TYPE_RAID4 = 4
+ RAID_TYPE_RAID5 = 5
+ RAID_TYPE_RAID6 = 6
+ RAID_TYPE_RAID10 = 10
+ RAID_TYPE_RAID15 = 15
+ RAID_TYPE_RAID16 = 16
+ RAID_TYPE_RAID50 = 50
+ RAID_TYPE_RAID60 = 60
+ RAID_TYPE_RAID51 = 51
+ RAID_TYPE_RAID61 = 61
+ # number 2x is reserved for non-numbered RAID.
+ RAID_TYPE_JBOD = 20
+ RAID_TYPE_UNKNOWN = 21
+ RAID_TYPE_NOT_APPLICABLE = 22
+ # NOT_APPLICABLE indicate current pool only has one member.
+ RAID_TYPE_MIXED = 23
+
+ MEMBER_TYPE_UNKNOWN = 0
+ MEMBER_TYPE_DISK = 1
+ MEMBER_TYPE_DISK_MIX = 10
+ MEMBER_TYPE_DISK_ATA = 11
+ MEMBER_TYPE_DISK_SATA = 12
+ MEMBER_TYPE_DISK_SAS = 13
+ MEMBER_TYPE_DISK_FC = 14
+ MEMBER_TYPE_DISK_SOP = 15
+ MEMBER_TYPE_DISK_SCSI = 16
+ MEMBER_TYPE_DISK_NL_SAS = 17
+ MEMBER_TYPE_DISK_HDD = 18
+ MEMBER_TYPE_DISK_SSD = 19
+ MEMBER_TYPE_DISK_HYBRID = 110
+ MEMBER_TYPE_DISK_LUN = 111
+
+ MEMBER_TYPE_POOL = 2
+
+ _MEMBER_TYPE_2_DISK_TYPE = {
+ MEMBER_TYPE_DISK: Disk.DISK_TYPE_UNKNOWN,
+ MEMBER_TYPE_DISK_MIX: Disk.DISK_TYPE_UNKNOWN,
+ MEMBER_TYPE_DISK_ATA: Disk.DISK_TYPE_ATA,
+ MEMBER_TYPE_DISK_SATA: Disk.DISK_TYPE_SATA,
+ MEMBER_TYPE_DISK_SAS: Disk.DISK_TYPE_SAS,
+ MEMBER_TYPE_DISK_FC: Disk.DISK_TYPE_FC,
+ MEMBER_TYPE_DISK_SOP: Disk.DISK_TYPE_SOP,
+ MEMBER_TYPE_DISK_SCSI: Disk.DISK_TYPE_SCSI,
+ MEMBER_TYPE_DISK_NL_SAS: Disk.DISK_TYPE_NL_SAS,
+ MEMBER_TYPE_DISK_HDD: Disk.DISK_TYPE_HDD,
+ MEMBER_TYPE_DISK_SSD: Disk.DISK_TYPE_SSD,
+ MEMBER_TYPE_DISK_HYBRID: Disk.DISK_TYPE_HYBRID,
+ MEMBER_TYPE_DISK_LUN: Disk.DISK_TYPE_LUN,
+ }
+
+ @staticmethod
+ def member_type_is_disk(member_type):
+ """
+ Returns True if defined 'member_type' is disk.
+ False when else.
+ """
+ return member_type in PoolRAID._MEMBER_TYPE_2_DISK_TYPE
+
+
class SimJob(object):
"""
Simulates a longer running job, uses actual wall time. If test cases
@@ -169,31 +232,6 @@ class SimArray(object):
rc.extend([self._sim_pool_2_lsm(sim_pool, flags)])
return SimArray._sort_by_id(rc)
- def pool_create(self, sys_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- sim_pool = self.data.pool_create(
- sys_id, pool_name, size_bytes, raid_type, member_type, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_create_from_disks(self, sys_id, pool_name, disks_ids, raid_type,
- flags=0):
- sim_pool = self.data.pool_create_from_disks(
- sys_id, pool_name, disks_ids, raid_type, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_create_from_pool(self, sys_id, pool_name, member_id, size_bytes,
- flags=0):
- sim_pool = self.data.pool_create_from_pool(
- sys_id, pool_name, member_id, size_bytes, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_delete(self, pool_id, flags=0):
- return self.data.job_create(self.data.pool_delete(pool_id, flags))[0]
-
def disks(self):
rc = []
sim_disks = self.data.disks()
@@ -489,9 +527,9 @@ class SimData(object):
sim_pool = {
'name': pool_name,
'pool_id': Pool.id,
- 'raid_type': Pool.RAID_TYPE_XXXX,
+ 'raid_type': PoolRAID.RAID_TYPE_XXXX,
'member_ids': [ disk_id or pool_id or volume_id ],
- 'member_type': Pool.MEMBER_TYPE_XXXX,
+ 'member_type': PoolRAID.MEMBER_TYPE_XXXX,
'member_size': size_bytes # space allocated from each member pool.
# only for MEMBER_TYPE_POOL
'status': SIM_DATA_POOL_STATUS,
@@ -500,14 +538,12 @@ class SimData(object):
}
"""
SIM_DATA_BLK_SIZE = 512
- SIM_DATA_VERSION = "2.6"
+ SIM_DATA_VERSION = "2.7"
SIM_DATA_SYS_ID = 'sim-01'
SIM_DATA_INIT_NAME = 'NULL'
SIM_DATA_TMO = 30000 # ms
SIM_DATA_POOL_STATUS = Pool.STATUS_OK
SIM_DATA_POOL_STATUS_INFO = ''
- SIM_DATA_DISK_DEFAULT_RAID = Pool.RAID_TYPE_RAID0
- SIM_DATA_VOLUME_DEFAULT_RAID = Pool.RAID_TYPE_RAID0
SIM_DATA_POOL_ELEMENT_TYPE = Pool.ELEMENT_TYPE_FS \
| Pool.ELEMENT_TYPE_POOL \
| Pool.ELEMENT_TYPE_VOLUME
@@ -570,9 +606,9 @@ class SimData(object):
'POO1': {
'pool_id': 'POO1',
'name': 'Pool 1',
- 'member_type': Pool.MEMBER_TYPE_DISK_SATA,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SATA,
'member_ids': [SimData._disk_id(0), SimData._disk_id(1)],
- 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID1,
'status': SimData.SIM_DATA_POOL_STATUS,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -581,10 +617,10 @@ class SimData(object):
'POO2': {
'pool_id': 'POO2',
'name': 'Pool 2',
- 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_type': PoolRAID.MEMBER_TYPE_POOL,
'member_ids': ['POO1'],
'member_size': pool_size_200g,
- 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -594,9 +630,9 @@ class SimData(object):
'lsm_test_aggr': {
'pool_id': 'lsm_test_aggr',
'name': 'lsm_test_aggr',
- 'member_type': Pool.MEMBER_TYPE_DISK_SAS,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SAS,
'member_ids': [SimData._disk_id(2), SimData._disk_id(3)],
- 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID0,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -651,12 +687,12 @@ class SimData(object):
self.pool_dict['POO3'] = {
'pool_id': 'POO3',
'name': 'Pool 3',
- 'member_type': Pool.MEMBER_TYPE_DISK_SSD,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SSD,
'member_ids': [
self.disk_dict[SimData._disk_id(9)]['disk_id'],
self.disk_dict[SimData._disk_id(10)]['disk_id'],
],
- 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID1,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -731,7 +767,7 @@ class SimData(object):
return 0
free_space -= sim_fs['consume_size']
for sim_pool in self.pool_dict.values():
- if sim_pool['member_type'] != Pool.MEMBER_TYPE_POOL:
+ if sim_pool['member_type'] != PoolRAID.MEMBER_TYPE_POOL:
continue
if pool_id in sim_pool['member_ids']:
free_space -= sim_pool['member_size']
@@ -750,11 +786,11 @@ class SimData(object):
def _size_of_raid(self, member_type, member_ids, raid_type,
pool_each_size=0):
member_sizes = []
- if Pool.member_type_is_disk(member_type):
+ if PoolRAID.member_type_is_disk(member_type):
for member_id in member_ids:
member_sizes.extend([self.disk_dict[member_id]['total_space']])
- elif member_type == Pool.MEMBER_TYPE_POOL:
+ elif member_type == PoolRAID.MEMBER_TYPE_POOL:
for member_id in member_ids:
member_sizes.extend([pool_each_size])
@@ -768,38 +804,38 @@ class SimData(object):
for member_size in member_sizes:
all_size += member_size
- if raid_type == Pool.RAID_TYPE_JBOD or \
- raid_type == Pool.RAID_TYPE_NOT_APPLICABLE or \
- raid_type == Pool.RAID_TYPE_RAID0:
+ if raid_type == PoolRAID.RAID_TYPE_JBOD or \
+ raid_type == PoolRAID.RAID_TYPE_NOT_APPLICABLE or \
+ raid_type == PoolRAID.RAID_TYPE_RAID0:
return int(all_size)
- elif (raid_type == Pool.RAID_TYPE_RAID1 or
- raid_type == Pool.RAID_TYPE_RAID10):
+ elif (raid_type == PoolRAID.RAID_TYPE_RAID1 or
+ raid_type == PoolRAID.RAID_TYPE_RAID10):
if member_count % 2 == 1:
return 0
return int(all_size / 2)
- elif (raid_type == Pool.RAID_TYPE_RAID3 or
- raid_type == Pool.RAID_TYPE_RAID4 or
- raid_type == Pool.RAID_TYPE_RAID5):
+ elif (raid_type == PoolRAID.RAID_TYPE_RAID3 or
+ raid_type == PoolRAID.RAID_TYPE_RAID4 or
+ raid_type == PoolRAID.RAID_TYPE_RAID5):
if member_count < 3:
return 0
return int(all_size - member_size)
- elif raid_type == Pool.RAID_TYPE_RAID50:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID50:
if member_count < 6 or member_count % 2 == 1:
return 0
return int(all_size - member_size * 2)
- elif raid_type == Pool.RAID_TYPE_RAID6:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID6:
if member_count < 4:
return 0
return int(all_size - member_size * 2)
- elif raid_type == Pool.RAID_TYPE_RAID60:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID60:
if member_count < 8 or member_count % 2 == 1:
return 0
return int(all_size - member_size * 4)
- elif raid_type == Pool.RAID_TYPE_RAID51:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID51:
if member_count < 6 or member_count % 2 == 1:
return 0
return int(all_size / 2 - member_size)
- elif raid_type == Pool.RAID_TYPE_RAID61:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID61:
if member_count < 8 or member_count % 2 == 1:
return 0
print "%s" % size_bytes_2_size_human(all_size)
@@ -817,7 +853,7 @@ class SimData(object):
sim_pool = self.pool_dict[pool_id]
each_pool_size_bytes = 0
member_type = sim_pool['member_type']
- if sim_pool['member_type'] == Pool.MEMBER_TYPE_POOL:
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
each_pool_size_bytes = sim_pool['member_size']
return self._size_of_raid(
@@ -1412,416 +1448,5 @@ class SimData(object):
del self.exp_dict[exp_id]
return None
- def _free_disks_list(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
- """
- Return a list of free sim_disk.
- Return [] if no free disk found.
- """
- free_sim_disks = []
- for sim_disk in self.disk_dict.values():
- if disk_type != Disk.DISK_TYPE_UNKNOWN and \
- sim_disk['disk_type'] != disk_type:
- continue
- flag_free = True
- for sim_pool in self.pool_dict.values():
- if Pool.member_type_is_disk(sim_pool['member_type']) and \
- sim_disk['disk_id'] in sim_pool['member_ids']:
- flag_free = False
- break
- if flag_free is True:
- free_sim_disks.extend([sim_disk])
- return sorted(free_sim_disks, key=lambda k: (k['disk_id']))
-
- def _free_disks(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
- """
- Return a dictionary like this:
- {
- Disk.DISK_TYPE_XXX: {
- Disk.total_space: [sim_disk, ]
- }
- }
- Return None if not free.
- """
- free_sim_disks = self._free_disks_list()
- rc = dict()
- for sim_disk in free_sim_disks:
- if disk_type != Disk.DISK_TYPE_UNKNOWN and \
- sim_disk['disk_type'] != disk_type:
- continue
-
- cur_type = sim_disk['disk_type']
- cur_size = sim_disk['total_space']
-
- if cur_type not in rc.keys():
- rc[cur_type] = dict()
-
- if cur_size not in rc[cur_type]:
- rc[cur_type][cur_size] = []
-
- rc[cur_type][cur_size].extend([sim_disk])
-
- return rc
-
- def _free_pools_list(self):
- """
- Return a list of sim_pool or []
- """
- free_sim_pools = []
- for sim_pool in self.pool_dict.values():
- # TODO: one day we will introduce free_size of Volume.
- # in that case we will check whether
- # total_space == pool_free_size(sim_pool['pool_id'])
- pool_id = sim_pool['pool_id']
- if self.pool_free_space(pool_id) > 0:
- free_sim_pools.extend([sim_pool])
- return sorted(
- free_sim_pools,
- key=lambda k: (k['pool_id'].isupper(), k['pool_id']))
-
- def _pool_create_from_disks(self, pool_name, member_ids, raid_type,
- raise_error=False):
- # Check:
- # 1. The disk_id is valid
- # 2. All disks are the same disk type.
- # 3. All disks are free.
- # 4. All disks' total space is the same.
- if len(member_ids) <= 0:
- if raise_error:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "No disk ID defined")
- else:
- return None
-
- if raid_type == Pool.RAID_TYPE_NOT_APPLICABLE and \
- len(member_ids) >= 2:
- if raise_error:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Pool.RAID_TYPE_NOT_APPLICABLE means only 1 " +
- "member, but got 2 or more: %s" %
- ', '.join(member_ids))
- else:
- return None
-
- current_disk_type = None
- current_total_space = None
- for disk_id in member_ids:
- if disk_id not in self.disk_dict.keys():
- if raise_error:
- raise LsmError(ErrorNumber.NOT_FOUND_DISK,
- "The disk ID %s does not exist" % disk_id)
- else:
- return None
- sim_disk = self.disk_dict[disk_id]
- if current_disk_type is None:
- current_disk_type = sim_disk['disk_type']
- elif current_disk_type != sim_disk['disk_type']:
- if raise_error:
- raise LsmError(
- ErrorNumber.NO_SUPPORT,
- "Mixing disk types in one pool " +
- "is not supported: %s and %s" %
- (Disk.disk_type_to_str(current_disk_type),
- Disk.disk_type_to_str(sim_disk['disk_type'])))
- else:
- return None
- if current_total_space is None:
- current_total_space = sim_disk['total_space']
- elif current_total_space != sim_disk['total_space']:
- if raise_error:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Mixing different size of disks is not " +
- "supported")
- else:
- return None
-
- all_free_disks = self._free_disks_list()
- if all_free_disks is None:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free disk to create new pool")
- else:
- return None
- all_free_disk_ids = [d['disk_id'] for d in all_free_disks]
- for disk_id in member_ids:
- if disk_id not in all_free_disk_ids:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "Disk %s is used by other pool" % disk_id)
- else:
- return None
-
- if raid_type == Pool.RAID_TYPE_UNKNOWN or \
- raid_type == Pool.RAID_TYPE_MIXED:
- if raise_error:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "RAID type %s(%d) is not supported" %
- (Pool.raid_type_to_str(raid_type), raid_type))
- else:
- return None
-
- pool_id = self._next_pool_id()
- if pool_name == '':
- pool_name = 'POOL %s' % SimData._random_vpd(4)
-
- sim_pool = dict()
- sim_pool['name'] = pool_name
- sim_pool['pool_id'] = pool_id
- if len(member_ids) == 1:
- sim_pool['raid_type'] = Pool.RAID_TYPE_NOT_APPLICABLE
- else:
- sim_pool['raid_type'] = raid_type
- sim_pool['member_ids'] = member_ids
- sim_pool['member_type'] = \
- Pool.disk_type_to_member_type(current_disk_type)
- sim_pool['sys_id'] = SimData.SIM_DATA_SYS_ID
- sim_pool['element_type'] = SimData.SIM_DATA_POOL_ELEMENT_TYPE
- sim_pool['status'] = SimData.SIM_DATA_POOL_STATUS
- sim_pool['status_info'] = SimData.SIM_DATA_POOL_STATUS_INFO
- self.pool_dict[pool_id] = sim_pool
- return sim_pool
-
- def pool_create_from_disks(self, sys_id, pool_name, member_ids, raid_type,
- flags=0):
- """
- return newly create sim_pool or None.
- """
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
-
- return self._pool_create_from_disks(pool_name, member_ids, raid_type,
- raise_error=True)
-
- def _pool_create_from_pool(self, pool_name, member_id,
- size_bytes, raise_error=False):
-
- size_bytes = SimData._block_rounding(size_bytes)
- free_sim_pools = self._free_pools_list()
- free_sim_pool_ids = [p['pool_id'] for p in free_sim_pools]
- if len(free_sim_pool_ids) == 0 or \
- member_id not in free_sim_pool_ids:
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "Pool %s " % member_id +
- "is full, no space to create new pool")
- else:
- return None
-
- free_size = self.pool_free_space(member_id)
- if free_size < size_bytes:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "Pool %s does not have requested free" %
- member_id + "to create new pool")
-
- pool_id = self._next_pool_id()
- if pool_name == '':
- pool_name = 'POOL %s' % SimData._random_vpd(4)
- sim_pool = dict()
- sim_pool['name'] = pool_name
- sim_pool['pool_id'] = pool_id
- sim_pool['raid_type'] = Pool.RAID_TYPE_NOT_APPLICABLE
- sim_pool['member_ids'] = [member_id]
- sim_pool['member_type'] = Pool.MEMBER_TYPE_POOL
- sim_pool['member_size'] = size_bytes
- sim_pool['sys_id'] = SimData.SIM_DATA_SYS_ID
- sim_pool['element_type'] = SimData.SIM_DATA_POOL_ELEMENT_TYPE
- sim_pool['status'] = SimData.SIM_DATA_POOL_STATUS
- sim_pool['status_info'] = SimData.SIM_DATA_POOL_STATUS_INFO
- self.pool_dict[pool_id] = sim_pool
- return sim_pool
-
- def pool_create_from_pool(self, sys_id, pool_name, member_id, size_bytes,
- flags=0):
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
- return self._pool_create_from_pool(pool_name, member_id, size_bytes,
- raise_error=True)
-
- def _auto_choose_disk(self, size_bytes, raid_type, disk_type,
- raise_error=False):
- """
- Return a list of member ids suitable for creating RAID pool with
- required size_bytes.
- Return [] if nothing found.
- if raise_error is True, raise error if not found
- """
- disk_type_str = "disk"
- if disk_type != Disk.DISK_TYPE_UNKNOWN:
- disk_type_str = "disk(type: %s)" % Disk.disk_type_to_str(disk_type)
-
- if raid_type == Pool.RAID_TYPE_NOT_APPLICABLE:
- # NOT_APPLICABLE means pool will only contain one disk.
- sim_disks = self._free_disks_list(disk_type)
- if len(sim_disks) == 0:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free %s found" % disk_type_str)
- else:
- return []
-
- for sim_disk in sim_disks:
- if sim_disk['total_space'] >= size_bytes:
- return [sim_disk]
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No %s is bigger than " % disk_type_str +
- "expected size: %s(%d)" %
- (size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- if raid_type == Pool.RAID_TYPE_JBOD:
- # JBOD does not require all disks in the same size or the same type.
- sim_disks = self._free_disks_list(disk_type)
- if len(sim_disks) == 0:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free %s found" % disk_type_str)
- else:
- return []
-
- chose_sim_disks = []
- all_free_size = 0
- for sim_disk in sim_disks:
- chose_sim_disks.extend([sim_disk])
- all_free_size += sim_disk['total_space']
- if all_free_size >= size_bytes:
- return chose_sim_disks
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough %s to provide size %s(%d)" %
- (disk_type_str,
- size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- # All rest RAID type require member are in the same size and same
- # type.
- sim_disks_struct = self._free_disks(disk_type)
- for cur_disk_type in sim_disks_struct.keys():
- for cur_disk_size in sim_disks_struct[cur_disk_type].keys():
- cur_sim_disks = sim_disks_struct[cur_disk_type][cur_disk_size]
- if len(cur_sim_disks) == 0:
- continue
- chose_sim_disks = []
- for member_count in range(1, len(cur_sim_disks) + 1):
- partial_sim_disks = cur_sim_disks[0:member_count]
- member_ids = [x['disk_id'] for x in partial_sim_disks]
- raid_actual_size = self._size_of_raid(
- Pool.MEMBER_TYPE_DISK, member_ids, raid_type)
- if size_bytes <= raid_actual_size:
- return cur_sim_disks[0:member_count]
-
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough %s " % disk_type_str +
- "to create %s providing size: %s(%d)" %
- (Pool.raid_type_to_str(raid_type),
- size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- def _auto_choose_pool(self, size_bytes, raise_error=False):
- """
- Return a sim_pool.
- Return None if not found.
- """
- sim_pools = self._free_pools_list()
- if len(sim_pools) >= 1:
- for sim_pool in sim_pools:
- pool_id = sim_pool['pool_id']
- free_size = self.pool_free_space(pool_id)
- if free_size >= size_bytes:
- return sim_pool
-
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No pool is bigger than expected size: " +
- "%s(%d)" %
- (size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return None
-
- def pool_create(self, sys_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
-
- size_bytes = SimData._block_rounding(size_bytes)
-
- raise_error = False
- if member_type != Pool.MEMBER_TYPE_UNKNOWN:
- raise_error = True
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN or \
- Pool.member_type_is_disk(member_type):
- disk_raid_type = raid_type
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- disk_raid_type = SimData.SIM_DATA_DISK_DEFAULT_RAID
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- disk_type = Disk.DISK_TYPE_UNKNOWN
- else:
- disk_type = Pool.member_type_to_disk_type(member_type)
- sim_disks = self._auto_choose_disk(
- size_bytes, disk_raid_type, disk_type, raise_error)
- if len(sim_disks) >= 1:
- member_ids = [d['disk_id'] for d in sim_disks]
- sim_pool = self._pool_create_from_disks(
- pool_name, member_ids, disk_raid_type, raise_error)
- if sim_pool:
- return sim_pool
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN or \
- member_type == Pool.MEMBER_TYPE_POOL:
- if raid_type != Pool.RAID_TYPE_UNKNOWN and \
- raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Pool based pool does not support " +
- "raid_type: %s(%d)" %
- (Pool.raid_type_to_str(raid_type),
- raid_type))
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- if raid_type != Pool.RAID_TYPE_UNKNOWN and \
- raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "No enough free disk or volume spaces " +
- "to create new pool. And pool based " +
- "pool does not support raid_type: %s" %
- Pool.raid_type_to_str(raid_type))
-
- member_sim_pool = self._auto_choose_pool(size_bytes, raise_error)
- if member_sim_pool:
- member_id = member_sim_pool['pool_id']
- sim_pool = self._pool_create_from_pool(
- pool_name, member_id, size_bytes, raise_error)
- if sim_pool:
- return sim_pool
-
- # only member_type == Pool.MEMBER_TYPE_UNKNOWN can reach here.
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough free spaces to create new pool")
-
- def pool_delete(self, pool_id, flags=0):
- if pool_id not in self.pool_dict.keys():
- raise LsmError(ErrorNumber.NOT_FOUND_POOL,
- "Pool not found: %s" % pool_id)
-
- volumes = self.volumes()
- for v in volumes:
- if v['pool_id'] == pool_id:
- raise LsmError(ErrorNumber.EXISTS_VOLUME,
- "Volumes exist on pool")
-
- del(self.pool_dict[pool_id])
- return None
-
def target_ports(self):
return self.tgt_dict.values()
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index f6c26de..f84f6bd 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -101,26 +101,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
[SimPlugin._sim_data_2_lsm(p) for p in sim_pools],
search_key, search_value)
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- return self.sim_array.pool_create(
- system.id, pool_name, size_bytes, raid_type, member_type, flags)
-
- def pool_create_from_disks(self, system, pool_name, disks,
- raid_type, flags=0):
- member_ids = [x.id for x in disks]
- return self.sim_array.pool_create_from_disks(
- system.id, pool_name, member_ids, raid_type, flags)
-
- def pool_create_from_pool(self, system, pool_name, pool,
- size_bytes, flags=0):
- return self.sim_array.pool_create_from_pool(
- system.id, pool_name, pool.id, size_bytes, flags)
-
- def pool_delete(self, pool, flags=0):
- return self.sim_array.pool_delete(pool.id, flags)
-
def volumes(self, search_key=None, search_value=None, flags=0):
sim_vols = self.sim_array.volumes()
return search_property(
diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index cb2b7dd..12a6edd 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -932,144 +932,6 @@ static int _volume_delete(lsm_plugin_ptr c, const char *volume_id)
return rc;
}
-static int _pool_create(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool **pool, char **job)
-{
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *new_pool = NULL;
- lsm_pool *pool_to_store = NULL;
- char *key = NULL;
-
- /* Verify system id */
- if( strcmp(lsm_system_id_get(system), lsm_system_id_get(pd->system[0])) == 0 ) {
- /* Verify that we don't already have a pool by that name */
- new_pool = find_pool_name(pd, pool_name);
- if( !new_pool ) {
- /* Create the pool */
- new_pool = lsm_pool_record_alloc(md5(pool_name), pool_name, 0, size_bytes,
- size_bytes, LSM_POOL_STATUS_OK, "",
- lsm_system_id_get(system), NULL);
-
- pool_to_store = lsm_pool_record_copy(new_pool);
- key = strdup(lsm_pool_id_get(pool_to_store));
- if( new_pool && pool_to_store && key ) {
- g_hash_table_insert(pd->pools, key, pool_to_store);
-
- /* Create a job */
- rc = create_job(pd, job, LSM_DATA_TYPE_POOL, new_pool,
- (void**)pool);
- } else {
- free(key);
- lsm_pool_record_free(new_pool);
- lsm_pool_record_free(pool_to_store);
- rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "No memory");
- }
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_EXISTS_POOL,
- "Pool with name exists!");
- }
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_SYSTEM,
- "system not found!");
- }
- return rc;
-}
-
-
-static int pool_create(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags)
-{
- return _pool_create(c, system, pool_name, size_bytes, pool, job);
-}
-
-static int pool_create_from_disks( lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type, lsm_pool **pool, char **job,
- lsm_flag flags)
-{
- /* Check that the disks are valid, then call common routine */
- uint64_t size = 0;
- int rc = LSM_ERR_OK;
- int i = 0;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
-
- if( num_disks ) {
- for( i = 0; i < num_disks; ++i ) {
- lsm_disk *d = find_disk(pd, lsm_disk_id_get(disks[i]));
- if( d ) {
- size += (lsm_disk_number_of_blocks_get(d) * lsm_disk_block_size_get(d));
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_DISK,
- "Disk not found");
- goto bail;
- }
- }
-
- rc = _pool_create(c, system, pool_name, size, pool, job);
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_INVALID_ARGUMENT, "No disks provided");
- }
-bail:
- return rc;
-}
-
-static int pool_create_from_pool(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags )
-{
- /* Check that the disks are valid, then call common routine */
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *p = find_pool(pd, lsm_pool_id_get(pool));
-
- if( p ) {
- rc = _pool_create(c, system, pool_name, size_bytes, created_pool, job);
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL, "Pool not found");
- }
- return rc;
-}
-
-static int pool_delete(lsm_plugin_ptr c, lsm_pool *pool, char **job,
- lsm_flag flags)
-{
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *pool_to_delete = find_pool(pd, lsm_pool_id_get(pool));
-
- if( pool_to_delete ) {
-
- /* Loop through building a list of volumes in this pool */
- char *k = NULL;
- struct allocated_volume *vol;
- GHashTableIter iter;
- g_hash_table_iter_init(&iter, pd->volumes);
- while(g_hash_table_iter_next(&iter,(gpointer) &k,(gpointer)&vol)) {
- if( strcmp(lsm_volume_pool_id_get(vol->v), lsm_pool_id_get(pool)) == 0 ) {
- rc = lsm_log_error_basic(c, LSM_ERR_EXISTS_VOLUME,
- "volumes exist on pool");
- goto bail;
- }
- }
-
- /* Remove pool from hash and create job */
- g_hash_table_remove(pd->pools, lsm_pool_id_get(pool));
- rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL);
-
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL,
- "pool not found!");
- }
-bail:
- return rc;
-}
-
static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
char **job, lsm_flag flags)
{
@@ -1574,10 +1436,6 @@ static int iscsi_chap_auth(lsm_plugin_ptr c, const char *init_id,
static struct lsm_san_ops_v1 san_ops = {
list_volumes,
list_disks,
- pool_create,
- pool_create_from_disks,
- pool_create_from_pool,
- pool_delete,
volume_create,
volume_replicate,
volume_replicate_range_bs,
diff --git a/plugin/smispy/smis.py b/plugin/smispy/smis.py
index 368b606..2d91c96 100644
--- a/plugin/smispy/smis.py
+++ b/plugin/smispy/smis.py
@@ -1218,8 +1218,6 @@ class Smis(IStorageAreaNetwork):
return ErrorNumber.NOT_FOUND_SYSTEM
if class_type == 'Pool':
return ErrorNumber.NOT_FOUND_POOL
- if class_type == 'Disk':
- return ErrorNumber.NOT_FOUND_DISK
if class_type == 'Job':
return ErrorNumber.NOT_FOUND_JOB
if class_type == 'AccessGroup':
@@ -3367,170 +3365,6 @@ class Smis(IStorageAreaNetwork):
Smis._DMTF_STATUS_TO_POOL_STATUS_INFO[dmtf_status])
return (status, ", ".join(status_info))
- def _find_out_bottom_cexts(self, cim_pool_path, pros_list=None):
- """
- This is based on 'Extent Composition' subprofile.
- CIM_StoragePool can based on several CIM_CompositeExtent with several
- level. We will find out the bottom level CIM_CompositeExtent.
- This is how we traverse down:
- CIM_StoragePool
- ^
- | GroupComponent
- |
- | CIM_ConcreteComponent/CIM_AssociatedComponentExtent
- | |-> deprecated in SMI-S 1.5rev4 by ---^
- |
- | PartComponent
- v
- CIM_CompositeExtent # The rest traverse was handle by
- ^ # _traverse_cext()
- | GroupComponent
- |
- | CIM_BasedOn
- |
- | PartComponent
- v
- CIM_CompositeExtent
- .
- .
- .
- Will return a list of CIMInstance of CIM_CompositeExtent.
- Mid-level CIM_CompositeExtent will not included.
- If nothing found, return []
- """
- if pros_list is None:
- pros_list = []
- bottom_cim_cexts = []
- try:
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_AssociatedComponentExtent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
- except CIMError as ce:
- error_code = tuple(ce)[0]
- if error_code == pywbem.CIM_ERR_INVALID_CLASS or \
- error_code == pywbem.CIM_ERR_INVALID_PARAMETER:
- # Not support SMIS 1.5, using 1.4 way.
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_ConcreteComponent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
- else:
- raise
- if cim_pool_path.classname == 'LSIESG_StoragePool':
- # LSI does not report error on CIM_AssociatedComponentExtent
- # But they don't support it.
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_ConcreteComponent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
-
- if len(cim_cexts) == 0:
- return []
- for cim_cext in cim_cexts:
- tmp_cim_cexts = self._traverse_cext(cim_cext.path, pros_list)
- if len(tmp_cim_cexts) == 0:
- # already at the bottom level
- bottom_cim_cexts.extend([cim_cext])
- else:
- bottom_cim_cexts.extend(tmp_cim_cexts)
- return bottom_cim_cexts
-
- def _traverse_cext(self, cim_cext_path, pros_list=None):
- """
- Using this procedure to find out the bottom level CIM_CompositeExtent.
- CIM_CompositeExtent
- ^
- | GroupComponent
- |
- | CIM_BasedOn
- |
- | PartComponent
- v
- CIM_CompositeExtent
- .
- .
- .
- Will return a list of CIMInstance of CIM_CompositeExtent.
- Mid-level CIM_CompositeExtent will not included.
- If nothing found, return []
- """
- if pros_list is None:
- pros_list = []
- cim_sub_cexts = self._c.Associators(
- cim_cext_path,
- AssocClass='CIM_BasedOn',
- ResultClass='CIM_CompositeExtent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- PropertyList=pros_list)
- if len(cim_sub_cexts) == 0:
- return []
- cim_bottom_cexts = []
- for cim_sub_cext in cim_sub_cexts:
- tmp_cim_bottom_cexts = self._traverse_cext(cim_sub_cext.path,
- pros_list)
- if len(tmp_cim_bottom_cexts) == 0:
- cim_bottom_cexts.extend([cim_sub_cext])
- else:
- cim_bottom_cexts.extend(tmp_cim_bottom_cexts)
- return cim_bottom_cexts
-
- def _traverse_cext_2_pri_ext(self, cim_cext_path, pros_list=None):
- """
- Using this procedure to find out the member disks of
- CIM_CompositeExtent:
- CIM_CompositeExtent
- ^
- | Dependent
- |
- | CIM_BasedOn
- |
- | Antecedent
- v
- CIM_StorageExtent (Concrete)
- ^
- | Dependent
- |
- | CIM_BasedOn
- |
- | Antecedent
- v
- CIM_StorageExtent (Concrete)
- .
- .
- .
- CIM_StorageExtent (Primordial)
- """
- if pros_list is None:
- pros_list = []
- if 'Primordial' not in pros_list:
- pros_list.extend(['Primordial'])
- cim_sub_exts = self._c.Associators(
- cim_cext_path,
- AssocClass='CIM_BasedOn',
- ResultClass='CIM_StorageExtent',
- Role='Dependent',
- ResultRole='Antecedent',
- PropertyList=pros_list)
- cim_pri_exts = []
- for cim_sub_ext in cim_sub_exts:
- if cim_sub_ext['Primordial']:
- cim_pri_exts.extend([cim_sub_ext])
- else:
- cim_pri_exts.extend(
- self._traverse_cext_2_pri_ext(cim_sub_ext.path))
- return cim_pri_exts
-
def _cim_disk_of_pri_ext(self, cim_pri_ext_path, pros_list=None):
"""
Follow this procedure to find out CIM_DiskDrive from Primordial
@@ -3604,534 +3438,6 @@ class Smis(IStorageAreaNetwork):
return element_type
- def _pool_opt_data(self, cim_pool):
- """
- Usage:
- Update Pool object with optional data found in cim_pool.
- The CIMInstance cim_pool was supposed to hold all optional data.
- So that we save 1 SMI-S query.
- No matter we found any info or not, we still return the unknown
- filler, with this, we can make sure return object are containing
- same order/length of column_data().
- Parameter:
- cim_pool # CIMInstance of CIM_StoragePool
- Returns:
- opt_pro_dict # dict containing optional properties
- Exceptions:
- NONE
- """
- opt_pro_dict = {
- 'thinp_type': Pool.THINP_TYPE_UNKNOWN,
- 'raid_type': Pool.RAID_TYPE_UNKNOWN,
- 'member_type': Pool.MEMBER_TYPE_UNKNOWN,
- 'member_ids': [],
- 'element_type': Pool.ELEMENT_TYPE_UNKNOWN,
- }
-
- # check whether current pool support create volume or not.
- cim_sccs = self._c.Associators(
- cim_pool.path,
- AssocClass='CIM_ElementCapabilities',
- ResultClass='CIM_StorageConfigurationCapabilities',
- PropertyList=['SupportedStorageElementFeatures',
- 'SupportedStorageElementTypes'])
- # Associate StorageConfigurationCapabilities to StoragePool
- # is experimental in SNIA 1.6rev4, Block Book PDF Page 68.
- # Section 5.1.6 StoragePool, StorageVolume and LogicalDisk
- # Manipulation, Figure 9 - Capabilities Specific to a StoragePool
- if len(cim_sccs) == 1:
- cim_scc = cim_sccs[0]
- if 'SupportedStorageElementFeatures' in cim_scc and \
- Smis.DMTF_SUPPORT_VOL_CREATE in \
- cim_scc['SupportedStorageElementFeatures']:
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- # When certain Pool can create ThinlyProvisionedStorageVolume,
- # we mark it as Thin Pool.
- if 'SupportedStorageElementTypes' in cim_scc:
- dmtf_element_types = cim_scc['SupportedStorageElementTypes']
- if Smis.DMTF_ELEMENT_THIN_VOLUME in dmtf_element_types:
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THIN
- else:
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THICK
- else:
- # IBM DS 8000 does not support StorageConfigurationCapabilities
- # per pool yet. They has been informed. Before fix, use a quick
- # workaround.
- # TODO: Currently, we don't have a way to detect
- # Pool.ELEMENT_TYPE_POOL
- # but based on knowing definition of each vendor.
- if cim_pool.classname == 'IBMTSDS_VirtualPool' or \
- cim_pool.classname == 'IBMTSDS_ExtentPool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- elif cim_pool.classname == 'IBMTSDS_RankPool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_POOL
- elif cim_pool.classname == 'LSIESG_StoragePool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THICK
-
- pool_id_pros = self._property_list_of_id('Pool', ['Primordial'])
- # We use some blacklist here to speed up by skipping unnecessary
- # parent pool checking.
- # These class are known as Disk Pool, no need to waste time on
- # checking 'Pool over Pool' layout.
- if cim_pool.classname == 'Clar_UnifiedStoragePool' or \
- cim_pool.classname == 'IBMTSDS_RankPool' or \
- cim_pool.classname == 'LSIESG_StoragePool' or \
- cim_pool.classname == 'ONTAP_ConcretePool':
- pass
- else:
- cim_parent_pools = self._c.Associators(
- cim_pool.path,
- AssocClass='CIM_AllocatedFromStoragePool',
- Role='Dependent',
- ResultRole='Antecedent',
- ResultClass='CIM_StoragePool',
- PropertyList=pool_id_pros)
- for cim_parent_pool in cim_parent_pools:
- if not cim_parent_pool['Primordial']:
- opt_pro_dict['member_type'] = Pool.MEMBER_TYPE_POOL
- opt_pro_dict['member_ids'].extend(
- [self._pool_id(cim_parent_pool)])
-
- raid_pros = self._raid_type_pros()
- cim_cexts = []
- # We skip disk member checking on VMAX due to bad performance.
- if cim_pool.classname != 'Symm_DeviceStoragePool':
- cim_cexts = self._find_out_bottom_cexts(cim_pool.path, raid_pros)
- raid_type = None
- for cim_cext in cim_cexts:
- cur_raid_type = self._raid_type_of(cim_cext)
-
- if (raid_type is not None) and cur_raid_type != raid_type:
- raid_type = Pool.RAID_TYPE_MIXED
- else:
- raid_type = cur_raid_type
-
- if opt_pro_dict['member_type'] == Pool.MEMBER_TYPE_POOL:
- # we already know current pool is based on pool or volume.
- # skipping disk member traverse walk.
- continue
-
- # TODO: Current way consume too much time(too many SMIS call).
- # SNIA current standard (1.6rev4) does not have any better
- # way for disk members querying.
- cim_pri_exts = self._traverse_cext_2_pri_ext(cim_cext.path)
- cim_disks = []
- disk_id_pros = self._property_list_of_id('Disk')
- for cim_pri_ext in cim_pri_exts:
- cim_disk = self._cim_disk_of_pri_ext(cim_pri_ext.path,
- disk_id_pros)
- if cim_disk:
- cim_disks.extend([cim_disk])
- if len(cim_disks) > 0:
- cur_member_ids = []
- for cim_disk in cim_disks:
- cur_member_ids.extend([self._disk_id(cim_disk)])
-
- opt_pro_dict['member_type'] = Pool.MEMBER_TYPE_DISK
- opt_pro_dict['member_ids'].extend(cur_member_ids)
-
- if raid_type is not None:
- opt_pro_dict['raid_type'] = raid_type
-
- return opt_pro_dict
-
- @staticmethod
- def _raid_type_pros():
- """
- Return a list of properties needed to detect RAID type from
- CIM_StorageExtent.
- """
- return ['DataRedundancy', 'PackageRedundancy',
- 'NoSinglePointOfFailure', 'ExtentStripeLength']
-
- @staticmethod
- def _raid_type_of(cim_ext):
- """
- Take CIM_CompositePool to check the RAID type of it.
- Only check the up-first level of RAID, we does not nested down.
- For example, when got a RAID 1 CIM_CompositePool, we return
- Pool.RAID_TYPE_RAID1
- If failed to detect the RAID level, will return:
- Pool.RAID_TYPE_UNKNOWN
- Since this is a private method, we do not check whether cim_ext is
- valid or not.
- Make sure you have all properties listed in _raid_type_pros()
- # TODO: to support RAID 3 and RAID 4 level.
- # RAID 3/4 could be checked via
- # CIM_StorageSetting['ParityLayout']
- # RAID 3: stripesize is 512 (ExtentStripeLength == 1)
- # RAID 4: stripesize is 512 * (disk_count -1)
- #
- # Problem is: there is no SNIA spec said CIM_StorageSetting
- # should associate to CIM_CompositeExtent.
- # Since RAID 3/4 is rare in market, low priority.
- """
- if not cim_ext:
- return Pool.RAID_TYPE_UNKNOWN
- if 'DataRedundancy' not in cim_ext or \
- 'PackageRedundancy' not in cim_ext or \
- 'NoSinglePointOfFailure' not in cim_ext or \
- 'ExtentStripeLength' not in cim_ext:
- return Pool.RAID_TYPE_UNKNOWN
-
- # DataRedundancy:
- # Number of complete copies of data currently maintained.
- data_redundancy = cim_ext['DataRedundancy']
- # PackageRedundancy:
- # How many physical packages can currently fail without data loss.
- # For example, in the storage domain, this might be disk spindles.
- pack_redundancy = cim_ext['PackageRedundancy']
- # NoSinglePointOfFailure:
- # Indicates whether or not there exists no single point of
- # failure.
- no_spof = cim_ext['NoSinglePointOfFailure']
-
- # ExtentStripeLength:
- # Number of contiguous underlying StorageExtents counted before
- # looping back to the first underlying StorageExtent of the
- # current stripe. It is the number of StorageExtents forming the
- # user data stripe.
- stripe_len = cim_ext['ExtentStripeLength']
-
- # determine the RAID type as SNIA document require.
- # JBOD
- if ((data_redundancy == 1) and
- (pack_redundancy == 0) and
- (not no_spof) and
- (stripe_len == 1)):
- return Pool.RAID_TYPE_JBOD
- # RAID 0
- elif ((data_redundancy == 1) and
- (pack_redundancy == 0) and
- (not no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID0
- # RAID 1
- elif ((data_redundancy == 2) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len == 1)):
- return Pool.RAID_TYPE_RAID1
- # RAID 5
- elif ((data_redundancy == 1) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID5
- # RAID 6
- elif ((data_redundancy == 1) and
- (pack_redundancy == 2) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID6
- # RAID 10
- elif ((data_redundancy == 2) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID10
- # Base on these data, we cannot determine RAID 15 or 51 and etc.
- # In stead of providing incorrect info, we choose to provide nothing.
- return Pool.RAID_TYPE_UNKNOWN
-
- @handle_cim_errors
- def pool_delete(self, pool, flags=0):
- """
- Delete a Pool via CIM_StorageConfigurationService.DeleteStoragePool
- """
- if not self.fallback_mode and \
- self._profile_is_supported(SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4,
- strict=False) is None:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "SMI-S %s version %s is not supported" %
- (SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4))
-
- cim_pool = self._get_cim_instance_by_id('Pool', pool.id)
- cim_scs = self._get_class_instance(
- 'CIM_StorageConfigurationService',
- 'SystemName', pool.system_id)
-
- in_params = {'Pool': cim_pool.path}
-
- return self._pi("pool_delete", Smis.JOB_RETRIEVE_NONE,
- *(self._c.InvokeMethod('DeleteStoragePool',
- cim_scs.path,
- **in_params)))[0]
-
- @handle_cim_errors
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Creating pool via
- CIM_StorageConfigurationService.CreateOrModifyStoragePool()
- from SMI-S 1.4+ "Block Services" profile.
- TODO: Each vendor are needing different parameters for
- CreateOrModifyStoragePool()
- """
- if not self.fallback_mode and \
- self._profile_is_supported(SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4,
- strict=False) is None:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "SMI-S %s version %s is not supported" %
- (SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4))
-
- cim_sys = self._get_cim_instance_by_id('System', system.id)
-
- # we does not support defining thinp_type yet.
- # just using whatever provider set.
-
- in_params = {}
- if pool_name:
- in_params['ElementName'] = pool_name
-
- in_cim_exts_path = []
- if Pool.member_type_is_disk(member_type):
- disk_type = Pool.member_type_to_disk_type(member_type)
- if disk_type != Disk.DISK_TYPE_UNKNOWN:
- # We have to define InExtents for certain disk type.
- # SNIA 1.6.1 CIM_StorageSetting has these experimetal
- # properties:
- # DiskType, InterconnectType, InterconnectSpeed,
- # FormFactor, RPM, PortType.
- # But currently, no vendor implement that.
- # And there is no effective way to detect the free disks,
- # walking though all CIM_CompositeExtent is not a good idea.
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "The pool_create of SMI-S plugin does not "
- "support defining disk type in member_type")
- else:
- # We depend on SMI-S provider to chose the disks for us.
- pass
-
- elif member_type == Pool.MEMBER_TYPE_POOL:
- # I(Gris) have lost my access to IBM DS8000 which support pool
- # over pool. I will raise NO_SUPPORT until got array to test on.
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "The pool_create of SMI-S plugin does not "
- "support creating pool over pool(sub-pool) yet")
-
- elif member_type == Pool.MEMBER_TYPE_UNKNOWN:
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Got invalid member_type %d" % member_type)
-
- in_params['Size'] = pywbem.Uint64(size_bytes)
-
- if raid_type != Pool.RAID_TYPE_UNKNOWN:
- in_params['Goal'] = self._cim_st_path_for_goal(
- raid_type, cim_sys.path)
-
- cim_scs = self._get_class_instance(
- 'CIM_StorageConfigurationService',
- 'SystemName', system.id)
-
- in_params = self._pool_chg_paras_check(in_params, cim_sys.path)
- return self._pi("pool_create", Smis.JOB_RETRIEVE_POOL,
- *(self._c.InvokeMethod(
- 'CreateOrModifyStoragePool',
- cim_scs.path, **in_params)))
-
- @handle_cim_errors
- def _find_preset_cim_st(self, cim_cap_path, raid_type):
- """
- Usage:
- Find first proper CIM_StorageSetting under speficied
- CIM_StorageCapabilities by giving raid_type.
- Thin pool prefered.
- Parameter:
- cim_cap_path # CIMInstanceName of CIM_StorageCapabilities
- raid_type # Pool.RAID_TYPE_XXX
- Returns:
- cim_st # CIMInstance of CIM_StorageSetting
- or
- None # No match found
- """
- cim_sts = self._c.Associators(
- cim_cap_path,
- AssocClass='CIM_StorageSettingsAssociatedToCapabilities',
- ResultClass='CIM_StorageSetting',
- PropertyList=['ElementName',
- 'ThinProvisionedPoolType'])
- if not cim_sts:
- return None
- possible_element_names = []
- if raid_type == Pool.RAID_TYPE_JBOD:
- possible_element_names = ['JBOD']
- elif (raid_type == Pool.RAID_TYPE_RAID0 or
- raid_type == Pool.RAID_TYPE_NOT_APPLICABLE):
- possible_element_names = ['RAID0']
- elif raid_type == Pool.RAID_TYPE_RAID1:
- possible_element_names = ['RAID1']
- elif raid_type == Pool.RAID_TYPE_RAID3:
- possible_element_names = ['RAID3']
- elif raid_type == Pool.RAID_TYPE_RAID4:
- possible_element_names = ['RAID4']
- elif raid_type == Pool.RAID_TYPE_RAID5:
- possible_element_names = ['RAID5']
- elif raid_type == Pool.RAID_TYPE_RAID6:
- # According to SNIA suggest, RAID6 can also be writen as RAID5DP
- # and etc.
- possible_element_names = ['RAID6', 'RAID5DP']
- elif raid_type == Pool.RAID_TYPE_RAID10:
- possible_element_names = ['RAID10', 'RAID1+0']
- elif raid_type == Pool.RAID_TYPE_RAID50:
- possible_element_names = ['RAID50', 'RAID5+0']
- elif raid_type == Pool.RAID_TYPE_RAID60:
- possible_element_names = ['RAID60', 'RAID6+0', 'RAID5DP+0']
- elif raid_type == Pool.RAID_TYPE_RAID51:
- possible_element_names = ['RAID51', 'RAID5+1']
- elif raid_type == Pool.RAID_TYPE_RAID61:
- possible_element_names = ['RAID61', 'RAID6+1', 'RAID5DP+1']
- else:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Got unknown RAID type: %d" % raid_type)
-
- chose_cim_sts = []
- for cim_st in cim_sts:
- if cim_st['ElementName'] in possible_element_names:
- chose_cim_sts.extend([cim_st])
-
- if len(chose_cim_sts) == 1:
- return chose_cim_sts[0]
-
- elif len(chose_cim_sts) > 1:
- # Perfer the thin pool. This is for EMC VNX which support both
- # think pool(less feature) and thin pool.
- for cim_st in chose_cim_sts:
- if cim_st['ThinProvisionedPoolType'] == \
- Smis.DMTF_THINP_POOL_TYPE_ALLOCATED:
- return cim_st
-
- # Return the first one if no thin pool setting found.
- return chose_cim_sts[0]
-
- return None
-
- def _cim_st_path_for_goal(self, raid_type, cim_sys_path):
- """
- Usage:
- Find out the array pre-defined CIM_StorageSetting for certain RAID
- Level. Check CIM_StorageSetting['ElementName'] for RAID type.
- Even SNIA defined a way to create new setting, but we find out
- that not a good way to follow.
- Pool.RAID_TYPE_NOT_APPLICABLE will be treat as RAID 0.
- # TODO: currently no check we will get one member for
- # Pool.RAID_TYPE_NOT_APPLICABLE. Maybe we should replace
- # this RAID type by RAID_0.
- Parameter:
- raid_type # Tier.RAID_TYPE_XXX
- cim_sys_path # CIMInstanceName of CIM_ComputerSystem.
- Returns:
- cim_st_path # Found or created CIMInstanceName of
- # CIM_StorageSetting
- Exceptions:
- LsmError
- ErrorNumber.NO_SUPPORT # Failed to find out
- # suitable CIM_StorageSetting
- """
- chose_cim_st = None
- # We will try to find the existing CIM_StorageSetting
- # with ElementName equal to raid_type_str
- # potted(pre-defined) CIM_StorageSetting
- cim_pool_path = None
- cim_pools = self._c.Associators(cim_sys_path,
- ResultClass='CIM_StoragePool',
- PropertyList=['Primordial'])
- # Base on SNIA commanded, each array should provide a
- # Primordial pool.
- for cim_tmp_pool in cim_pools:
- if cim_tmp_pool['Primordial']:
- cim_pool_path = cim_tmp_pool.path
- break
- if not cim_pool_path:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Target storage array does not have any "
- "Primordial CIM_StoragePool")
- cim_caps = self._c.Associators(
- cim_pool_path,
- ResultClass='CIM_StorageCapabilities',
- PropertyList=['ElementType'])
- for cim_cap in cim_caps:
- tmp_cim_st_set = self._find_preset_cim_st(cim_cap.path, raid_type)
- if tmp_cim_st_set:
- return tmp_cim_st_set.path
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support RAID type: %d"
- % raid_type)
-
- def _pool_chg_paras_check(self, in_params, cim_sys_path):
- """
- Usage:
- CIM_StorageConfigurationCapabilities
- ['SupportedStoragePoolFeatures'] provide indication what
- parameters current array support when CreateOrModifyStoragePool()
- We will filter out the unsupported parameters.
- Parameter:
- in_params # a dict will be used for CreateOrModifyStoragePool()
- Returns:
- new_in_params # a dict of updated parameters
- """
- # EMC vendor specific value for thick pool.
- EMC_THINP_POOL_TYPE_THICK = 0
- new_in_params = in_params
- cim_scss = self._c.AssociatorNames(
- cim_sys_path,
- AssocClass='CIM_HostedService',
- ResultClass='CIM_StorageConfigurationService',)
- if len(cim_scss) != 1:
- return new_in_params
- cim_sccs = self._c.Associators(
- cim_scss[0],
- AssocClass='CIM_ElementCapabilities',
- ResultClass='CIM_StorageConfigurationCapabilities',
- PropertyList=['SupportedStoragePoolFeatures'])
- if len(cim_sccs) != 1:
- return new_in_params
-
- cur_features = cim_sccs[0]['SupportedStoragePoolFeatures']
- if 'InExtents' in new_in_params:
- if Smis.DMTF_ST_POOL_FEATURE_INEXTS not in cur_features:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from Volume or Disk")
- if 'InPools' in new_in_params:
- if Smis.DMTF_ST_POOL_FEATURE_MULTI_INPOOL not in cur_features \
- and Smis.DMTF_ST_POOL_FEATURE_SINGLE_INPOOL not in cur_features:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from Pool")
- if Smis.DMTF_ST_POOL_FEATURE_SINGLE_INPOOL in cur_features \
- and len(new_in_params['InPools']) > 1:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from multiple pools")
- # Vendor specific check
- if cim_sys_path.classname == 'Clar_StorageSystem':
- if 'Goal' in new_in_params and 'ElementName' in new_in_params:
- ## EMC VNX/CX RAID Group should not define a ElementName.
- cim_st_path = new_in_params['Goal']
- cim_st = self._c.GetInstance(
- cim_st_path,
- PropertyList=['ThinProvisionedPoolType'],
- LocalOnly=False)
- if cim_st['ThinProvisionedPoolType'] == \
- EMC_THINP_POOL_TYPE_THICK:
- del new_in_params['ElementName']
- if 'Pool' in new_in_params and 'Goal' in new_in_params:
- ## Expanding VNX/CX Pool/RAID Group shoud not define Goal
- ## Should we raise a error here?
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "EMC VNX/CX does not allowed change RAID " +
- "type or add different RAID type tier")
- return new_in_params
-
def _profile_is_supported(self, profile_name, spec_ver, strict=False,
raise_error=False):
"""
diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index 00b9184..e7db99f 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -297,104 +297,6 @@ class Client(INetworkAttachedStorage):
_check_search_key(search_key, Pool.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('pools', _del_self(locals()))
- ## Create new pool in user friendly way. Depending on this capability:
- ## Capabilities.POOL_CREATE
- ## For plugin developer: this method require complex codes to chose
- ## pool members, please refer to SimData.pool_create() in simarray.py for
- ## sample codes.
- ## Return the newly created pool object.
- # @param self The this pointer
- # @param system The system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param size_bytes The size in bytes for new pool.
- # New pool can have equal or larger size than
- # requested, but not less. Should larger than 0.
- # @param raid_type Optional. If defined, new pool should using
- # defined RAID type.
- # When member_type was set to Pool.MEMBER_TYPE_POOL,
- # only allowed raid_type is RAID_TYPE_UNKNOWN or
- # RAID_TYPE_NOT_APPLICABLE
- # @param member_type Optional. If defined, new pool will be assembled
- # by defined member types. For example;
- # when member_type == Pool.MEMBER_TYPE_DISK_SAS,
- # new pool will be created from SAS disks only.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Returns the created new pool object.
- """
- if size_bytes <= 0:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "'size_bytes' should larger than 0")
- return self._tp.rpc('pool_create', _del_self(locals()))
-
- ## Create new pool in the hard way by defined what exactly disks should
- ## be used. Depending on these capabilities:
- ## Capabilities.POOL_CREATE_FROM_DISKS
- ## Return the newly created pool object with all supported optional data.
- # @param self The this pointer
- # @param system_id The id of system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param disks The disks (list) to create new pool from.
- # The new pool could contain more disks than
- # requested due to internal needs, but if possible,
- # new pool should only contain requested disks.
- # @param raid_type The RAID level for new pool.
- # Capabilities.POOL_CREATE_DISK_RAID_XXX will
- # indicate the supported RAID level.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create_from_disks(self, system_id, pool_name, disks,
- raid_type, flags=0):
- """
- Creates pool from disks.
- Returns the created new pool object.
- """
- return self._tp.rpc('pool_create_from_disks', _del_self(locals()))
-
- ## Create new pool in the hard way by defined what exactly pool should
- ## be allocate space from. Depending on this capability:
- ## Capabilities.POOL_CREATE_FROM_POOL
- ## Return the newly created pool object with all supported optional data.
- # @param self The this pointer
- # @param system_id The id of system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param pool The pool to allocate space from for new pool.
- # @param size_bytes The size of the new pool.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create_from_pool(self, system_id, pool_name, pool,
- size_bytes, flags=0):
- """
- Creates pool from volumes.
- Returns the created new pool object.
- """
- return self._tp.rpc('pool_create_from_pool', _del_self(locals()))
-
- ## Remove a pool. This method depend on Capabilities.POOL_DELETE
- # @param self The this pointer
- # @param pool The pool object
- # @param flags Reserved for future use, must be zero.
- # @returns None on success, else job id. Raises LsmError on errors.
- @_return_requires(unicode)
- def pool_delete(self, pool, flags=0):
- """
- Return None on success, else job id. Raises LsmError on errors.
- """
- return self._tp.rpc('pool_delete', _del_self(locals()))
-
## Returns an array of system objects.
# @param self The this pointer
# @param flags Reserved for future use, must be zero.
diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index d11c170..f1674eb 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -457,7 +457,6 @@ class ErrorNumber(object):
NOT_FOUND_VOLUME = 205
NOT_FOUND_NFS_EXPORT = 206
NOT_FOUND_SYSTEM = 208
- NOT_FOUND_DISK = 209
NOT_LICENSED = 226
@@ -480,7 +479,6 @@ class ErrorNumber(object):
TRANSPORT_SERIALIZATION = 401
TRANSPORT_INVALID_ARG = 402
- DISK_BUSY = 500
VOLUME_BUSY = 501
ACCESS_GROUP_MASKED = 502 # refuse to remove the last initiator from
# access group which have volume masked or
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 17df3ec..3aba6c8 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -371,97 +371,6 @@ class Pool(IData):
TOTAL_SPACE_NOT_FOUND = -1
FREE_SPACE_NOT_FOUND = -1
- STRIPE_SIZE_NOT_FOUND = -1
-
- # RAID_xx name was following SNIA SMI-S 1.4 rev6 Block Book,
- # section '14.1.5.3', Table 255 - Supported Common RAID Levels
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
- MEMBER_TYPE_UNKNOWN = 0
- MEMBER_TYPE_DISK = 1
- MEMBER_TYPE_DISK_MIX = 10
- MEMBER_TYPE_DISK_ATA = 11
- MEMBER_TYPE_DISK_SATA = 12
- MEMBER_TYPE_DISK_SAS = 13
- MEMBER_TYPE_DISK_FC = 14
- MEMBER_TYPE_DISK_SOP = 15
- MEMBER_TYPE_DISK_SCSI = 16
- MEMBER_TYPE_DISK_NL_SAS = 17
- MEMBER_TYPE_DISK_HDD = 18
- MEMBER_TYPE_DISK_SSD = 19
- MEMBER_TYPE_DISK_HYBRID = 110
- MEMBER_TYPE_DISK_LUN = 111
-
- MEMBER_TYPE_POOL = 2
-
- _MEMBER_TYPE_2_DISK_TYPE = {
- MEMBER_TYPE_DISK: Disk.DISK_TYPE_UNKNOWN,
- MEMBER_TYPE_DISK_MIX: Disk.DISK_TYPE_UNKNOWN,
- MEMBER_TYPE_DISK_ATA: Disk.DISK_TYPE_ATA,
- MEMBER_TYPE_DISK_SATA: Disk.DISK_TYPE_SATA,
- MEMBER_TYPE_DISK_SAS: Disk.DISK_TYPE_SAS,
- MEMBER_TYPE_DISK_FC: Disk.DISK_TYPE_FC,
- MEMBER_TYPE_DISK_SOP: Disk.DISK_TYPE_SOP,
- MEMBER_TYPE_DISK_SCSI: Disk.DISK_TYPE_SCSI,
- MEMBER_TYPE_DISK_NL_SAS: Disk.DISK_TYPE_NL_SAS,
- MEMBER_TYPE_DISK_HDD: Disk.DISK_TYPE_HDD,
- MEMBER_TYPE_DISK_SSD: Disk.DISK_TYPE_SSD,
- MEMBER_TYPE_DISK_HYBRID: Disk.DISK_TYPE_HYBRID,
- MEMBER_TYPE_DISK_LUN: Disk.DISK_TYPE_LUN,
- }
-
- @staticmethod
- def member_type_is_disk(member_type):
- """
- Returns True if defined 'member_type' is disk.
- False when else.
- """
- return member_type in Pool._MEMBER_TYPE_2_DISK_TYPE
-
- @staticmethod
- def member_type_to_disk_type(member_type):
- """
- Convert member_type to disk_type.
- For non-disk member, we return Disk.DISK_TYPE_NOT_APPLICABLE
- """
- return Pool._MEMBER_TYPE_2_DISK_TYPE.get(member_type,
- Disk.DISK_TYPE_NOT_APPLICABLE)
-
- @staticmethod
- def disk_type_to_member_type(disk_type):
- """
- Convert disk_type to Pool.MEMBER_TYPE_DISK_XXXX
- Will return Pool.MEMBER_TYPE_DISK as failback.
- """
- # Invert dict. Assumes values are unique.
- inv_dict = dict((v, k)
- for k, v in Pool._MEMBER_TYPE_2_DISK_TYPE.iteritems())
- return inv_dict.get(disk_type, Pool.MEMBER_TYPE_DISK)
-
- THINP_TYPE_UNKNOWN = 0
- THINP_TYPE_THIN = 1
- THINP_TYPE_THICK = 5
- THINP_TYPE_NOT_APPLICABLE = 6
- # NOT_APPLICABLE means current pool is not implementing Thin Provisioning,
- # but can create thin or thick pool from it.
# Element Type indicate what kind of element could this pool create:
# * Another Pool
@@ -474,79 +383,24 @@ class Pool(IData):
ELEMENT_TYPE_DELTA = 1 << 4
ELEMENT_TYPE_SYS_RESERVED = 1 << 10 # Reserved for system use
- MAX_POOL_STATUS_BITS = 64
# Pool status could be any combination of these status.
STATUS_UNKNOWN = 1 << 0
- # UNKNOWN:
- # Failed to query out the status of Pool.
STATUS_OK = 1 << 1
- # OK:
- # Pool is accessible with no issue.
STATUS_OTHER = 1 << 2
- # OTHER:
- # Should explain in Pool.status_info for detail.
STATUS_STRESSED = 1 < 3
- # STRESSED:
- # Pool is under heavy workload which cause bad I/O performance.
STATUS_DEGRADED = 1 << 4
- # DEGRADED:
- # Pool is accessible but lost full RAID protection due to
- # I/O error or offline of one or more RAID member.
- # Example:
- # * RAID 6 pool lost access to 1 disk or 2 disks.
- # * RAID 5 pool lost access to 1 disk.
- # May explain detail in Pool.status_info.
- # Example:
- # * Pool.status = 'Disk 0_0_1 offline'
STATUS_ERROR = 1 << 5
- # OFFLINE:
- # Pool is not accessible for internal issue.
- # Should explain in Pool.status_info for reason.
STATUS_STARTING = 1 << 7
- # STARTING:
- # Pool is reviving from STOPPED status. Pool is not accessible.
STATUS_STOPPING = 1 << 8
- # STOPPING:
- # Pool is stopping by administrator. Pool is not accessible.
STATUS_STOPPED = 1 << 9
- # STOPPING:
- # Pool is stopped by administrator. Pool is not accessible.
STATUS_READ_ONLY = 1 << 10
- # READ_ONLY:
- # Pool is read only.
- # Pool.status_info should explain why.
STATUS_DORMANT = 1 << 11
- # DORMANT:
- # Pool is not accessible.
- # It's not stopped by administrator, but stopped for some mechanism.
- # For example, The DR pool acting as the SYNC replication target will be
- # in DORMANT state, As long as the PR(production) pool alive.
- # Another example could relocating.
STATUS_RECONSTRUCTING = 1 << 12
- # RECONSTRUCTING:
- # Pool is reconstructing the hash data or mirror data.
- # Mostly happen when disk revive from offline or disk replaced.
- # Pool.status_info can contain progress of this reconstruction job.
STATUS_VERIFYING = 1 << 13
- # VERIFYING:
- # Array is running integrity check on data of current pool.
- # It might be started by administrator or array itself.
- # Pool.status_info can contain progress of this verification job.
STATUS_INITIALIZING = 1 << 14
- # INITIALIZING:
- # Pool is in initialing state.
- # Mostly shown when new pool created or array boot up.
STATUS_GROWING = 1 << 15
- # GROWING:
- # Pool is growing its size and doing internal jobs.
- # Pool.status_info can contain progress of this growing job.
STATUS_SHRINKING = 1 << 16
- # SHRINKING:
- # Pool is shrinking its size and doing internal jobs.
- # Pool.status_info can contain progress of this shrinking job.
STATUS_DESTROYING = 1 << 17
- # DESTROYING:
- # Array is removing current pool.
def __init__(self, _id, _name, _element_type, _total_space, _free_space,
_status, _status_info, _system_id, _plugin_data=None):
@@ -886,29 +740,6 @@ class Capabilities(IData):
EXPORT_REMOVE = 123
EXPORT_CUSTOM_PATH = 124
- #Pool
- POOL_CREATE = 130
- POOL_CREATE_FROM_DISKS = 131
- POOL_CREATE_FROM_POOL = 133
-
- POOL_CREATE_DISK_RAID_0 = 140
- POOL_CREATE_DISK_RAID_1 = 141
- POOL_CREATE_DISK_RAID_JBOD = 142
- POOL_CREATE_DISK_RAID_3 = 143
- POOL_CREATE_DISK_RAID_4 = 144
- POOL_CREATE_DISK_RAID_5 = 145
- POOL_CREATE_DISK_RAID_6 = 146
- POOL_CREATE_DISK_RAID_10 = 147
- POOL_CREATE_DISK_RAID_50 = 148
- POOL_CREATE_DISK_RAID_51 = 149
- POOL_CREATE_DISK_RAID_60 = 150
- POOL_CREATE_DISK_RAID_61 = 151
- POOL_CREATE_DISK_RAID_15 = 152
- POOL_CREATE_DISK_RAID_16 = 153
- POOL_CREATE_DISK_RAID_NOT_APPLICABLE = 154
-
- POOL_DELETE = 200
-
POOLS_QUICK_SEARCH = 210
VOLUMES_QUICK_SEARCH = 211
DISKS_QUICK_SEARCH = 212
diff --git a/python_binding/lsm/_iplugin.py b/python_binding/lsm/_iplugin.py
index e1f7d4c..5973df6 100644
--- a/python_binding/lsm/_iplugin.py
+++ b/python_binding/lsm/_iplugin.py
@@ -129,40 +129,6 @@ class IPlugin(object):
class IStorageAreaNetwork(IPlugin):
- def pool_create(self, system_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Creates a pool letting the array pick the specifics
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
- def pool_create_from_disks(self, system_id, pool_name, member_ids,
- raid_type, flags=0):
- """
- Creates a pool letting the user select the disks
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
- def pool_create_from_pool(self, system_id, pool_name, member_id,
- size_bytes, flags=0):
- """
- Creates a pool from existing volumes
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
def volumes(self, search_key=None, search_value=None, flags=0):
"""
Returns an array of volume objects
diff --git a/test/plugin_test.py b/test/plugin_test.py
index ff4aafc..83f1b3b 100755
--- a/test/plugin_test.py
+++ b/test/plugin_test.py
@@ -107,12 +107,7 @@ def supported(cap, capability):
class TestProxy(object):
# Hash of all calls that can be async
- async_calls = {'pool_create': (unicode, lsm.Pool),
- 'pool_create_from_disks': (unicode, lsm.Pool),
- 'pool_create_from_volumes': (unicode, lsm.Pool),
- 'pool_create_from_pool': (unicode, lsm.Pool),
- 'pool_delete': (unicode,),
- 'volume_create': (unicode, lsm.Volume),
+ async_calls = {'volume_create': (unicode, lsm.Volume),
'volume_resize': (unicode, lsm.Volume),
'volume_replicate': (unicode, lsm.Volume),
'volume_replicate_range': (unicode,),
@@ -325,9 +320,6 @@ class TestPlugin(unittest.TestCase):
disks = self.c.disks()
self.assertTrue(len(disks) > 0, "We need at least 1 disk to test")
- def test_pool_create(self):
- pass
-
def _volume_create(self, system_id):
if system_id in self.pool_by_sys_id:
p = self._get_pool_by_usage(system_id,
diff --git a/test/tester.c b/test/tester.c
index 5ce641f..a0e23e3 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -1790,47 +1790,6 @@ START_TEST(test_invalid_input)
resized = NULL;
fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);
-
- /* Pool create */
- int raid_type = 65535;
- int member_type = 65535;
- uint64_t size = 0;
- int flags = 10;
-
- rc = lsm_pool_create(NULL, NULL, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, NULL, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, system, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- size = 1024*1024*1024;
-
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- raid_type = LSM_POOL_RAID_TYPE_0;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- member_type = LSM_POOL_MEMBER_TYPE_DISK;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- lsm_pool *pcp = NULL;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, &pcp, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- char *pcj = NULL;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, &pcp, &pcj, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
-
rc = lsm_system_record_array_free(sys, num_systems);
fail_unless(LSM_ERR_OK == rc, "%d", rc);
@@ -2281,173 +2240,6 @@ START_TEST(test_nfs_export_funcs)
}
END_TEST
-START_TEST(test_pool_delete)
-{
- int rc = 0;
- char *job = NULL;
- lsm_volume *v = NULL;
-
- printf("Testing pool delete!\n");
-
- lsm_pool *test_pool = get_test_pool(c);
-
- fail_unless( test_pool != NULL );
-
- if( test_pool ) {
-
- rc = lsm_volume_create(c, test_pool, "lsm_volume_pool_remove_test",
- 10000000, LSM_PROVISION_DEFAULT,
- &v, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- v = wait_for_job_vol(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
-
- if( v ) {
-
- rc = lsm_pool_delete(c, test_pool, &job, LSM_FLAG_RSVD);
-
- fail_unless(LSM_ERR_EXISTS_VOLUME == rc, "rc %d", rc);
-
- if( LSM_ERR_EXISTS_VOLUME == rc ) {
-
- /* Delete the volume and try again */
- rc = lsm_volume_delete(c, v, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- wait_for_job(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
-
- rc = lsm_pool_delete(c, test_pool, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- wait_for_job(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
- }
- }
-
- G(rc, lsm_pool_record_free, test_pool);
- test_pool = NULL;
- G(rc, lsm_volume_record_free, v);
- v = NULL;
- }
-}
-END_TEST
-
-START_TEST(test_pool_create)
-{
- int rc = 0;
- lsm_pool *pool = NULL;
- char *job = NULL;
- lsm_disk **disks = NULL;
- uint32_t num_disks = 0;
- lsm_pool *pool_one = NULL;
- lsm_system *system = get_system(c);
-
- /*
- * Test basic pool create option.
- */
- rc = lsm_pool_create(c, system, "pool_create_unit_test", 1024*1024*1024,
- LSM_POOL_RAID_TYPE_0, LSM_POOL_MEMBER_TYPE_DISK, &pool,
- &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d which_plugin %d", rc,
- which_plugin);
- }
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
-
- /*
- * Test pool creations from disks
- */
- rc = lsm_disk_list(c, NULL, NULL, &disks, &num_disks, LSM_FLAG_RSVD);
- lsm_disk *disks_to_use[128];
- uint32_t num_disks_to_use = 0;
-
- memset(disks_to_use, 0, sizeof(disks_to_use));
- fail_unless(LSM_ERR_OK == rc, "rc = %d", rc);
- if( LSM_ERR_OK == rc && num_disks ) {
- int i = 0;
-
- /* Python simulator one accepts same type and size */
- lsm_disk_type disk_type = lsm_disk_type_get(disks[num_disks-1]);
- uint64_t size = lsm_disk_number_of_blocks_get(disks[num_disks-1]);
-
- for( i = 0; i < num_disks; ++i ) {
- /* Only include disks of one type */
- if( lsm_disk_type_get(disks[i]) == disk_type &&
- size == lsm_disk_number_of_blocks_get(disks[i])) {
-
- disks_to_use[num_disks_to_use] = disks[i];
- num_disks_to_use += 1;
- }
- }
- }
-
- rc = lsm_pool_create_from_disks(c, system, "pool_create_from_disks",
- disks_to_use, num_disks_to_use,
- LSM_POOL_RAID_TYPE_0, &pool, &job,
- LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "lsmPoolCreateFromDisks %d (%s)", rc,
- error(lsm_error_last_get(c)));
- }
-
- G(rc, lsm_disk_record_array_free, disks, num_disks);
- memset(disks_to_use, 0, sizeof(disks_to_use));
-
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
-
- /* Test pool creation from pool */
- {
- if( pool_one ) {
- pool = NULL;
- job = NULL;
-
- rc = lsm_pool_create_from_pool(c, system, "New pool from pool",
- pool_one, 1024*1024*1024, &pool,
- &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc,
- "lsmPoolCreateFromVolumes %d (%s)",
- rc, error(lsm_error_last_get(c)));
- }
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
- }
- }
-
- if( pool_one ) {
- G(rc, lsm_pool_record_free, pool_one);
- pool_one = NULL;
- }
-
- if( system ) {
- G(rc, lsm_system_record_free, system);
- system = NULL;
- }
-}
-END_TEST
-
START_TEST(test_uri_parse)
{
const char uri_g[] = "sim://***@host:123/path/?namespace=root/uber";
@@ -2877,8 +2669,6 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_search_volumes);
tcase_add_test(basic, test_search_pools);
- tcase_add_test(basic, test_pool_delete);
- tcase_add_test(basic, test_pool_create);
tcase_add_test(basic, test_uri_parse);
tcase_add_test(basic, test_error_reporting);
diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index 8d65abf..c7f9862 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,6 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,
from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- pool_raid_type_str_to_type, pool_member_type_str_to_type,
vol_provision_str_to_type, vol_rep_type_str_to_type,
ag_init_type_str_to_lsm)
@@ -133,30 +132,6 @@ replicate_help = "replication type: " + ", ".join(replicate_types)
size_help = 'Can use B, KiB, MiB, GiB, TiB, PiB postfix (IEC sizing)'
-member_types = ('DISK', 'VOLUME', 'POOL', 'DISK_ATA', 'DISK_SATA',
- 'DISK_SAS', 'DISK_FC', 'DISK_SOP', 'DISK_SCSI', 'DISK_NL_SAS',
- 'DISK_HDD', 'DISK_SSD', 'DISK_HYBRID')
-
-member_types_formatted = ''
-for i in range(0, len(member_types), 4):
- member_types_formatted += "\n "
- for member_type_str in member_types[i:i + 4]:
- member_types_formatted += "%-15s" % member_type_str
-
-member_help = "Valid member type: " + member_types_formatted
-
-raid_types = ('JBOD', 'RAID0', 'RAID1', 'RAID3', 'RAID4', 'RAID5', 'RAID6',
- 'RAID10', 'RAID50', 'RAID60', 'RAID51', 'RAID61',
- 'NOT_APPLICABLE')
-
-raid_types_formatted = ''
-for i in range(0, len(raid_types), 4):
- raid_types_formatted += "\n "
- for raid_type_str in raid_types[i:i + 4]:
- raid_types_formatted += "%-15s" % raid_type_str
-
-raid_help = "Valid RAID type:" + raid_types_formatted
-
sys_id_opt = dict(name='--sys', metavar='<SYS_ID>', help='System ID')
sys_id_filter_opt = sys_id_opt.copy()
sys_id_filter_opt['help'] = 'Search by System ID'
@@ -603,66 +578,6 @@ cmds = (
],
),
- dict(
- name='pool-create',
- help='Creates a storage pool',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(size_opt),
- ],
- optional=[
- dict(name="--raid-type", metavar='<RAID_TYPE>',
- help=raid_help,
- choices=raid_types,
- type=str.upper),
- dict(name="--member-type", metavar='<MEMBER_TYPE>',
- help=member_help,
- choices=member_types),
- ],
- ),
-
- dict(
- name='pool-create-from-disks',
- help='Creates a storage pool from disks',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(name="--member-id", metavar='<MEMBER_ID>',
- help='The ID of disks to create new pool\n'
- 'This is a repeatable argument',
- action='append'),
- dict(name="--raid-type", metavar='<RAID_TYPE>',
- help=raid_help,
- choices=raid_types,
- type=str.upper),
- ],
- ),
-
- dict(
- name='pool-create-from-pool',
- help='Creates a sub-pool from another storage pool',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(name="--member-id", metavar='<POOL_ID>',
- help='The ID of pool to create new pool from\n',
- action='append'),
- dict(name="--size", metavar='<SIZE>',
- help='The size of new pool'),
- ],
- ),
-
- dict(
- name='pool-delete',
- help='Deletes a storage pool',
- args=[
- dict(pool_id_opt),
- ],
- ),
)
aliases = (
@@ -1389,95 +1304,6 @@ class CmdLine:
args.file),
None)
- ## Deletes a pool
- def pool_delete(self, args):
- pool = _get_item(self.c.pools(), args.pool, "pool id")
- if self.confirm_prompt(True):
- self._wait_for_it("pool-delete",
- self.c.pool_delete(pool),
- None)
-
- ## Creates a pool
- def pool_create(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- pool_name = args.name
- raid_type = Pool.RAID_TYPE_UNKNOWN
- member_type = Pool.MEMBER_TYPE_UNKNOWN
- size_bytes = self._size(self.args.size)
-
- if args.raid_type:
- raid_type = pool_raid_type_str_to_type(
- self.args.raid_type)
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- raise ArgError("Unknown RAID type specified: %s" %
- args.raid_type)
-
- if args.member_type:
- member_type = pool_member_type_str_to_type(
- args.member_type)
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- raise ArgError("Unknown member type specified: %s" %
- args.member_type)
-
- pool = self._wait_for_it("pool-create",
- *self.c.pool_create(system,
- pool_name,
- size_bytes,
- raid_type,
- member_type,
- 0))
- self.display_data([pool])
-
- def pool_create_from_disks(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- if len(args.member_id) <= 0:
- raise ArgError("No disk ID was provided for new pool")
-
- member_ids = args.member_id
- disks_to_use = []
- disks = self.c.disks()
- disk_ids = dict((x.id, x) for x in disks)
- for member_id in member_ids:
- if member_id not in disk_ids:
- raise ArgError("Invalid Disk ID specified in " +
- "--member-id %s " % member_id)
- else:
- disks_to_use.append(disk_ids[member_id])
-
- raid_type = pool_raid_type_str_to_type(self.args.raid_type)
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- raise ArgError("Unknown RAID type specified: %s" %
- self.args.raid_type)
-
- pool_name = args.name
- pool = self._wait_for_it(
- "pool-create-from-disks",
- *self.c.pool_create_from_disks(
- system, pool_name, disks_to_use, raid_type, 0))
- self.display_data([pool])
-
- def pool_create_from_pool(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- if len(args.member_id) <= 0:
- raise ArgError("No volume ID was provided for new pool")
-
- member_ids = args.member_id
- if len(member_ids) > 1:
- raise ArgError("Two or more member defined, but creating pool " +
- "from pool only allow one member pool")
-
- member_id = member_ids[0]
- pool = _get_item(self.c.pools(), member_id, "pool id")
-
- size_bytes = self._size(self.args.size)
-
- pool_name = args.name
- pool = self._wait_for_it(
- "pool-create-from-pool",
- *self.c.pool_create_from_pool(
- system, pool_name, pool, size_bytes, 0))
- self.display_data([pool])
-
def _read_configfile(self):
"""
Set uri from config file. Will be overridden by cmdline option or
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 9a18cd5..3c7a83d 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -131,81 +131,6 @@ def pool_element_type_to_str(element_type):
return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
-_POOL_RAID_TYPE_CONV = {
- Pool.RAID_TYPE_RAID0: 'RAID0', # stripe
- Pool.RAID_TYPE_RAID1: 'RAID1', # mirror
- Pool.RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
- # parity
- Pool.RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
- # parity
- Pool.RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
- # parity
- Pool.RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
- Pool.RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
- Pool.RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
- Pool.RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
- Pool.RAID_TYPE_RAID50: 'RAID50', # stripe of parities
- Pool.RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
- Pool.RAID_TYPE_RAID51: 'RAID51', # mirror of parities
- Pool.RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
- Pool.RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
- Pool.RAID_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- Pool.RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
- # different RAID type
-}
-
-
-def pool_raid_type_to_str(raid_type):
- return _enum_type_to_str(raid_type, _POOL_RAID_TYPE_CONV)
-
-
-def pool_raid_type_str_to_type(raid_type_str):
- return _str_to_enum(raid_type_str, _POOL_RAID_TYPE_CONV)
-
-
-_POOL_MEMBER_TYPE_CONV = {
- Pool.MEMBER_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.MEMBER_TYPE_DISK: 'DISK', # Pool was created from Disk(s).
- Pool.MEMBER_TYPE_DISK_MIX: 'DISK_MIX', # Has two or more types of disks.
- Pool.MEMBER_TYPE_DISK_ATA: 'DISK_ATA',
- Pool.MEMBER_TYPE_DISK_SATA: 'DISK_SATA',
- Pool.MEMBER_TYPE_DISK_SAS: 'DISK_SAS',
- Pool.MEMBER_TYPE_DISK_FC: 'DISK_FC',
- Pool.MEMBER_TYPE_DISK_SOP: 'DISK_SOP',
- Pool.MEMBER_TYPE_DISK_SCSI: 'DISK_SCSI',
- Pool.MEMBER_TYPE_DISK_NL_SAS: 'DISK_NL_SAS',
- Pool.MEMBER_TYPE_DISK_HDD: 'DISK_HDD',
- Pool.MEMBER_TYPE_DISK_SSD: 'DISK_SSD',
- Pool.MEMBER_TYPE_DISK_HYBRID: 'DISK_HYBRID',
- Pool.MEMBER_TYPE_POOL: 'POOL', # Pool was created from other Pool(s).
-}
-
-
-def pool_member_type_to_str(member_type):
- return _enum_type_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
-
-
-def pool_member_type_str_to_type(member_type_str):
- return _str_to_enum(member_type_str, _POOL_MEMBER_TYPE_CONV)
-
-
-_POOL_THINP_TYPE_CONV = {
- Pool.THINP_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.THINP_TYPE_THIN: 'THIN',
- Pool.THINP_TYPE_THICK: 'THICK',
- Pool.THINP_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
-}
-
-
-def pool_thinp_type_to_str(thinp_type):
- return _enum_type_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
-
-
-def pool_thinp_type_str_to_type(thinp_type_str):
- return _str_to_enum(thinp_type_str, _POOL_THINP_TYPE_CONV)
-
-
_VOL_STATUS_CONV = {
Volume.STATUS_UNKNOWN: 'Unknown',
Volume.STATUS_OK: 'OK',
@@ -393,9 +318,6 @@ class DisplayData(object):
POOL_VALUE_CONV_ENUM = {
'status': pool_status_to_str,
- 'raid_type': pool_raid_type_to_str,
- 'member_type': pool_member_type_to_str,
- 'thinp_type': pool_thinp_type_to_str,
'element_type': pool_element_type_to_str,
}
suggest users create pools using vendor-specific tools.
* Removed these methods:
C:
lsm_pool_create()
lsm_pool_create_from_disks()
lsm_pool_create_from()
lsm_pool_create_from_pool()
Python:
pool_create()
pool_create_from_disks()
pool_create_from_pool()
pool_create_from_volumes()
pool_delete()
* Removed unused RAID_TYPE and MEMBER_TYPE.
* Removed related capabilities.
* Removed error number:
C:
LSM_ERR_NOT_FOUND_DISK
LSM_ERR_DISK_BUSY
Python:
ErrorNumber.NOT_FOUND_DISK
ErrorNumber.DISK_BUSY
* Plugins cleaned.
* For 'sim://' plugin, it uses raid type and member type to calculate pool
size. We move pool constants into PoolRAID class for future use.
* For 'ontap://' plugin, its raid_type detection code is just commented out.
We might use it in the future.
* lsmcli cleaned.
* Test updated:
tester.c
plugin_test.py
* "make check" and "make distcheck" passed.
Signed-off-by: Gris Ge <***@redhat.com>
---
c_binding/include/libstoragemgmt/libstoragemgmt.h | 87 ---
.../libstoragemgmt/libstoragemgmt_capabilities.h | 22 -
.../include/libstoragemgmt/libstoragemgmt_error.h | 2 -
.../libstoragemgmt/libstoragemgmt_plug_interface.h | 68 --
.../include/libstoragemgmt/libstoragemgmt_types.h | 38 --
c_binding/lsm_mgmt.cpp | 217 -------
c_binding/lsm_plugin_ipc.cpp | 167 -----
doc/man/lsmcli.1.in | 90 ---
plugin/ontap/ontap.py | 24 +-
plugin/sim/simarray.py | 557 +++--------------
plugin/sim/simulator.py | 20 -
plugin/simc/simc_lsmplugin.c | 142 -----
plugin/smispy/smis.py | 694 ---------------------
python_binding/lsm/_client.py | 98 ---
python_binding/lsm/_common.py | 2 -
python_binding/lsm/_data.py | 169 -----
python_binding/lsm/_iplugin.py | 34 -
test/plugin_test.py | 10 +-
test/tester.c | 210 -------
tools/lsmcli/cmdline.py | 174 ------
tools/lsmcli/data_display.py | 78 ---
21 files changed, 104 insertions(+), 2799 deletions(-)
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt.h b/c_binding/include/libstoragemgmt/libstoragemgmt.h
index 968bfdd..1587f0d 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt.h
@@ -237,93 +237,6 @@ extern "C" {
uint32_t *count, lsm_flag flags);
/**
- * Create new pool allowing the array to make the most decisions.
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name Name of new pool
- * @param [in] size_bytes Size of new pool in bytes
- * @param [in] raid_type Optional. If defined, new pool should
- * using defined RAID type. When
- * member_type was set to LSM_POOL_MEMBER_TYPE_POOL,
- * only allowed raid_type is LSM_POOL_RAID_TYPE_UNKNOWN or
- * LSM_POOL_RAID_TYPE_NOT_APPLICABLE
- * @param [in] member_type Optional. If defined, new pool will be assembled
- * by defined member types. For example;
- * when member_type == LSM_POOL_MEMBER_TYPE_DISK_SAS,
- * new pool will be created from SAS disks
- * only.
- * @param [out] pool Newly created pool
- * @param [out] job Job ID of async. operation
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create(lsm_connect *conn,
- lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags);
-
- /**
- * Create a pool specifying specific disks to use.
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name The name of the new pool, will not fail
- * if request name cannot be fulfilled
- * @param [in] disks An array of disk pointers to create new
- * pool from.
- * The new pool could contain more disks
- * than requested due to internal needs,
- * but if possible should only contain
- * requested disks.
- * @param [in] num_disks Number of disks in disks array
- * @param [in] raid_type The RAID type for new pool
- * @param [out] pool Newly created pool
- * @param [out] job Job ID of async. operation
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create_from_disks(lsm_connect *conn,
- lsm_system *system, const char *pool_name,
- lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags);
-
- /**
- * Create new pool from an existing pool
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] system System of where pool will reside
- * @param [in] pool_name The name of the new pool, will not fail
- * if request name cannot be fulfilled
- * @param [in] pool The pool to create new pool from
- * @param [in] size_bytes Desired size of new pool
- * @param [out] created_pool Newly created pool
- * @param [out] job Job ID of async.
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_create_from_pool(lsm_connect *conn,
- lsm_system *system, const char *pool_name,
- lsm_pool *pool, uint64_t size_bytes,
- lsm_pool** created_pool, char **job, lsm_flag flags);
-
- /**
- * Deletes a pool
- * @param [in] conn Valid connection @see lsm_connect_password
- * @param [in] pool The pool to delete
- * @param [out] job_id Job id of job if async.
- * @param [in] flags Reserved for future use, must be zero
- * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async.,
- * else error code
- */
- int LSM_DLL_EXPORT lsm_pool_delete(lsm_connect *conn, lsm_pool *pool,
- char **job_id, lsm_flag flags);
-
-
- /**
* Volume management functions
*/
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
index bc45f7e..818c8b2 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h
@@ -101,28 +101,6 @@ typedef enum {
LSM_CAP_EXPORT_REMOVE = 123, /**< Remove an export */
LSM_CAP_EXPORT_CUSTOM_PATH = 124, /**< Plug-in allows user to define custome export path */
- LSM_CAP_POOL_CREATE = 130, /**< Pool create support */
- LSM_CAP_POOL_CREATE_FROM_DISKS = 131, /**< Pool create from disks */
- LSM_CAP_POOL_CREATE_FROM_POOL = 133, /**< Pool create from pool */
-
- LSM_CAP_POOL_CREATE_DISK_RAID_0 = 140,
- LSM_CAP_POOL_CREATE_DISK_RAID_1 = 141,
- LSM_CAP_POOL_CREATE_DISK_RAID_JBOD = 142,
- LSM_CAP_POOL_CREATE_DISK_RAID_3 = 143,
- LSM_CAP_POOL_CREATE_DISK_RAID_4 = 144,
- LSM_CAP_POOL_CREATE_DISK_RAID_5 = 145,
- LSM_CAP_POOL_CREATE_DISK_RAID_6 = 146,
- LSM_CAP_POOL_CREATE_DISK_RAID_10 = 147,
- LSM_CAP_POOL_CREATE_DISK_RAID_50 = 148,
- LSM_CAP_POOL_CREATE_DISK_RAID_51 = 149,
- LSM_CAP_POOL_CREATE_DISK_RAID_60 = 150,
- LSM_CAP_POOL_CREATE_DISK_RAID_61 = 151,
- LSM_CAP_POOL_CREATE_DISK_RAID_15 = 152,
- LSM_CAP_POOL_CREATE_DISK_RAID_16 = 153,
- LSM_CAP_POOL_CREATE_DISK_RAID_NOT_APPLICABLE = 154,
-
- LSM_CAP_POOL_DELETE = 200, /**< Pool delete support */
-
LSM_CAP_POOLS_QUICK_SEARCH = 210, /**< Seach occurs on array */
LSM_CAP_VOLUMES_QUICK_SEARCH = 211, /**< Seach occurs on array */
LSM_CAP_DISKS_QUICK_SEARCH = 212, /**< Seach occurs on array */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
index 5ce1d2c..d381912 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_error.h
@@ -80,7 +80,6 @@ typedef enum {
LSM_ERR_NOT_FOUND_NFS_EXPORT = 206, /**< NFS export not found */
LSM_ERR_NOT_FOUND_INITIATOR = 207, /**< Initiator not found */
LSM_ERR_NOT_FOUND_SYSTEM = 208, /**< System not found */
- LSM_ERR_NOT_FOUND_DISK = 209, /**< Disk not found */
LSM_ERR_NOT_LICENSED = 226, /**< Need license for feature */
@@ -103,7 +102,6 @@ typedef enum {
LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**< Transport serialization error */
LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**< Parameter transported over IPC is invalid */
- LSM_ERR_DISK_BUSY = 500, /* Disk already in use */
LSM_ERR_VOLUME_BUSY = 501, /* Volume already in use */
ACCESS_GROUP_MASKED = 502,
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
index 3296252..1da1aa3 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h
@@ -230,70 +230,6 @@ typedef int (*lsm_plug_target_port_list)( lsm_plugin_ptr c,
uint32_t *count, lsm_flag flags);
/**
- * Create a pool.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System
- * @param[in] pool_name Human name of pool
- * @param[in] size_bytes Desired size of pool
- * @param[in] raid_type Raid type for pool
- * @param[in] member_type Type of individual members eg. SAS/FC/SSD etc.
- * @param[out] pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create)( lsm_plugin_ptr c, lsm_system* system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type, lsm_pool_member_type member_type,
- lsm_pool **pool, char **job, lsm_flag flags);
-
-/**
- * Create a pool and specifying disks to use.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System
- * @param[in] pool_name Human name of pool
- * @param[in] disks Array of disk pointers to create pool from
- * @param[in] num_disks Number of disks
- * @param[in] raid_type Raid type for pool
- * @param[out] pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create_from_disks)( lsm_plugin_ptr c,
- lsm_system *system,
- const char *pool_name, lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type, lsm_pool **pool, char **job,
- lsm_flag flags);
-
-
-/**
- * Create a pool and specifying pool to use.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] system System id
- * @param[in] pool_name Human name of pool
- * @param[in] pool Pool to create pool from
- * @param[in] size_bytes Size of pool
- * @param[out] created_pool Newly create pool if done sync.
- * @param[out] job Job id if execution is async.
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_create_from_pool)( lsm_plugin_ptr c,
- lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags );
-
-
-/**
- * Delete a pool.
- * @param[in] c Valid lsm plug-in pointer
- * @param[in] pool Pool to delete
- * @param[out] job Job pointer if job is async
- * @return LSM_ERR_OK, else error reason
- */
-typedef int (*lsm_plug_pool_delete)( lsm_plugin_ptr c, lsm_pool *pool, char **job,
- lsm_flag flags);
-
-/**
* Creates a volume, callback function signature
* @param[in] c Valid lsm plug-in pointer
* @param[in] pool Pool to allocated storage from
@@ -822,10 +758,6 @@ typedef int (*lsm_plug_nfs_export_remove)( lsm_plugin_ptr c, lsm_nfs_export *e,
struct lsm_san_ops_v1 {
lsm_plug_volume_list vol_get; /**< retrieving volumes */
lsm_plug_disk_list disk_get; /**< retrieve disks */
- lsm_plug_pool_create pool_create; /**< Pool create */
- lsm_plug_pool_create_from_disks pool_create_from_disks; /**< Pool create from disks */
- lsm_plug_pool_create_from_pool pool_create_from_pool; /**< Pool creation from pool */
- lsm_plug_pool_delete pool_delete; /**< Delete a pool */
lsm_plug_volume_create vol_create; /**< creating a lun */
lsm_plug_volume_replicate vol_replicate; /**< replicating lun */
lsm_plug_volume_replicate_range_block_size vol_rep_range_bs; /**< volume replication range block size */
diff --git a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
index 4d3b6bd..82a23a8 100644
--- a/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
+++ b/c_binding/include/libstoragemgmt/libstoragemgmt_types.h
@@ -254,44 +254,6 @@ typedef enum {
#define LSM_POOL_ELEMENT_TYPE_SYS_RESERVED 0x0000000000000400
typedef enum {
- LSM_POOL_MEMBER_TYPE_UNKNOWN = 0,
- LSM_POOL_MEMBER_TYPE_DISK = 1,
- LSM_POOL_MEMBER_TYPE_POOL = 2,
- LSM_POOL_MEMBER_TYPE_DISK_MIX = 10,
- LSM_POOL_MEMBER_TYPE_DISK_ATA = 11,
- LSM_POOL_MEMBER_TYPE_DISK_SATA = 12,
- LSM_POOL_MEMBER_TYPE_DISK_SAS = 13,
- LSM_POOL_MEMBER_TYPE_DISK_FC = 14,
- LSM_POOL_MEMBER_TYPE_DISK_SOP = 15,
- LSM_POOL_MEMBER_TYPE_DISK_SCSI = 16,
- LSM_POOL_MEMBER_TYPE_DISK_NL_SAS = 17,
- LSM_POOL_MEMBER_TYPE_DISK_HDD = 18,
- LSM_POOL_MEMBER_TYPE_DISK_SSD = 19,
- LSM_POOL_MEMBER_TYPE_DISK_HYBRID = 110,
- LSM_POOL_MEMBER_TYPE_DISK_LUN = 111
-} lsm_pool_member_type;
-
-typedef enum {
- LSM_POOL_RAID_TYPE_0 = 0,
- LSM_POOL_RAID_TYPE_1 = 1,
- LSM_POOL_RAID_TYPE_3 = 3,
- LSM_POOL_RAID_TYPE_4 = 4,
- LSM_POOL_RAID_TYPE_5 = 5,
- LSM_POOL_RAID_TYPE_6 = 6,
- LSM_POOL_RAID_TYPE_10 = 10,
- LSM_POOL_RAID_TYPE_15 = 15,
- LSM_POOL_RAID_TYPE_16 = 16,
- LSM_POOL_RAID_TYPE_50 = 50,
- LSM_POOL_RAID_TYPE_60 = 60,
- LSM_POOL_RAID_TYPE_51 = 51,
- LSM_POOL_RAID_TYPE_61 = 61,
- LSM_POOL_RAID_TYPE_JBOD = 20,
- LSM_POOL_RAID_TYPE_UNKNOWN = 21,
- LSM_POOL_RAID_TYPE_NOT_APPLICABLE = 22,
- LSM_POOL_RAID_TYPE_MIXED = 23
-} lsm_pool_raid_type;
-
-typedef enum {
LSM_PORT_TYPE_UNKNOWN = 0,
LSM_PORT_TYPE_OTHER = 1,
LSM_PORT_TYPE_FC = 2,
diff --git a/c_binding/lsm_mgmt.cpp b/c_binding/lsm_mgmt.cpp
index 5805cf9..9d86714 100644
--- a/c_binding/lsm_mgmt.cpp
+++ b/c_binding/lsm_mgmt.cpp
@@ -888,223 +888,6 @@ static void* parse_job_response(lsm_connect *c, Value response, int &rc,
return val;
}
-static int valid_pool_raid_type(lsm_pool_raid_type validate)
-{
- switch(validate) {
- case (LSM_POOL_RAID_TYPE_0):
- case (LSM_POOL_RAID_TYPE_1):
- case (LSM_POOL_RAID_TYPE_3):
- case (LSM_POOL_RAID_TYPE_5):
- case (LSM_POOL_RAID_TYPE_6):
- case (LSM_POOL_RAID_TYPE_10):
- case (LSM_POOL_RAID_TYPE_15):
- case (LSM_POOL_RAID_TYPE_16):
- case (LSM_POOL_RAID_TYPE_50):
- case (LSM_POOL_RAID_TYPE_60):
- case (LSM_POOL_RAID_TYPE_51):
- case (LSM_POOL_RAID_TYPE_61):
- case (LSM_POOL_RAID_TYPE_JBOD):
- case (LSM_POOL_RAID_TYPE_UNKNOWN):
- case (LSM_POOL_RAID_TYPE_NOT_APPLICABLE):
- case (LSM_POOL_RAID_TYPE_MIXED):
- break;
- default:
- return 0;
- }
- return 1;
-}
-
-static int valid_pool_member_type(lsm_pool_member_type validate)
-{
- switch(validate) {
- case (LSM_POOL_MEMBER_TYPE_UNKNOWN):
- case (LSM_POOL_MEMBER_TYPE_DISK):
- case (LSM_POOL_MEMBER_TYPE_POOL):
- case (LSM_POOL_MEMBER_TYPE_DISK_MIX):
- case (LSM_POOL_MEMBER_TYPE_DISK_ATA):
- case (LSM_POOL_MEMBER_TYPE_DISK_SATA):
- case (LSM_POOL_MEMBER_TYPE_DISK_SAS):
- case (LSM_POOL_MEMBER_TYPE_DISK_FC):
- case (LSM_POOL_MEMBER_TYPE_DISK_SOP):
- case (LSM_POOL_MEMBER_TYPE_DISK_SCSI):
- case (LSM_POOL_MEMBER_TYPE_DISK_NL_SAS):
- case (LSM_POOL_MEMBER_TYPE_DISK_HDD):
- case (LSM_POOL_MEMBER_TYPE_DISK_SSD):
- case (LSM_POOL_MEMBER_TYPE_DISK_HYBRID):
- case (LSM_POOL_MEMBER_TYPE_DISK_LUN):
- break;
- default:
- return 0;
- }
- return 1;
-}
-
-int lsm_pool_create(lsm_connect *c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags)
-{
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || CHECK_STR(pool_name) || !size_bytes ||
- CHECK_RP(pool)|| CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ||
- !valid_pool_raid_type(raid_type) ||
- !valid_pool_member_type(member_type)) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p["size_bytes"] = Value(size_bytes);
- p["raid_type"] = Value((int32_t)raid_type);
- p["member_type"] = Value((int32_t)member_type);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, "pool_create", parameters, response);
- if( LSM_ERR_OK == rc ) {
- *pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
-}
-
-
-static int lsm_pool_create_from(lsm_connect *c,
- lsm_system *system, const char *pool_name,
- std::vector<Value> &member_ids, lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags,
- const char *member_id, const char *method)
-{
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || CHECK_STR(pool_name) ||
- CHECK_RP(pool)|| CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ||
- !valid_pool_raid_type(raid_type) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p[member_id] = Value(member_ids);
- p["raid_type"] = Value((int32_t)raid_type);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, method, parameters, response);
- if( LSM_ERR_OK == rc ) {
- *pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
-}
-
-int LSM_DLL_EXPORT lsm_pool_create_from_disks(lsm_connect *c,
- lsm_system *system, const char *pool_name,
- lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type,
- lsm_pool** pool, char **job, lsm_flag flags)
-{
- uint32_t i;
-
- CONN_SETUP(c);
-
- if( !disks || !num_disks ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- /* Create disks container */
- std::vector<Value> d;
- for( i = 0; i < num_disks; ++i ) {
- d.push_back(disk_to_value(disks[i]));
- }
-
- return lsm_pool_create_from(c, system, pool_name, d, raid_type, pool, job,
- flags, "disks", "pool_create_from_disks");
-
-}
-
-
- int lsm_pool_create_from_pool(lsm_connect *c, lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags)
- {
- CONN_SETUP(c);
-
- if( !LSM_IS_SYSTEM(system) || !LSM_IS_POOL(pool) || CHECK_STR(pool_name) ||
- !size_bytes || CHECK_RP(created_pool)|| CHECK_RP(job) ||
- LSM_FLAG_UNUSED_CHECK(flags) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- std::map<std::string, Value> p;
- p["system"] = system_to_value(system);
- p["pool_name"] = Value(pool_name);
- p["size_bytes"] = Value(size_bytes);
- p["pool"] = pool_to_value(pool);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- int rc = rpc(c, "pool_create_from_pool", parameters, response);
- if( LSM_ERR_OK == rc ) {
- *created_pool = (lsm_pool *)parse_job_response(c, response, rc, job,
- (convert)value_to_pool);
- }
- return rc;
- }
-
-int lsm_pool_delete(lsm_connect *c, lsm_pool *pool, char **job, lsm_flag flags)
-{
- int rc;
- CONN_SETUP(c);
-
- if( !LSM_IS_POOL(pool) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags) ) {
- return LSM_ERR_INVALID_ARGUMENT;
- }
-
- try {
-
- std::map<std::string, Value> p;
- p["pool"] = pool_to_value(pool);
- p["flags"] = Value(flags);
-
- Value parameters(p);
- Value response;
-
- rc = rpc(c, "pool_delete", parameters, response);
- if( LSM_ERR_OK == rc ) {
- //We get a value back, either null or job id.
- if( Value::string_t == response.valueType() ) {
- *job = strdup(response.asString().c_str());
-
- if( *job ) {
- rc = LSM_ERR_JOB_STARTED;
- } else {
- rc = LSM_ERR_NO_MEMORY;
- }
- }
- }
- } catch( const ValueException &ve ) {
- rc = logException(c, LSM_ERR_LIB_BUG, "Unexpected type",
- ve.what());
- }
- return rc;
- }
-
int lsm_volume_create(lsm_connect *c, lsm_pool *pool, const char *volumeName,
uint64_t size, lsm_provision_type provisioning,
lsm_volume **newVolume, char **job, lsm_flag flags)
diff --git a/c_binding/lsm_plugin_ipc.cpp b/c_binding/lsm_plugin_ipc.cpp
index b174d09..eef07db 100644
--- a/c_binding/lsm_plugin_ipc.cpp
+++ b/c_binding/lsm_plugin_ipc.cpp
@@ -543,169 +543,6 @@ static int handle_target_ports(lsm_plugin_ptr p, Value ¶ms, Value &response)
return rc;
}
-static int handle_pool_create(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_size = params["size_bytes"];
- Value v_raid_t = params["raid_type"];
- Value v_member_t = params["member_type"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::numeric_t == v_size.valueType() &&
- Value::numeric_t == v_raid_t.valueType() &&
- Value::numeric_t == v_member_t.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- lsm_system *system = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- uint64_t size = v_size.asUint64_t();
- lsm_pool_raid_type raid_type = (lsm_pool_raid_type)v_raid_t.asInt32_t();
- lsm_pool_member_type member_type = (lsm_pool_member_type)v_member_t.asInt32_t();
- lsm_pool *pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create(p, system, pool_name, size, raid_type,
- member_type, &pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(pool);
- response = job_handle(p, job);
- lsm_pool_record_free(pool);
- lsm_system_record_free(system);
- free(job);
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_create_from_disks(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create_from_disks ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_disks = params["disks"];
- Value v_raid_t = params["raid_type"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::array_t == v_disks.valueType() &&
- Value::numeric_t == v_raid_t.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- /* Get the array of disks */
- lsm_disk **disks = NULL;
- uint32_t num_disks = 0;
- rc = value_array_to_disks(v_disks, &disks, &num_disks);
-
- if( LSM_ERR_OK == rc ) {
- lsm_system *sys = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- lsm_pool_raid_type raid_type = (lsm_pool_raid_type)v_raid_t.asInt32_t();
-
- lsm_pool *pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create_from_disks(p, sys, pool_name,
- disks, num_disks, raid_type,
- &pool, &job, LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(pool);
- response = job_handle(p, job);
- lsm_disk_record_array_free(disks, num_disks);
- lsm_pool_record_free(pool);
- lsm_system_record_free(sys);
- free(job);
- }
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_create_from_pool(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_create_from_pool ) {
-
- Value v_sys = params["system"];
- Value v_pool_name = params["pool_name"];
- Value v_pool = params["pool"];
- Value v_size = params["size_bytes"];
-
- if( Value::object_t == v_sys.valueType() &&
- Value::string_t == v_pool_name.valueType() &&
- Value::object_t == v_pool.valueType() &&
- Value::numeric_t == v_size.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params)) {
-
- lsm_system *sys = value_to_system(v_sys);
- const char *pool_name = v_pool_name.asC_str();
- lsm_pool *pool = value_to_pool(v_pool);
- uint64_t size = v_size.asUint64_t();
-
- lsm_pool *created_pool = NULL;
- char *job = NULL;
-
- rc = p->san_ops->pool_create_from_pool(p, sys, pool_name,
- pool, size, &created_pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- Value p = pool_to_value(created_pool);
- response = job_handle(p, job);
- lsm_pool_record_free(created_pool);
- lsm_pool_record_free(pool);
- lsm_system_record_free(sys);
- free(job);
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
-static int handle_pool_delete(lsm_plugin_ptr p, Value ¶ms, Value &response)
-{
- int rc = LSM_ERR_NO_SUPPORT;
- if( p && p->san_ops && p->san_ops->pool_delete ) {
- Value v_pool = params["pool"];
-
- if(Value::object_t == v_pool.valueType() &&
- LSM_FLAG_EXPECTED_TYPE(params) ) {
- lsm_pool *pool = value_to_pool(v_pool);
-
- if( pool ) {
- char *job = NULL;
-
- rc = p->san_ops->pool_delete(p, pool, &job,
- LSM_FLAG_GET_VALUE(params));
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- response = Value(job);
- }
-
- lsm_pool_record_free(pool);
- free(job);
- } else {
- rc = LSM_ERR_NO_MEMORY;
- }
-
- } else {
- rc = LSM_ERR_TRANSPORT_INVALID_ARG;
- }
- }
- return rc;
-}
-
static int capabilities(lsm_plugin_ptr p, Value ¶ms, Value &response)
{
int rc = LSM_ERR_NO_SUPPORT;
@@ -2289,10 +2126,6 @@ static std::map<std::string,handler> dispatch = static_map<std::string,handler>
("job_status", handle_job_status)
("plugin_info", handle_plugin_info)
("pools", handle_pools)
- ("pool_create", handle_pool_create)
- ("pool_create_from_disks", handle_pool_create_from_disks)
- ("pool_create_from_pool", handle_pool_create_from_pool)
- ("pool_delete", handle_pool_delete)
("target_ports", handle_target_ports)
("time_out_set", handle_set_time_out)
("plugin_unregister", handle_unregister)
diff --git a/doc/man/lsmcli.1.in b/doc/man/lsmcli.1.in
index d73467a..7d96fb5 100644
--- a/doc/man/lsmcli.1.in
+++ b/doc/man/lsmcli.1.in
@@ -567,96 +567,6 @@ Required. Repeatable. Destination file to clone (relative path).
For two or more files/paths:
'\fB--src fileA --src fileB --dst old_fileA --dst old_fileB\fR'.
-.SS pool-create
-Creates a storage pool. LibStorageMgmt will automatically choose the correct
-pool members to assemble new pool. This require POOL_CREATE capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--size\fR \fI<POOL_SIZE>\fR
-Required. The size of new pool. Due to data alignment or other issue, the
-size of new pool might larger than requested.
-See \fBSIZE OPTION\fR for allowed formats.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-.TP
-\fB--raid-type\fR \fI<RAID_TYPE>\fR
-Optional. The RAID type of new pool. Valid values are:
-\fBNOT_APPLICABLE\fR, \fBJBOD\fR, \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR,
-\fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID51\fR, \fBRAID60\fR,
-\fBRAID61\fR.
-.br
-The \fBNOT_APPLICABLE\fR means pool only contain 1 member.
-If not defined, will let array to determine the RAID type.
-.br
-When using with \fB--member-type POOL\fR, \fB--raid-type\fR should be unset or
-defined as \fBNOT_APPLICABLE\fR.
-.TP
-\fB--member-type\fR \fI<MEM_TYPE>\fR
-Optional. The type of resource to create new pool. Valid values are:
-\fBDISK\fR, \fBVOLUME\fR, \fBPOOL\fR, \fBDISK_ATA\fR, \fBDISK_SATA\fR,
-\fBDISK_SAS\fR, \fBDISK_FC\fR, \fBDISK_SOP\fR \fBDISK_SCSI\fR, \fBDISK_NL_SAS,
-\fBDISK_HDD\fR, \fBDISK_SSD\fR, \fBDISK_HYBRID\fR.
-.br
-The \fBDISK\fR member type means creating pool from disk(s). For \fBDISK_\fR
-prefixed types, they are used to request new pool creating from certain type
-of DISK.
-\fBDISK_SOP\fR indicate SCSI over PCI-E, normally a PCI-E based SSD.
-\fBDISK_HYBRID\fR indicate HDD and SSD hybrid(mixed) disk.
-.br
-The \fBVOLUME\fR member type means creating pool from volume(s).
-.br
-The \fBPOOL\fR member type means create sub-pool from another pool.
-
-.SS pool-create-from-disks
-Create a new pool by specifying which disks to use with which RAID type.
-This require POOL_CREATE_FROM_DISKS capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--raid-type\fR \fI<RAID_TYPE>\fR
-Required. The RAID type of new pool. Valid values are:
-\fBNOT_APPLICABLE\fR, \fBJBOD\fR, \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR,
-\fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID51\fR, \fBRAID60\fR,
-\fBRAID61\fR.
-.br
-The \fBNOT_APPLICABLE\fR means pool only contain 1 disks.
-For supported RAID types of certain array, please use \fBcapabilities\fR
-command for POOL_CREATE_DISK_RAID_XXX entries.
-.TP
-\fB--member-id\fR \fI<DISK_ID>\fR
-Required. Repeatable. The ID of disk to create new pool.
-For two or more members: '\fB--member-id DISK_ID_A --member DISK_ID_B\fR'.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-
-.SS pool-create-from-pool
-Create a new sub-pool from specified pool. This require POOL_CREATE_FROM_POOLS
-capability.
-.TP 15
-\fB--name\fR \fI<POOL_NAME>\fR
-Required. Human friendly name for new pool.
-.TP
-\fB--size\fR \fI<POOL_SIZE>\fR
-Required. The spaces of new pool.
-See \fBSIZE OPTION\fR for allowed formats.
-.TP
-\fB--member-id\fR \fI<POOL_ID>\fR
-Required. The ID of pool to create new pool from.
-.TP
-\fB--sys\fR \fI<SYS_ID>\fR
-Required. ID of the system to create new pool.
-
-.SS pool-delete
-Deletes a storage pool.
-.TP 15
-\fB--pool\fR \fI<POOL_ID>\fR
-Required. The ID of pool to delete.
-
.IP
.SH ALIAS
.SS ls
diff --git a/plugin/ontap/ontap.py b/plugin/ontap/ontap.py
index 067fc3f..88c3ddd 100644
--- a/plugin/ontap/ontap.py
+++ b/plugin/ontap/ontap.py
@@ -312,18 +312,18 @@ class Ontap(IStorageAreaNetwork, INfs):
"""
return na_xxx['uuid']
- @staticmethod
- def _raid_type_of_na_aggr(na_aggr):
- na_raid_statuses = na_aggr['raid-status'].split(',')
- if 'raid0' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID0
- if 'raid4' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID4
- if 'raid_dp' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID6
- if 'mixed_raid_type' in na_raid_statuses:
- return Pool.RAID_TYPE_MIXED
- return Pool.RAID_TYPE_UNKNOWN
+# @staticmethod
+# def _raid_type_of_na_aggr(na_aggr):
+# na_raid_statuses = na_aggr['raid-status'].split(',')
+# if 'raid0' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID0
+# if 'raid4' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID4
+# if 'raid_dp' in na_raid_statuses:
+# return Pool.RAID_TYPE_RAID6
+# if 'mixed_raid_type' in na_raid_statuses:
+# return Pool.RAID_TYPE_MIXED
+# return Pool.RAID_TYPE_UNKNOWN
@staticmethod
def _status_of_na_aggr(na_aggr):
diff --git a/plugin/sim/simarray.py b/plugin/sim/simarray.py
index c81c851..d0d291c 100644
--- a/plugin/sim/simarray.py
+++ b/plugin/sim/simarray.py
@@ -34,6 +34,69 @@ from lsm import (System, Volume, Disk, Pool, FileSystem, AccessGroup,
D_FMT = 5
+class PoolRAID(object):
+ RAID_TYPE_RAID0 = 0
+ RAID_TYPE_RAID1 = 1
+ RAID_TYPE_RAID3 = 3
+ RAID_TYPE_RAID4 = 4
+ RAID_TYPE_RAID5 = 5
+ RAID_TYPE_RAID6 = 6
+ RAID_TYPE_RAID10 = 10
+ RAID_TYPE_RAID15 = 15
+ RAID_TYPE_RAID16 = 16
+ RAID_TYPE_RAID50 = 50
+ RAID_TYPE_RAID60 = 60
+ RAID_TYPE_RAID51 = 51
+ RAID_TYPE_RAID61 = 61
+ # number 2x is reserved for non-numbered RAID.
+ RAID_TYPE_JBOD = 20
+ RAID_TYPE_UNKNOWN = 21
+ RAID_TYPE_NOT_APPLICABLE = 22
+ # NOT_APPLICABLE indicate current pool only has one member.
+ RAID_TYPE_MIXED = 23
+
+ MEMBER_TYPE_UNKNOWN = 0
+ MEMBER_TYPE_DISK = 1
+ MEMBER_TYPE_DISK_MIX = 10
+ MEMBER_TYPE_DISK_ATA = 11
+ MEMBER_TYPE_DISK_SATA = 12
+ MEMBER_TYPE_DISK_SAS = 13
+ MEMBER_TYPE_DISK_FC = 14
+ MEMBER_TYPE_DISK_SOP = 15
+ MEMBER_TYPE_DISK_SCSI = 16
+ MEMBER_TYPE_DISK_NL_SAS = 17
+ MEMBER_TYPE_DISK_HDD = 18
+ MEMBER_TYPE_DISK_SSD = 19
+ MEMBER_TYPE_DISK_HYBRID = 110
+ MEMBER_TYPE_DISK_LUN = 111
+
+ MEMBER_TYPE_POOL = 2
+
+ _MEMBER_TYPE_2_DISK_TYPE = {
+ MEMBER_TYPE_DISK: Disk.DISK_TYPE_UNKNOWN,
+ MEMBER_TYPE_DISK_MIX: Disk.DISK_TYPE_UNKNOWN,
+ MEMBER_TYPE_DISK_ATA: Disk.DISK_TYPE_ATA,
+ MEMBER_TYPE_DISK_SATA: Disk.DISK_TYPE_SATA,
+ MEMBER_TYPE_DISK_SAS: Disk.DISK_TYPE_SAS,
+ MEMBER_TYPE_DISK_FC: Disk.DISK_TYPE_FC,
+ MEMBER_TYPE_DISK_SOP: Disk.DISK_TYPE_SOP,
+ MEMBER_TYPE_DISK_SCSI: Disk.DISK_TYPE_SCSI,
+ MEMBER_TYPE_DISK_NL_SAS: Disk.DISK_TYPE_NL_SAS,
+ MEMBER_TYPE_DISK_HDD: Disk.DISK_TYPE_HDD,
+ MEMBER_TYPE_DISK_SSD: Disk.DISK_TYPE_SSD,
+ MEMBER_TYPE_DISK_HYBRID: Disk.DISK_TYPE_HYBRID,
+ MEMBER_TYPE_DISK_LUN: Disk.DISK_TYPE_LUN,
+ }
+
+ @staticmethod
+ def member_type_is_disk(member_type):
+ """
+ Returns True if defined 'member_type' is disk.
+ False when else.
+ """
+ return member_type in PoolRAID._MEMBER_TYPE_2_DISK_TYPE
+
+
class SimJob(object):
"""
Simulates a longer running job, uses actual wall time. If test cases
@@ -169,31 +232,6 @@ class SimArray(object):
rc.extend([self._sim_pool_2_lsm(sim_pool, flags)])
return SimArray._sort_by_id(rc)
- def pool_create(self, sys_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- sim_pool = self.data.pool_create(
- sys_id, pool_name, size_bytes, raid_type, member_type, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_create_from_disks(self, sys_id, pool_name, disks_ids, raid_type,
- flags=0):
- sim_pool = self.data.pool_create_from_disks(
- sys_id, pool_name, disks_ids, raid_type, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_create_from_pool(self, sys_id, pool_name, member_id, size_bytes,
- flags=0):
- sim_pool = self.data.pool_create_from_pool(
- sys_id, pool_name, member_id, size_bytes, flags)
- return self.data.job_create(
- self._sim_pool_2_lsm(sim_pool))
-
- def pool_delete(self, pool_id, flags=0):
- return self.data.job_create(self.data.pool_delete(pool_id, flags))[0]
-
def disks(self):
rc = []
sim_disks = self.data.disks()
@@ -489,9 +527,9 @@ class SimData(object):
sim_pool = {
'name': pool_name,
'pool_id': Pool.id,
- 'raid_type': Pool.RAID_TYPE_XXXX,
+ 'raid_type': PoolRAID.RAID_TYPE_XXXX,
'member_ids': [ disk_id or pool_id or volume_id ],
- 'member_type': Pool.MEMBER_TYPE_XXXX,
+ 'member_type': PoolRAID.MEMBER_TYPE_XXXX,
'member_size': size_bytes # space allocated from each member pool.
# only for MEMBER_TYPE_POOL
'status': SIM_DATA_POOL_STATUS,
@@ -500,14 +538,12 @@ class SimData(object):
}
"""
SIM_DATA_BLK_SIZE = 512
- SIM_DATA_VERSION = "2.6"
+ SIM_DATA_VERSION = "2.7"
SIM_DATA_SYS_ID = 'sim-01'
SIM_DATA_INIT_NAME = 'NULL'
SIM_DATA_TMO = 30000 # ms
SIM_DATA_POOL_STATUS = Pool.STATUS_OK
SIM_DATA_POOL_STATUS_INFO = ''
- SIM_DATA_DISK_DEFAULT_RAID = Pool.RAID_TYPE_RAID0
- SIM_DATA_VOLUME_DEFAULT_RAID = Pool.RAID_TYPE_RAID0
SIM_DATA_POOL_ELEMENT_TYPE = Pool.ELEMENT_TYPE_FS \
| Pool.ELEMENT_TYPE_POOL \
| Pool.ELEMENT_TYPE_VOLUME
@@ -570,9 +606,9 @@ class SimData(object):
'POO1': {
'pool_id': 'POO1',
'name': 'Pool 1',
- 'member_type': Pool.MEMBER_TYPE_DISK_SATA,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SATA,
'member_ids': [SimData._disk_id(0), SimData._disk_id(1)],
- 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID1,
'status': SimData.SIM_DATA_POOL_STATUS,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -581,10 +617,10 @@ class SimData(object):
'POO2': {
'pool_id': 'POO2',
'name': 'Pool 2',
- 'member_type': Pool.MEMBER_TYPE_POOL,
+ 'member_type': PoolRAID.MEMBER_TYPE_POOL,
'member_ids': ['POO1'],
'member_size': pool_size_200g,
- 'raid_type': Pool.RAID_TYPE_NOT_APPLICABLE,
+ 'raid_type': PoolRAID.RAID_TYPE_NOT_APPLICABLE,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -594,9 +630,9 @@ class SimData(object):
'lsm_test_aggr': {
'pool_id': 'lsm_test_aggr',
'name': 'lsm_test_aggr',
- 'member_type': Pool.MEMBER_TYPE_DISK_SAS,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SAS,
'member_ids': [SimData._disk_id(2), SimData._disk_id(3)],
- 'raid_type': Pool.RAID_TYPE_RAID0,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID0,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -651,12 +687,12 @@ class SimData(object):
self.pool_dict['POO3'] = {
'pool_id': 'POO3',
'name': 'Pool 3',
- 'member_type': Pool.MEMBER_TYPE_DISK_SSD,
+ 'member_type': PoolRAID.MEMBER_TYPE_DISK_SSD,
'member_ids': [
self.disk_dict[SimData._disk_id(9)]['disk_id'],
self.disk_dict[SimData._disk_id(10)]['disk_id'],
],
- 'raid_type': Pool.RAID_TYPE_RAID1,
+ 'raid_type': PoolRAID.RAID_TYPE_RAID1,
'status': Pool.STATUS_OK,
'status_info': SimData.SIM_DATA_POOL_STATUS_INFO,
'sys_id': SimData.SIM_DATA_SYS_ID,
@@ -731,7 +767,7 @@ class SimData(object):
return 0
free_space -= sim_fs['consume_size']
for sim_pool in self.pool_dict.values():
- if sim_pool['member_type'] != Pool.MEMBER_TYPE_POOL:
+ if sim_pool['member_type'] != PoolRAID.MEMBER_TYPE_POOL:
continue
if pool_id in sim_pool['member_ids']:
free_space -= sim_pool['member_size']
@@ -750,11 +786,11 @@ class SimData(object):
def _size_of_raid(self, member_type, member_ids, raid_type,
pool_each_size=0):
member_sizes = []
- if Pool.member_type_is_disk(member_type):
+ if PoolRAID.member_type_is_disk(member_type):
for member_id in member_ids:
member_sizes.extend([self.disk_dict[member_id]['total_space']])
- elif member_type == Pool.MEMBER_TYPE_POOL:
+ elif member_type == PoolRAID.MEMBER_TYPE_POOL:
for member_id in member_ids:
member_sizes.extend([pool_each_size])
@@ -768,38 +804,38 @@ class SimData(object):
for member_size in member_sizes:
all_size += member_size
- if raid_type == Pool.RAID_TYPE_JBOD or \
- raid_type == Pool.RAID_TYPE_NOT_APPLICABLE or \
- raid_type == Pool.RAID_TYPE_RAID0:
+ if raid_type == PoolRAID.RAID_TYPE_JBOD or \
+ raid_type == PoolRAID.RAID_TYPE_NOT_APPLICABLE or \
+ raid_type == PoolRAID.RAID_TYPE_RAID0:
return int(all_size)
- elif (raid_type == Pool.RAID_TYPE_RAID1 or
- raid_type == Pool.RAID_TYPE_RAID10):
+ elif (raid_type == PoolRAID.RAID_TYPE_RAID1 or
+ raid_type == PoolRAID.RAID_TYPE_RAID10):
if member_count % 2 == 1:
return 0
return int(all_size / 2)
- elif (raid_type == Pool.RAID_TYPE_RAID3 or
- raid_type == Pool.RAID_TYPE_RAID4 or
- raid_type == Pool.RAID_TYPE_RAID5):
+ elif (raid_type == PoolRAID.RAID_TYPE_RAID3 or
+ raid_type == PoolRAID.RAID_TYPE_RAID4 or
+ raid_type == PoolRAID.RAID_TYPE_RAID5):
if member_count < 3:
return 0
return int(all_size - member_size)
- elif raid_type == Pool.RAID_TYPE_RAID50:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID50:
if member_count < 6 or member_count % 2 == 1:
return 0
return int(all_size - member_size * 2)
- elif raid_type == Pool.RAID_TYPE_RAID6:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID6:
if member_count < 4:
return 0
return int(all_size - member_size * 2)
- elif raid_type == Pool.RAID_TYPE_RAID60:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID60:
if member_count < 8 or member_count % 2 == 1:
return 0
return int(all_size - member_size * 4)
- elif raid_type == Pool.RAID_TYPE_RAID51:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID51:
if member_count < 6 or member_count % 2 == 1:
return 0
return int(all_size / 2 - member_size)
- elif raid_type == Pool.RAID_TYPE_RAID61:
+ elif raid_type == PoolRAID.RAID_TYPE_RAID61:
if member_count < 8 or member_count % 2 == 1:
return 0
print "%s" % size_bytes_2_size_human(all_size)
@@ -817,7 +853,7 @@ class SimData(object):
sim_pool = self.pool_dict[pool_id]
each_pool_size_bytes = 0
member_type = sim_pool['member_type']
- if sim_pool['member_type'] == Pool.MEMBER_TYPE_POOL:
+ if sim_pool['member_type'] == PoolRAID.MEMBER_TYPE_POOL:
each_pool_size_bytes = sim_pool['member_size']
return self._size_of_raid(
@@ -1412,416 +1448,5 @@ class SimData(object):
del self.exp_dict[exp_id]
return None
- def _free_disks_list(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
- """
- Return a list of free sim_disk.
- Return [] if no free disk found.
- """
- free_sim_disks = []
- for sim_disk in self.disk_dict.values():
- if disk_type != Disk.DISK_TYPE_UNKNOWN and \
- sim_disk['disk_type'] != disk_type:
- continue
- flag_free = True
- for sim_pool in self.pool_dict.values():
- if Pool.member_type_is_disk(sim_pool['member_type']) and \
- sim_disk['disk_id'] in sim_pool['member_ids']:
- flag_free = False
- break
- if flag_free is True:
- free_sim_disks.extend([sim_disk])
- return sorted(free_sim_disks, key=lambda k: (k['disk_id']))
-
- def _free_disks(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
- """
- Return a dictionary like this:
- {
- Disk.DISK_TYPE_XXX: {
- Disk.total_space: [sim_disk, ]
- }
- }
- Return None if not free.
- """
- free_sim_disks = self._free_disks_list()
- rc = dict()
- for sim_disk in free_sim_disks:
- if disk_type != Disk.DISK_TYPE_UNKNOWN and \
- sim_disk['disk_type'] != disk_type:
- continue
-
- cur_type = sim_disk['disk_type']
- cur_size = sim_disk['total_space']
-
- if cur_type not in rc.keys():
- rc[cur_type] = dict()
-
- if cur_size not in rc[cur_type]:
- rc[cur_type][cur_size] = []
-
- rc[cur_type][cur_size].extend([sim_disk])
-
- return rc
-
- def _free_pools_list(self):
- """
- Return a list of sim_pool or []
- """
- free_sim_pools = []
- for sim_pool in self.pool_dict.values():
- # TODO: one day we will introduce free_size of Volume.
- # in that case we will check whether
- # total_space == pool_free_size(sim_pool['pool_id'])
- pool_id = sim_pool['pool_id']
- if self.pool_free_space(pool_id) > 0:
- free_sim_pools.extend([sim_pool])
- return sorted(
- free_sim_pools,
- key=lambda k: (k['pool_id'].isupper(), k['pool_id']))
-
- def _pool_create_from_disks(self, pool_name, member_ids, raid_type,
- raise_error=False):
- # Check:
- # 1. The disk_id is valid
- # 2. All disks are the same disk type.
- # 3. All disks are free.
- # 4. All disks' total space is the same.
- if len(member_ids) <= 0:
- if raise_error:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "No disk ID defined")
- else:
- return None
-
- if raid_type == Pool.RAID_TYPE_NOT_APPLICABLE and \
- len(member_ids) >= 2:
- if raise_error:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Pool.RAID_TYPE_NOT_APPLICABLE means only 1 " +
- "member, but got 2 or more: %s" %
- ', '.join(member_ids))
- else:
- return None
-
- current_disk_type = None
- current_total_space = None
- for disk_id in member_ids:
- if disk_id not in self.disk_dict.keys():
- if raise_error:
- raise LsmError(ErrorNumber.NOT_FOUND_DISK,
- "The disk ID %s does not exist" % disk_id)
- else:
- return None
- sim_disk = self.disk_dict[disk_id]
- if current_disk_type is None:
- current_disk_type = sim_disk['disk_type']
- elif current_disk_type != sim_disk['disk_type']:
- if raise_error:
- raise LsmError(
- ErrorNumber.NO_SUPPORT,
- "Mixing disk types in one pool " +
- "is not supported: %s and %s" %
- (Disk.disk_type_to_str(current_disk_type),
- Disk.disk_type_to_str(sim_disk['disk_type'])))
- else:
- return None
- if current_total_space is None:
- current_total_space = sim_disk['total_space']
- elif current_total_space != sim_disk['total_space']:
- if raise_error:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Mixing different size of disks is not " +
- "supported")
- else:
- return None
-
- all_free_disks = self._free_disks_list()
- if all_free_disks is None:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free disk to create new pool")
- else:
- return None
- all_free_disk_ids = [d['disk_id'] for d in all_free_disks]
- for disk_id in member_ids:
- if disk_id not in all_free_disk_ids:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "Disk %s is used by other pool" % disk_id)
- else:
- return None
-
- if raid_type == Pool.RAID_TYPE_UNKNOWN or \
- raid_type == Pool.RAID_TYPE_MIXED:
- if raise_error:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "RAID type %s(%d) is not supported" %
- (Pool.raid_type_to_str(raid_type), raid_type))
- else:
- return None
-
- pool_id = self._next_pool_id()
- if pool_name == '':
- pool_name = 'POOL %s' % SimData._random_vpd(4)
-
- sim_pool = dict()
- sim_pool['name'] = pool_name
- sim_pool['pool_id'] = pool_id
- if len(member_ids) == 1:
- sim_pool['raid_type'] = Pool.RAID_TYPE_NOT_APPLICABLE
- else:
- sim_pool['raid_type'] = raid_type
- sim_pool['member_ids'] = member_ids
- sim_pool['member_type'] = \
- Pool.disk_type_to_member_type(current_disk_type)
- sim_pool['sys_id'] = SimData.SIM_DATA_SYS_ID
- sim_pool['element_type'] = SimData.SIM_DATA_POOL_ELEMENT_TYPE
- sim_pool['status'] = SimData.SIM_DATA_POOL_STATUS
- sim_pool['status_info'] = SimData.SIM_DATA_POOL_STATUS_INFO
- self.pool_dict[pool_id] = sim_pool
- return sim_pool
-
- def pool_create_from_disks(self, sys_id, pool_name, member_ids, raid_type,
- flags=0):
- """
- return newly create sim_pool or None.
- """
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
-
- return self._pool_create_from_disks(pool_name, member_ids, raid_type,
- raise_error=True)
-
- def _pool_create_from_pool(self, pool_name, member_id,
- size_bytes, raise_error=False):
-
- size_bytes = SimData._block_rounding(size_bytes)
- free_sim_pools = self._free_pools_list()
- free_sim_pool_ids = [p['pool_id'] for p in free_sim_pools]
- if len(free_sim_pool_ids) == 0 or \
- member_id not in free_sim_pool_ids:
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "Pool %s " % member_id +
- "is full, no space to create new pool")
- else:
- return None
-
- free_size = self.pool_free_space(member_id)
- if free_size < size_bytes:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "Pool %s does not have requested free" %
- member_id + "to create new pool")
-
- pool_id = self._next_pool_id()
- if pool_name == '':
- pool_name = 'POOL %s' % SimData._random_vpd(4)
- sim_pool = dict()
- sim_pool['name'] = pool_name
- sim_pool['pool_id'] = pool_id
- sim_pool['raid_type'] = Pool.RAID_TYPE_NOT_APPLICABLE
- sim_pool['member_ids'] = [member_id]
- sim_pool['member_type'] = Pool.MEMBER_TYPE_POOL
- sim_pool['member_size'] = size_bytes
- sim_pool['sys_id'] = SimData.SIM_DATA_SYS_ID
- sim_pool['element_type'] = SimData.SIM_DATA_POOL_ELEMENT_TYPE
- sim_pool['status'] = SimData.SIM_DATA_POOL_STATUS
- sim_pool['status_info'] = SimData.SIM_DATA_POOL_STATUS_INFO
- self.pool_dict[pool_id] = sim_pool
- return sim_pool
-
- def pool_create_from_pool(self, sys_id, pool_name, member_id, size_bytes,
- flags=0):
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
- return self._pool_create_from_pool(pool_name, member_id, size_bytes,
- raise_error=True)
-
- def _auto_choose_disk(self, size_bytes, raid_type, disk_type,
- raise_error=False):
- """
- Return a list of member ids suitable for creating RAID pool with
- required size_bytes.
- Return [] if nothing found.
- if raise_error is True, raise error if not found
- """
- disk_type_str = "disk"
- if disk_type != Disk.DISK_TYPE_UNKNOWN:
- disk_type_str = "disk(type: %s)" % Disk.disk_type_to_str(disk_type)
-
- if raid_type == Pool.RAID_TYPE_NOT_APPLICABLE:
- # NOT_APPLICABLE means pool will only contain one disk.
- sim_disks = self._free_disks_list(disk_type)
- if len(sim_disks) == 0:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free %s found" % disk_type_str)
- else:
- return []
-
- for sim_disk in sim_disks:
- if sim_disk['total_space'] >= size_bytes:
- return [sim_disk]
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No %s is bigger than " % disk_type_str +
- "expected size: %s(%d)" %
- (size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- if raid_type == Pool.RAID_TYPE_JBOD:
- # JBOD does not require all disks in the same size or the same type.
- sim_disks = self._free_disks_list(disk_type)
- if len(sim_disks) == 0:
- if raise_error:
- raise LsmError(ErrorNumber.DISK_BUSY,
- "No free %s found" % disk_type_str)
- else:
- return []
-
- chose_sim_disks = []
- all_free_size = 0
- for sim_disk in sim_disks:
- chose_sim_disks.extend([sim_disk])
- all_free_size += sim_disk['total_space']
- if all_free_size >= size_bytes:
- return chose_sim_disks
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough %s to provide size %s(%d)" %
- (disk_type_str,
- size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- # All rest RAID type require member are in the same size and same
- # type.
- sim_disks_struct = self._free_disks(disk_type)
- for cur_disk_type in sim_disks_struct.keys():
- for cur_disk_size in sim_disks_struct[cur_disk_type].keys():
- cur_sim_disks = sim_disks_struct[cur_disk_type][cur_disk_size]
- if len(cur_sim_disks) == 0:
- continue
- chose_sim_disks = []
- for member_count in range(1, len(cur_sim_disks) + 1):
- partial_sim_disks = cur_sim_disks[0:member_count]
- member_ids = [x['disk_id'] for x in partial_sim_disks]
- raid_actual_size = self._size_of_raid(
- Pool.MEMBER_TYPE_DISK, member_ids, raid_type)
- if size_bytes <= raid_actual_size:
- return cur_sim_disks[0:member_count]
-
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough %s " % disk_type_str +
- "to create %s providing size: %s(%d)" %
- (Pool.raid_type_to_str(raid_type),
- size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return []
-
- def _auto_choose_pool(self, size_bytes, raise_error=False):
- """
- Return a sim_pool.
- Return None if not found.
- """
- sim_pools = self._free_pools_list()
- if len(sim_pools) >= 1:
- for sim_pool in sim_pools:
- pool_id = sim_pool['pool_id']
- free_size = self.pool_free_space(pool_id)
- if free_size >= size_bytes:
- return sim_pool
-
- if raise_error:
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No pool is bigger than expected size: " +
- "%s(%d)" %
- (size_bytes_2_size_human(size_bytes),
- size_bytes))
- else:
- return None
-
- def pool_create(self, sys_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- if sys_id != SimData.SIM_DATA_SYS_ID:
- raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM,
- "No such system: %s" % sys_id)
-
- size_bytes = SimData._block_rounding(size_bytes)
-
- raise_error = False
- if member_type != Pool.MEMBER_TYPE_UNKNOWN:
- raise_error = True
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN or \
- Pool.member_type_is_disk(member_type):
- disk_raid_type = raid_type
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- disk_raid_type = SimData.SIM_DATA_DISK_DEFAULT_RAID
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- disk_type = Disk.DISK_TYPE_UNKNOWN
- else:
- disk_type = Pool.member_type_to_disk_type(member_type)
- sim_disks = self._auto_choose_disk(
- size_bytes, disk_raid_type, disk_type, raise_error)
- if len(sim_disks) >= 1:
- member_ids = [d['disk_id'] for d in sim_disks]
- sim_pool = self._pool_create_from_disks(
- pool_name, member_ids, disk_raid_type, raise_error)
- if sim_pool:
- return sim_pool
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN or \
- member_type == Pool.MEMBER_TYPE_POOL:
- if raid_type != Pool.RAID_TYPE_UNKNOWN and \
- raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Pool based pool does not support " +
- "raid_type: %s(%d)" %
- (Pool.raid_type_to_str(raid_type),
- raid_type))
-
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- if raid_type != Pool.RAID_TYPE_UNKNOWN and \
- raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "No enough free disk or volume spaces " +
- "to create new pool. And pool based " +
- "pool does not support raid_type: %s" %
- Pool.raid_type_to_str(raid_type))
-
- member_sim_pool = self._auto_choose_pool(size_bytes, raise_error)
- if member_sim_pool:
- member_id = member_sim_pool['pool_id']
- sim_pool = self._pool_create_from_pool(
- pool_name, member_id, size_bytes, raise_error)
- if sim_pool:
- return sim_pool
-
- # only member_type == Pool.MEMBER_TYPE_UNKNOWN can reach here.
- raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE,
- "No enough free spaces to create new pool")
-
- def pool_delete(self, pool_id, flags=0):
- if pool_id not in self.pool_dict.keys():
- raise LsmError(ErrorNumber.NOT_FOUND_POOL,
- "Pool not found: %s" % pool_id)
-
- volumes = self.volumes()
- for v in volumes:
- if v['pool_id'] == pool_id:
- raise LsmError(ErrorNumber.EXISTS_VOLUME,
- "Volumes exist on pool")
-
- del(self.pool_dict[pool_id])
- return None
-
def target_ports(self):
return self.tgt_dict.values()
diff --git a/plugin/sim/simulator.py b/plugin/sim/simulator.py
index f6c26de..f84f6bd 100644
--- a/plugin/sim/simulator.py
+++ b/plugin/sim/simulator.py
@@ -101,26 +101,6 @@ class SimPlugin(INfs, IStorageAreaNetwork):
[SimPlugin._sim_data_2_lsm(p) for p in sim_pools],
search_key, search_value)
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- return self.sim_array.pool_create(
- system.id, pool_name, size_bytes, raid_type, member_type, flags)
-
- def pool_create_from_disks(self, system, pool_name, disks,
- raid_type, flags=0):
- member_ids = [x.id for x in disks]
- return self.sim_array.pool_create_from_disks(
- system.id, pool_name, member_ids, raid_type, flags)
-
- def pool_create_from_pool(self, system, pool_name, pool,
- size_bytes, flags=0):
- return self.sim_array.pool_create_from_pool(
- system.id, pool_name, pool.id, size_bytes, flags)
-
- def pool_delete(self, pool, flags=0):
- return self.sim_array.pool_delete(pool.id, flags)
-
def volumes(self, search_key=None, search_value=None, flags=0):
sim_vols = self.sim_array.volumes()
return search_property(
diff --git a/plugin/simc/simc_lsmplugin.c b/plugin/simc/simc_lsmplugin.c
index cb2b7dd..12a6edd 100644
--- a/plugin/simc/simc_lsmplugin.c
+++ b/plugin/simc/simc_lsmplugin.c
@@ -932,144 +932,6 @@ static int _volume_delete(lsm_plugin_ptr c, const char *volume_id)
return rc;
}
-static int _pool_create(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool **pool, char **job)
-{
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *new_pool = NULL;
- lsm_pool *pool_to_store = NULL;
- char *key = NULL;
-
- /* Verify system id */
- if( strcmp(lsm_system_id_get(system), lsm_system_id_get(pd->system[0])) == 0 ) {
- /* Verify that we don't already have a pool by that name */
- new_pool = find_pool_name(pd, pool_name);
- if( !new_pool ) {
- /* Create the pool */
- new_pool = lsm_pool_record_alloc(md5(pool_name), pool_name, 0, size_bytes,
- size_bytes, LSM_POOL_STATUS_OK, "",
- lsm_system_id_get(system), NULL);
-
- pool_to_store = lsm_pool_record_copy(new_pool);
- key = strdup(lsm_pool_id_get(pool_to_store));
- if( new_pool && pool_to_store && key ) {
- g_hash_table_insert(pd->pools, key, pool_to_store);
-
- /* Create a job */
- rc = create_job(pd, job, LSM_DATA_TYPE_POOL, new_pool,
- (void**)pool);
- } else {
- free(key);
- lsm_pool_record_free(new_pool);
- lsm_pool_record_free(pool_to_store);
- rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "No memory");
- }
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_EXISTS_POOL,
- "Pool with name exists!");
- }
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_SYSTEM,
- "system not found!");
- }
- return rc;
-}
-
-
-static int pool_create(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, uint64_t size_bytes,
- lsm_pool_raid_type raid_type,
- lsm_pool_member_type member_type, lsm_pool** pool,
- char **job, lsm_flag flags)
-{
- return _pool_create(c, system, pool_name, size_bytes, pool, job);
-}
-
-static int pool_create_from_disks( lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, lsm_disk *disks[], uint32_t num_disks,
- lsm_pool_raid_type raid_type, lsm_pool **pool, char **job,
- lsm_flag flags)
-{
- /* Check that the disks are valid, then call common routine */
- uint64_t size = 0;
- int rc = LSM_ERR_OK;
- int i = 0;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
-
- if( num_disks ) {
- for( i = 0; i < num_disks; ++i ) {
- lsm_disk *d = find_disk(pd, lsm_disk_id_get(disks[i]));
- if( d ) {
- size += (lsm_disk_number_of_blocks_get(d) * lsm_disk_block_size_get(d));
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_DISK,
- "Disk not found");
- goto bail;
- }
- }
-
- rc = _pool_create(c, system, pool_name, size, pool, job);
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_INVALID_ARGUMENT, "No disks provided");
- }
-bail:
- return rc;
-}
-
-static int pool_create_from_pool(lsm_plugin_ptr c, lsm_system *system,
- const char *pool_name, lsm_pool *pool,
- uint64_t size_bytes, lsm_pool **created_pool, char **job,
- lsm_flag flags )
-{
- /* Check that the disks are valid, then call common routine */
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *p = find_pool(pd, lsm_pool_id_get(pool));
-
- if( p ) {
- rc = _pool_create(c, system, pool_name, size_bytes, created_pool, job);
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL, "Pool not found");
- }
- return rc;
-}
-
-static int pool_delete(lsm_plugin_ptr c, lsm_pool *pool, char **job,
- lsm_flag flags)
-{
- int rc = LSM_ERR_OK;
- struct plugin_data *pd = (struct plugin_data*)lsm_private_data_get(c);
- lsm_pool *pool_to_delete = find_pool(pd, lsm_pool_id_get(pool));
-
- if( pool_to_delete ) {
-
- /* Loop through building a list of volumes in this pool */
- char *k = NULL;
- struct allocated_volume *vol;
- GHashTableIter iter;
- g_hash_table_iter_init(&iter, pd->volumes);
- while(g_hash_table_iter_next(&iter,(gpointer) &k,(gpointer)&vol)) {
- if( strcmp(lsm_volume_pool_id_get(vol->v), lsm_pool_id_get(pool)) == 0 ) {
- rc = lsm_log_error_basic(c, LSM_ERR_EXISTS_VOLUME,
- "volumes exist on pool");
- goto bail;
- }
- }
-
- /* Remove pool from hash and create job */
- g_hash_table_remove(pd->pools, lsm_pool_id_get(pool));
- rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL);
-
- } else {
- rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL,
- "pool not found!");
- }
-bail:
- return rc;
-}
-
static int volume_delete(lsm_plugin_ptr c, lsm_volume *volume,
char **job, lsm_flag flags)
{
@@ -1574,10 +1436,6 @@ static int iscsi_chap_auth(lsm_plugin_ptr c, const char *init_id,
static struct lsm_san_ops_v1 san_ops = {
list_volumes,
list_disks,
- pool_create,
- pool_create_from_disks,
- pool_create_from_pool,
- pool_delete,
volume_create,
volume_replicate,
volume_replicate_range_bs,
diff --git a/plugin/smispy/smis.py b/plugin/smispy/smis.py
index 368b606..2d91c96 100644
--- a/plugin/smispy/smis.py
+++ b/plugin/smispy/smis.py
@@ -1218,8 +1218,6 @@ class Smis(IStorageAreaNetwork):
return ErrorNumber.NOT_FOUND_SYSTEM
if class_type == 'Pool':
return ErrorNumber.NOT_FOUND_POOL
- if class_type == 'Disk':
- return ErrorNumber.NOT_FOUND_DISK
if class_type == 'Job':
return ErrorNumber.NOT_FOUND_JOB
if class_type == 'AccessGroup':
@@ -3367,170 +3365,6 @@ class Smis(IStorageAreaNetwork):
Smis._DMTF_STATUS_TO_POOL_STATUS_INFO[dmtf_status])
return (status, ", ".join(status_info))
- def _find_out_bottom_cexts(self, cim_pool_path, pros_list=None):
- """
- This is based on 'Extent Composition' subprofile.
- CIM_StoragePool can based on several CIM_CompositeExtent with several
- level. We will find out the bottom level CIM_CompositeExtent.
- This is how we traverse down:
- CIM_StoragePool
- ^
- | GroupComponent
- |
- | CIM_ConcreteComponent/CIM_AssociatedComponentExtent
- | |-> deprecated in SMI-S 1.5rev4 by ---^
- |
- | PartComponent
- v
- CIM_CompositeExtent # The rest traverse was handle by
- ^ # _traverse_cext()
- | GroupComponent
- |
- | CIM_BasedOn
- |
- | PartComponent
- v
- CIM_CompositeExtent
- .
- .
- .
- Will return a list of CIMInstance of CIM_CompositeExtent.
- Mid-level CIM_CompositeExtent will not included.
- If nothing found, return []
- """
- if pros_list is None:
- pros_list = []
- bottom_cim_cexts = []
- try:
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_AssociatedComponentExtent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
- except CIMError as ce:
- error_code = tuple(ce)[0]
- if error_code == pywbem.CIM_ERR_INVALID_CLASS or \
- error_code == pywbem.CIM_ERR_INVALID_PARAMETER:
- # Not support SMIS 1.5, using 1.4 way.
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_ConcreteComponent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
- else:
- raise
- if cim_pool_path.classname == 'LSIESG_StoragePool':
- # LSI does not report error on CIM_AssociatedComponentExtent
- # But they don't support it.
- cim_cexts = self._c.Associators(
- cim_pool_path,
- AssocClass='CIM_ConcreteComponent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- ResultClass='CIM_CompositeExtent',
- PropertyList=pros_list)
-
- if len(cim_cexts) == 0:
- return []
- for cim_cext in cim_cexts:
- tmp_cim_cexts = self._traverse_cext(cim_cext.path, pros_list)
- if len(tmp_cim_cexts) == 0:
- # already at the bottom level
- bottom_cim_cexts.extend([cim_cext])
- else:
- bottom_cim_cexts.extend(tmp_cim_cexts)
- return bottom_cim_cexts
-
- def _traverse_cext(self, cim_cext_path, pros_list=None):
- """
- Using this procedure to find out the bottom level CIM_CompositeExtent.
- CIM_CompositeExtent
- ^
- | GroupComponent
- |
- | CIM_BasedOn
- |
- | PartComponent
- v
- CIM_CompositeExtent
- .
- .
- .
- Will return a list of CIMInstance of CIM_CompositeExtent.
- Mid-level CIM_CompositeExtent will not included.
- If nothing found, return []
- """
- if pros_list is None:
- pros_list = []
- cim_sub_cexts = self._c.Associators(
- cim_cext_path,
- AssocClass='CIM_BasedOn',
- ResultClass='CIM_CompositeExtent',
- Role='GroupComponent',
- ResultRole='PartComponent',
- PropertyList=pros_list)
- if len(cim_sub_cexts) == 0:
- return []
- cim_bottom_cexts = []
- for cim_sub_cext in cim_sub_cexts:
- tmp_cim_bottom_cexts = self._traverse_cext(cim_sub_cext.path,
- pros_list)
- if len(tmp_cim_bottom_cexts) == 0:
- cim_bottom_cexts.extend([cim_sub_cext])
- else:
- cim_bottom_cexts.extend(tmp_cim_bottom_cexts)
- return cim_bottom_cexts
-
- def _traverse_cext_2_pri_ext(self, cim_cext_path, pros_list=None):
- """
- Using this procedure to find out the member disks of
- CIM_CompositeExtent:
- CIM_CompositeExtent
- ^
- | Dependent
- |
- | CIM_BasedOn
- |
- | Antecedent
- v
- CIM_StorageExtent (Concrete)
- ^
- | Dependent
- |
- | CIM_BasedOn
- |
- | Antecedent
- v
- CIM_StorageExtent (Concrete)
- .
- .
- .
- CIM_StorageExtent (Primordial)
- """
- if pros_list is None:
- pros_list = []
- if 'Primordial' not in pros_list:
- pros_list.extend(['Primordial'])
- cim_sub_exts = self._c.Associators(
- cim_cext_path,
- AssocClass='CIM_BasedOn',
- ResultClass='CIM_StorageExtent',
- Role='Dependent',
- ResultRole='Antecedent',
- PropertyList=pros_list)
- cim_pri_exts = []
- for cim_sub_ext in cim_sub_exts:
- if cim_sub_ext['Primordial']:
- cim_pri_exts.extend([cim_sub_ext])
- else:
- cim_pri_exts.extend(
- self._traverse_cext_2_pri_ext(cim_sub_ext.path))
- return cim_pri_exts
-
def _cim_disk_of_pri_ext(self, cim_pri_ext_path, pros_list=None):
"""
Follow this procedure to find out CIM_DiskDrive from Primordial
@@ -3604,534 +3438,6 @@ class Smis(IStorageAreaNetwork):
return element_type
- def _pool_opt_data(self, cim_pool):
- """
- Usage:
- Update Pool object with optional data found in cim_pool.
- The CIMInstance cim_pool was supposed to hold all optional data.
- So that we save 1 SMI-S query.
- No matter we found any info or not, we still return the unknown
- filler, with this, we can make sure return object are containing
- same order/length of column_data().
- Parameter:
- cim_pool # CIMInstance of CIM_StoragePool
- Returns:
- opt_pro_dict # dict containing optional properties
- Exceptions:
- NONE
- """
- opt_pro_dict = {
- 'thinp_type': Pool.THINP_TYPE_UNKNOWN,
- 'raid_type': Pool.RAID_TYPE_UNKNOWN,
- 'member_type': Pool.MEMBER_TYPE_UNKNOWN,
- 'member_ids': [],
- 'element_type': Pool.ELEMENT_TYPE_UNKNOWN,
- }
-
- # check whether current pool support create volume or not.
- cim_sccs = self._c.Associators(
- cim_pool.path,
- AssocClass='CIM_ElementCapabilities',
- ResultClass='CIM_StorageConfigurationCapabilities',
- PropertyList=['SupportedStorageElementFeatures',
- 'SupportedStorageElementTypes'])
- # Associate StorageConfigurationCapabilities to StoragePool
- # is experimental in SNIA 1.6rev4, Block Book PDF Page 68.
- # Section 5.1.6 StoragePool, StorageVolume and LogicalDisk
- # Manipulation, Figure 9 - Capabilities Specific to a StoragePool
- if len(cim_sccs) == 1:
- cim_scc = cim_sccs[0]
- if 'SupportedStorageElementFeatures' in cim_scc and \
- Smis.DMTF_SUPPORT_VOL_CREATE in \
- cim_scc['SupportedStorageElementFeatures']:
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- # When certain Pool can create ThinlyProvisionedStorageVolume,
- # we mark it as Thin Pool.
- if 'SupportedStorageElementTypes' in cim_scc:
- dmtf_element_types = cim_scc['SupportedStorageElementTypes']
- if Smis.DMTF_ELEMENT_THIN_VOLUME in dmtf_element_types:
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THIN
- else:
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THICK
- else:
- # IBM DS 8000 does not support StorageConfigurationCapabilities
- # per pool yet. They has been informed. Before fix, use a quick
- # workaround.
- # TODO: Currently, we don't have a way to detect
- # Pool.ELEMENT_TYPE_POOL
- # but based on knowing definition of each vendor.
- if cim_pool.classname == 'IBMTSDS_VirtualPool' or \
- cim_pool.classname == 'IBMTSDS_ExtentPool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- elif cim_pool.classname == 'IBMTSDS_RankPool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_POOL
- elif cim_pool.classname == 'LSIESG_StoragePool':
- opt_pro_dict['element_type'] = Pool.ELEMENT_TYPE_VOLUME
- opt_pro_dict['thinp_type'] = Pool.THINP_TYPE_THICK
-
- pool_id_pros = self._property_list_of_id('Pool', ['Primordial'])
- # We use some blacklist here to speed up by skipping unnecessary
- # parent pool checking.
- # These class are known as Disk Pool, no need to waste time on
- # checking 'Pool over Pool' layout.
- if cim_pool.classname == 'Clar_UnifiedStoragePool' or \
- cim_pool.classname == 'IBMTSDS_RankPool' or \
- cim_pool.classname == 'LSIESG_StoragePool' or \
- cim_pool.classname == 'ONTAP_ConcretePool':
- pass
- else:
- cim_parent_pools = self._c.Associators(
- cim_pool.path,
- AssocClass='CIM_AllocatedFromStoragePool',
- Role='Dependent',
- ResultRole='Antecedent',
- ResultClass='CIM_StoragePool',
- PropertyList=pool_id_pros)
- for cim_parent_pool in cim_parent_pools:
- if not cim_parent_pool['Primordial']:
- opt_pro_dict['member_type'] = Pool.MEMBER_TYPE_POOL
- opt_pro_dict['member_ids'].extend(
- [self._pool_id(cim_parent_pool)])
-
- raid_pros = self._raid_type_pros()
- cim_cexts = []
- # We skip disk member checking on VMAX due to bad performance.
- if cim_pool.classname != 'Symm_DeviceStoragePool':
- cim_cexts = self._find_out_bottom_cexts(cim_pool.path, raid_pros)
- raid_type = None
- for cim_cext in cim_cexts:
- cur_raid_type = self._raid_type_of(cim_cext)
-
- if (raid_type is not None) and cur_raid_type != raid_type:
- raid_type = Pool.RAID_TYPE_MIXED
- else:
- raid_type = cur_raid_type
-
- if opt_pro_dict['member_type'] == Pool.MEMBER_TYPE_POOL:
- # we already know current pool is based on pool or volume.
- # skipping disk member traverse walk.
- continue
-
- # TODO: Current way consume too much time(too many SMIS call).
- # SNIA current standard (1.6rev4) does not have any better
- # way for disk members querying.
- cim_pri_exts = self._traverse_cext_2_pri_ext(cim_cext.path)
- cim_disks = []
- disk_id_pros = self._property_list_of_id('Disk')
- for cim_pri_ext in cim_pri_exts:
- cim_disk = self._cim_disk_of_pri_ext(cim_pri_ext.path,
- disk_id_pros)
- if cim_disk:
- cim_disks.extend([cim_disk])
- if len(cim_disks) > 0:
- cur_member_ids = []
- for cim_disk in cim_disks:
- cur_member_ids.extend([self._disk_id(cim_disk)])
-
- opt_pro_dict['member_type'] = Pool.MEMBER_TYPE_DISK
- opt_pro_dict['member_ids'].extend(cur_member_ids)
-
- if raid_type is not None:
- opt_pro_dict['raid_type'] = raid_type
-
- return opt_pro_dict
-
- @staticmethod
- def _raid_type_pros():
- """
- Return a list of properties needed to detect RAID type from
- CIM_StorageExtent.
- """
- return ['DataRedundancy', 'PackageRedundancy',
- 'NoSinglePointOfFailure', 'ExtentStripeLength']
-
- @staticmethod
- def _raid_type_of(cim_ext):
- """
- Take CIM_CompositePool to check the RAID type of it.
- Only check the up-first level of RAID, we does not nested down.
- For example, when got a RAID 1 CIM_CompositePool, we return
- Pool.RAID_TYPE_RAID1
- If failed to detect the RAID level, will return:
- Pool.RAID_TYPE_UNKNOWN
- Since this is a private method, we do not check whether cim_ext is
- valid or not.
- Make sure you have all properties listed in _raid_type_pros()
- # TODO: to support RAID 3 and RAID 4 level.
- # RAID 3/4 could be checked via
- # CIM_StorageSetting['ParityLayout']
- # RAID 3: stripesize is 512 (ExtentStripeLength == 1)
- # RAID 4: stripesize is 512 * (disk_count -1)
- #
- # Problem is: there is no SNIA spec said CIM_StorageSetting
- # should associate to CIM_CompositeExtent.
- # Since RAID 3/4 is rare in market, low priority.
- """
- if not cim_ext:
- return Pool.RAID_TYPE_UNKNOWN
- if 'DataRedundancy' not in cim_ext or \
- 'PackageRedundancy' not in cim_ext or \
- 'NoSinglePointOfFailure' not in cim_ext or \
- 'ExtentStripeLength' not in cim_ext:
- return Pool.RAID_TYPE_UNKNOWN
-
- # DataRedundancy:
- # Number of complete copies of data currently maintained.
- data_redundancy = cim_ext['DataRedundancy']
- # PackageRedundancy:
- # How many physical packages can currently fail without data loss.
- # For example, in the storage domain, this might be disk spindles.
- pack_redundancy = cim_ext['PackageRedundancy']
- # NoSinglePointOfFailure:
- # Indicates whether or not there exists no single point of
- # failure.
- no_spof = cim_ext['NoSinglePointOfFailure']
-
- # ExtentStripeLength:
- # Number of contiguous underlying StorageExtents counted before
- # looping back to the first underlying StorageExtent of the
- # current stripe. It is the number of StorageExtents forming the
- # user data stripe.
- stripe_len = cim_ext['ExtentStripeLength']
-
- # determine the RAID type as SNIA document require.
- # JBOD
- if ((data_redundancy == 1) and
- (pack_redundancy == 0) and
- (not no_spof) and
- (stripe_len == 1)):
- return Pool.RAID_TYPE_JBOD
- # RAID 0
- elif ((data_redundancy == 1) and
- (pack_redundancy == 0) and
- (not no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID0
- # RAID 1
- elif ((data_redundancy == 2) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len == 1)):
- return Pool.RAID_TYPE_RAID1
- # RAID 5
- elif ((data_redundancy == 1) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID5
- # RAID 6
- elif ((data_redundancy == 1) and
- (pack_redundancy == 2) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID6
- # RAID 10
- elif ((data_redundancy == 2) and
- (pack_redundancy == 1) and
- (no_spof) and
- (stripe_len >= 1)):
- return Pool.RAID_TYPE_RAID10
- # Base on these data, we cannot determine RAID 15 or 51 and etc.
- # In stead of providing incorrect info, we choose to provide nothing.
- return Pool.RAID_TYPE_UNKNOWN
-
- @handle_cim_errors
- def pool_delete(self, pool, flags=0):
- """
- Delete a Pool via CIM_StorageConfigurationService.DeleteStoragePool
- """
- if not self.fallback_mode and \
- self._profile_is_supported(SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4,
- strict=False) is None:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "SMI-S %s version %s is not supported" %
- (SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4))
-
- cim_pool = self._get_cim_instance_by_id('Pool', pool.id)
- cim_scs = self._get_class_instance(
- 'CIM_StorageConfigurationService',
- 'SystemName', pool.system_id)
-
- in_params = {'Pool': cim_pool.path}
-
- return self._pi("pool_delete", Smis.JOB_RETRIEVE_NONE,
- *(self._c.InvokeMethod('DeleteStoragePool',
- cim_scs.path,
- **in_params)))[0]
-
- @handle_cim_errors
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Creating pool via
- CIM_StorageConfigurationService.CreateOrModifyStoragePool()
- from SMI-S 1.4+ "Block Services" profile.
- TODO: Each vendor are needing different parameters for
- CreateOrModifyStoragePool()
- """
- if not self.fallback_mode and \
- self._profile_is_supported(SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4,
- strict=False) is None:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "SMI-S %s version %s is not supported" %
- (SNIA.BLK_SRVS_PROFILE,
- SNIA.SMIS_SPEC_VER_1_4))
-
- cim_sys = self._get_cim_instance_by_id('System', system.id)
-
- # we does not support defining thinp_type yet.
- # just using whatever provider set.
-
- in_params = {}
- if pool_name:
- in_params['ElementName'] = pool_name
-
- in_cim_exts_path = []
- if Pool.member_type_is_disk(member_type):
- disk_type = Pool.member_type_to_disk_type(member_type)
- if disk_type != Disk.DISK_TYPE_UNKNOWN:
- # We have to define InExtents for certain disk type.
- # SNIA 1.6.1 CIM_StorageSetting has these experimetal
- # properties:
- # DiskType, InterconnectType, InterconnectSpeed,
- # FormFactor, RPM, PortType.
- # But currently, no vendor implement that.
- # And there is no effective way to detect the free disks,
- # walking though all CIM_CompositeExtent is not a good idea.
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "The pool_create of SMI-S plugin does not "
- "support defining disk type in member_type")
- else:
- # We depend on SMI-S provider to chose the disks for us.
- pass
-
- elif member_type == Pool.MEMBER_TYPE_POOL:
- # I(Gris) have lost my access to IBM DS8000 which support pool
- # over pool. I will raise NO_SUPPORT until got array to test on.
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "The pool_create of SMI-S plugin does not "
- "support creating pool over pool(sub-pool) yet")
-
- elif member_type == Pool.MEMBER_TYPE_UNKNOWN:
- pass
- else:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Got invalid member_type %d" % member_type)
-
- in_params['Size'] = pywbem.Uint64(size_bytes)
-
- if raid_type != Pool.RAID_TYPE_UNKNOWN:
- in_params['Goal'] = self._cim_st_path_for_goal(
- raid_type, cim_sys.path)
-
- cim_scs = self._get_class_instance(
- 'CIM_StorageConfigurationService',
- 'SystemName', system.id)
-
- in_params = self._pool_chg_paras_check(in_params, cim_sys.path)
- return self._pi("pool_create", Smis.JOB_RETRIEVE_POOL,
- *(self._c.InvokeMethod(
- 'CreateOrModifyStoragePool',
- cim_scs.path, **in_params)))
-
- @handle_cim_errors
- def _find_preset_cim_st(self, cim_cap_path, raid_type):
- """
- Usage:
- Find first proper CIM_StorageSetting under speficied
- CIM_StorageCapabilities by giving raid_type.
- Thin pool prefered.
- Parameter:
- cim_cap_path # CIMInstanceName of CIM_StorageCapabilities
- raid_type # Pool.RAID_TYPE_XXX
- Returns:
- cim_st # CIMInstance of CIM_StorageSetting
- or
- None # No match found
- """
- cim_sts = self._c.Associators(
- cim_cap_path,
- AssocClass='CIM_StorageSettingsAssociatedToCapabilities',
- ResultClass='CIM_StorageSetting',
- PropertyList=['ElementName',
- 'ThinProvisionedPoolType'])
- if not cim_sts:
- return None
- possible_element_names = []
- if raid_type == Pool.RAID_TYPE_JBOD:
- possible_element_names = ['JBOD']
- elif (raid_type == Pool.RAID_TYPE_RAID0 or
- raid_type == Pool.RAID_TYPE_NOT_APPLICABLE):
- possible_element_names = ['RAID0']
- elif raid_type == Pool.RAID_TYPE_RAID1:
- possible_element_names = ['RAID1']
- elif raid_type == Pool.RAID_TYPE_RAID3:
- possible_element_names = ['RAID3']
- elif raid_type == Pool.RAID_TYPE_RAID4:
- possible_element_names = ['RAID4']
- elif raid_type == Pool.RAID_TYPE_RAID5:
- possible_element_names = ['RAID5']
- elif raid_type == Pool.RAID_TYPE_RAID6:
- # According to SNIA suggest, RAID6 can also be writen as RAID5DP
- # and etc.
- possible_element_names = ['RAID6', 'RAID5DP']
- elif raid_type == Pool.RAID_TYPE_RAID10:
- possible_element_names = ['RAID10', 'RAID1+0']
- elif raid_type == Pool.RAID_TYPE_RAID50:
- possible_element_names = ['RAID50', 'RAID5+0']
- elif raid_type == Pool.RAID_TYPE_RAID60:
- possible_element_names = ['RAID60', 'RAID6+0', 'RAID5DP+0']
- elif raid_type == Pool.RAID_TYPE_RAID51:
- possible_element_names = ['RAID51', 'RAID5+1']
- elif raid_type == Pool.RAID_TYPE_RAID61:
- possible_element_names = ['RAID61', 'RAID6+1', 'RAID5DP+1']
- else:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "Got unknown RAID type: %d" % raid_type)
-
- chose_cim_sts = []
- for cim_st in cim_sts:
- if cim_st['ElementName'] in possible_element_names:
- chose_cim_sts.extend([cim_st])
-
- if len(chose_cim_sts) == 1:
- return chose_cim_sts[0]
-
- elif len(chose_cim_sts) > 1:
- # Perfer the thin pool. This is for EMC VNX which support both
- # think pool(less feature) and thin pool.
- for cim_st in chose_cim_sts:
- if cim_st['ThinProvisionedPoolType'] == \
- Smis.DMTF_THINP_POOL_TYPE_ALLOCATED:
- return cim_st
-
- # Return the first one if no thin pool setting found.
- return chose_cim_sts[0]
-
- return None
-
- def _cim_st_path_for_goal(self, raid_type, cim_sys_path):
- """
- Usage:
- Find out the array pre-defined CIM_StorageSetting for certain RAID
- Level. Check CIM_StorageSetting['ElementName'] for RAID type.
- Even SNIA defined a way to create new setting, but we find out
- that not a good way to follow.
- Pool.RAID_TYPE_NOT_APPLICABLE will be treat as RAID 0.
- # TODO: currently no check we will get one member for
- # Pool.RAID_TYPE_NOT_APPLICABLE. Maybe we should replace
- # this RAID type by RAID_0.
- Parameter:
- raid_type # Tier.RAID_TYPE_XXX
- cim_sys_path # CIMInstanceName of CIM_ComputerSystem.
- Returns:
- cim_st_path # Found or created CIMInstanceName of
- # CIM_StorageSetting
- Exceptions:
- LsmError
- ErrorNumber.NO_SUPPORT # Failed to find out
- # suitable CIM_StorageSetting
- """
- chose_cim_st = None
- # We will try to find the existing CIM_StorageSetting
- # with ElementName equal to raid_type_str
- # potted(pre-defined) CIM_StorageSetting
- cim_pool_path = None
- cim_pools = self._c.Associators(cim_sys_path,
- ResultClass='CIM_StoragePool',
- PropertyList=['Primordial'])
- # Base on SNIA commanded, each array should provide a
- # Primordial pool.
- for cim_tmp_pool in cim_pools:
- if cim_tmp_pool['Primordial']:
- cim_pool_path = cim_tmp_pool.path
- break
- if not cim_pool_path:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Target storage array does not have any "
- "Primordial CIM_StoragePool")
- cim_caps = self._c.Associators(
- cim_pool_path,
- ResultClass='CIM_StorageCapabilities',
- PropertyList=['ElementType'])
- for cim_cap in cim_caps:
- tmp_cim_st_set = self._find_preset_cim_st(cim_cap.path, raid_type)
- if tmp_cim_st_set:
- return tmp_cim_st_set.path
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support RAID type: %d"
- % raid_type)
-
- def _pool_chg_paras_check(self, in_params, cim_sys_path):
- """
- Usage:
- CIM_StorageConfigurationCapabilities
- ['SupportedStoragePoolFeatures'] provide indication what
- parameters current array support when CreateOrModifyStoragePool()
- We will filter out the unsupported parameters.
- Parameter:
- in_params # a dict will be used for CreateOrModifyStoragePool()
- Returns:
- new_in_params # a dict of updated parameters
- """
- # EMC vendor specific value for thick pool.
- EMC_THINP_POOL_TYPE_THICK = 0
- new_in_params = in_params
- cim_scss = self._c.AssociatorNames(
- cim_sys_path,
- AssocClass='CIM_HostedService',
- ResultClass='CIM_StorageConfigurationService',)
- if len(cim_scss) != 1:
- return new_in_params
- cim_sccs = self._c.Associators(
- cim_scss[0],
- AssocClass='CIM_ElementCapabilities',
- ResultClass='CIM_StorageConfigurationCapabilities',
- PropertyList=['SupportedStoragePoolFeatures'])
- if len(cim_sccs) != 1:
- return new_in_params
-
- cur_features = cim_sccs[0]['SupportedStoragePoolFeatures']
- if 'InExtents' in new_in_params:
- if Smis.DMTF_ST_POOL_FEATURE_INEXTS not in cur_features:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from Volume or Disk")
- if 'InPools' in new_in_params:
- if Smis.DMTF_ST_POOL_FEATURE_MULTI_INPOOL not in cur_features \
- and Smis.DMTF_ST_POOL_FEATURE_SINGLE_INPOOL not in cur_features:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from Pool")
- if Smis.DMTF_ST_POOL_FEATURE_SINGLE_INPOOL in cur_features \
- and len(new_in_params['InPools']) > 1:
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "Current array does not support " +
- "creating Pool from multiple pools")
- # Vendor specific check
- if cim_sys_path.classname == 'Clar_StorageSystem':
- if 'Goal' in new_in_params and 'ElementName' in new_in_params:
- ## EMC VNX/CX RAID Group should not define a ElementName.
- cim_st_path = new_in_params['Goal']
- cim_st = self._c.GetInstance(
- cim_st_path,
- PropertyList=['ThinProvisionedPoolType'],
- LocalOnly=False)
- if cim_st['ThinProvisionedPoolType'] == \
- EMC_THINP_POOL_TYPE_THICK:
- del new_in_params['ElementName']
- if 'Pool' in new_in_params and 'Goal' in new_in_params:
- ## Expanding VNX/CX Pool/RAID Group shoud not define Goal
- ## Should we raise a error here?
- raise LsmError(ErrorNumber.NO_SUPPORT,
- "EMC VNX/CX does not allowed change RAID " +
- "type or add different RAID type tier")
- return new_in_params
-
def _profile_is_supported(self, profile_name, spec_ver, strict=False,
raise_error=False):
"""
diff --git a/python_binding/lsm/_client.py b/python_binding/lsm/_client.py
index 00b9184..e7db99f 100644
--- a/python_binding/lsm/_client.py
+++ b/python_binding/lsm/_client.py
@@ -297,104 +297,6 @@ class Client(INetworkAttachedStorage):
_check_search_key(search_key, Pool.SUPPORTED_SEARCH_KEYS)
return self._tp.rpc('pools', _del_self(locals()))
- ## Create new pool in user friendly way. Depending on this capability:
- ## Capabilities.POOL_CREATE
- ## For plugin developer: this method require complex codes to chose
- ## pool members, please refer to SimData.pool_create() in simarray.py for
- ## sample codes.
- ## Return the newly created pool object.
- # @param self The this pointer
- # @param system The system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param size_bytes The size in bytes for new pool.
- # New pool can have equal or larger size than
- # requested, but not less. Should larger than 0.
- # @param raid_type Optional. If defined, new pool should using
- # defined RAID type.
- # When member_type was set to Pool.MEMBER_TYPE_POOL,
- # only allowed raid_type is RAID_TYPE_UNKNOWN or
- # RAID_TYPE_NOT_APPLICABLE
- # @param member_type Optional. If defined, new pool will be assembled
- # by defined member types. For example;
- # when member_type == Pool.MEMBER_TYPE_DISK_SAS,
- # new pool will be created from SAS disks only.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create(self, system, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Returns the created new pool object.
- """
- if size_bytes <= 0:
- raise LsmError(ErrorNumber.INVALID_ARGUMENT,
- "'size_bytes' should larger than 0")
- return self._tp.rpc('pool_create', _del_self(locals()))
-
- ## Create new pool in the hard way by defined what exactly disks should
- ## be used. Depending on these capabilities:
- ## Capabilities.POOL_CREATE_FROM_DISKS
- ## Return the newly created pool object with all supported optional data.
- # @param self The this pointer
- # @param system_id The id of system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param disks The disks (list) to create new pool from.
- # The new pool could contain more disks than
- # requested due to internal needs, but if possible,
- # new pool should only contain requested disks.
- # @param raid_type The RAID level for new pool.
- # Capabilities.POOL_CREATE_DISK_RAID_XXX will
- # indicate the supported RAID level.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create_from_disks(self, system_id, pool_name, disks,
- raid_type, flags=0):
- """
- Creates pool from disks.
- Returns the created new pool object.
- """
- return self._tp.rpc('pool_create_from_disks', _del_self(locals()))
-
- ## Create new pool in the hard way by defined what exactly pool should
- ## be allocate space from. Depending on this capability:
- ## Capabilities.POOL_CREATE_FROM_POOL
- ## Return the newly created pool object with all supported optional data.
- # @param self The this pointer
- # @param system_id The id of system where new pool should reside.
- # @param pool_name The name for new pool. Will not fail if created
- # pool_name is not the same as requested.
- # @param pool The pool to allocate space from for new pool.
- # @param size_bytes The size of the new pool.
- # @param flags Reserved for future use.
- # @returns A tuple (job_id, new_pool), when one is None the other is
- # valid.
- @_return_requires(unicode, Pool)
- def pool_create_from_pool(self, system_id, pool_name, pool,
- size_bytes, flags=0):
- """
- Creates pool from volumes.
- Returns the created new pool object.
- """
- return self._tp.rpc('pool_create_from_pool', _del_self(locals()))
-
- ## Remove a pool. This method depend on Capabilities.POOL_DELETE
- # @param self The this pointer
- # @param pool The pool object
- # @param flags Reserved for future use, must be zero.
- # @returns None on success, else job id. Raises LsmError on errors.
- @_return_requires(unicode)
- def pool_delete(self, pool, flags=0):
- """
- Return None on success, else job id. Raises LsmError on errors.
- """
- return self._tp.rpc('pool_delete', _del_self(locals()))
-
## Returns an array of system objects.
# @param self The this pointer
# @param flags Reserved for future use, must be zero.
diff --git a/python_binding/lsm/_common.py b/python_binding/lsm/_common.py
index d11c170..f1674eb 100644
--- a/python_binding/lsm/_common.py
+++ b/python_binding/lsm/_common.py
@@ -457,7 +457,6 @@ class ErrorNumber(object):
NOT_FOUND_VOLUME = 205
NOT_FOUND_NFS_EXPORT = 206
NOT_FOUND_SYSTEM = 208
- NOT_FOUND_DISK = 209
NOT_LICENSED = 226
@@ -480,7 +479,6 @@ class ErrorNumber(object):
TRANSPORT_SERIALIZATION = 401
TRANSPORT_INVALID_ARG = 402
- DISK_BUSY = 500
VOLUME_BUSY = 501
ACCESS_GROUP_MASKED = 502 # refuse to remove the last initiator from
# access group which have volume masked or
diff --git a/python_binding/lsm/_data.py b/python_binding/lsm/_data.py
index 17df3ec..3aba6c8 100644
--- a/python_binding/lsm/_data.py
+++ b/python_binding/lsm/_data.py
@@ -371,97 +371,6 @@ class Pool(IData):
TOTAL_SPACE_NOT_FOUND = -1
FREE_SPACE_NOT_FOUND = -1
- STRIPE_SIZE_NOT_FOUND = -1
-
- # RAID_xx name was following SNIA SMI-S 1.4 rev6 Block Book,
- # section '14.1.5.3', Table 255 - Supported Common RAID Levels
- RAID_TYPE_RAID0 = 0
- RAID_TYPE_RAID1 = 1
- RAID_TYPE_RAID3 = 3
- RAID_TYPE_RAID4 = 4
- RAID_TYPE_RAID5 = 5
- RAID_TYPE_RAID6 = 6
- RAID_TYPE_RAID10 = 10
- RAID_TYPE_RAID15 = 15
- RAID_TYPE_RAID16 = 16
- RAID_TYPE_RAID50 = 50
- RAID_TYPE_RAID60 = 60
- RAID_TYPE_RAID51 = 51
- RAID_TYPE_RAID61 = 61
- # number 2x is reserved for non-numbered RAID.
- RAID_TYPE_JBOD = 20
- RAID_TYPE_UNKNOWN = 21
- RAID_TYPE_NOT_APPLICABLE = 22
- # NOT_APPLICABLE indicate current pool only has one member.
- RAID_TYPE_MIXED = 23
-
- MEMBER_TYPE_UNKNOWN = 0
- MEMBER_TYPE_DISK = 1
- MEMBER_TYPE_DISK_MIX = 10
- MEMBER_TYPE_DISK_ATA = 11
- MEMBER_TYPE_DISK_SATA = 12
- MEMBER_TYPE_DISK_SAS = 13
- MEMBER_TYPE_DISK_FC = 14
- MEMBER_TYPE_DISK_SOP = 15
- MEMBER_TYPE_DISK_SCSI = 16
- MEMBER_TYPE_DISK_NL_SAS = 17
- MEMBER_TYPE_DISK_HDD = 18
- MEMBER_TYPE_DISK_SSD = 19
- MEMBER_TYPE_DISK_HYBRID = 110
- MEMBER_TYPE_DISK_LUN = 111
-
- MEMBER_TYPE_POOL = 2
-
- _MEMBER_TYPE_2_DISK_TYPE = {
- MEMBER_TYPE_DISK: Disk.DISK_TYPE_UNKNOWN,
- MEMBER_TYPE_DISK_MIX: Disk.DISK_TYPE_UNKNOWN,
- MEMBER_TYPE_DISK_ATA: Disk.DISK_TYPE_ATA,
- MEMBER_TYPE_DISK_SATA: Disk.DISK_TYPE_SATA,
- MEMBER_TYPE_DISK_SAS: Disk.DISK_TYPE_SAS,
- MEMBER_TYPE_DISK_FC: Disk.DISK_TYPE_FC,
- MEMBER_TYPE_DISK_SOP: Disk.DISK_TYPE_SOP,
- MEMBER_TYPE_DISK_SCSI: Disk.DISK_TYPE_SCSI,
- MEMBER_TYPE_DISK_NL_SAS: Disk.DISK_TYPE_NL_SAS,
- MEMBER_TYPE_DISK_HDD: Disk.DISK_TYPE_HDD,
- MEMBER_TYPE_DISK_SSD: Disk.DISK_TYPE_SSD,
- MEMBER_TYPE_DISK_HYBRID: Disk.DISK_TYPE_HYBRID,
- MEMBER_TYPE_DISK_LUN: Disk.DISK_TYPE_LUN,
- }
-
- @staticmethod
- def member_type_is_disk(member_type):
- """
- Returns True if defined 'member_type' is disk.
- False when else.
- """
- return member_type in Pool._MEMBER_TYPE_2_DISK_TYPE
-
- @staticmethod
- def member_type_to_disk_type(member_type):
- """
- Convert member_type to disk_type.
- For non-disk member, we return Disk.DISK_TYPE_NOT_APPLICABLE
- """
- return Pool._MEMBER_TYPE_2_DISK_TYPE.get(member_type,
- Disk.DISK_TYPE_NOT_APPLICABLE)
-
- @staticmethod
- def disk_type_to_member_type(disk_type):
- """
- Convert disk_type to Pool.MEMBER_TYPE_DISK_XXXX
- Will return Pool.MEMBER_TYPE_DISK as failback.
- """
- # Invert dict. Assumes values are unique.
- inv_dict = dict((v, k)
- for k, v in Pool._MEMBER_TYPE_2_DISK_TYPE.iteritems())
- return inv_dict.get(disk_type, Pool.MEMBER_TYPE_DISK)
-
- THINP_TYPE_UNKNOWN = 0
- THINP_TYPE_THIN = 1
- THINP_TYPE_THICK = 5
- THINP_TYPE_NOT_APPLICABLE = 6
- # NOT_APPLICABLE means current pool is not implementing Thin Provisioning,
- # but can create thin or thick pool from it.
# Element Type indicate what kind of element could this pool create:
# * Another Pool
@@ -474,79 +383,24 @@ class Pool(IData):
ELEMENT_TYPE_DELTA = 1 << 4
ELEMENT_TYPE_SYS_RESERVED = 1 << 10 # Reserved for system use
- MAX_POOL_STATUS_BITS = 64
# Pool status could be any combination of these status.
STATUS_UNKNOWN = 1 << 0
- # UNKNOWN:
- # Failed to query out the status of Pool.
STATUS_OK = 1 << 1
- # OK:
- # Pool is accessible with no issue.
STATUS_OTHER = 1 << 2
- # OTHER:
- # Should explain in Pool.status_info for detail.
 STATUS_STRESSED = 1 << 3
- # STRESSED:
- # Pool is under heavy workload which cause bad I/O performance.
STATUS_DEGRADED = 1 << 4
- # DEGRADED:
- # Pool is accessible but lost full RAID protection due to
- # I/O error or offline of one or more RAID member.
- # Example:
- # * RAID 6 pool lost access to 1 disk or 2 disks.
- # * RAID 5 pool lost access to 1 disk.
- # May explain detail in Pool.status_info.
- # Example:
- # * Pool.status = 'Disk 0_0_1 offline'
STATUS_ERROR = 1 << 5
- # OFFLINE:
- # Pool is not accessible for internal issue.
- # Should explain in Pool.status_info for reason.
STATUS_STARTING = 1 << 7
- # STARTING:
- # Pool is reviving from STOPPED status. Pool is not accessible.
STATUS_STOPPING = 1 << 8
- # STOPPING:
- # Pool is stopping by administrator. Pool is not accessible.
STATUS_STOPPED = 1 << 9
- # STOPPING:
- # Pool is stopped by administrator. Pool is not accessible.
STATUS_READ_ONLY = 1 << 10
- # READ_ONLY:
- # Pool is read only.
- # Pool.status_info should explain why.
STATUS_DORMANT = 1 << 11
- # DORMANT:
- # Pool is not accessible.
- # It's not stopped by administrator, but stopped for some mechanism.
- # For example, The DR pool acting as the SYNC replication target will be
- # in DORMANT state, As long as the PR(production) pool alive.
- # Another example could relocating.
STATUS_RECONSTRUCTING = 1 << 12
- # RECONSTRUCTING:
- # Pool is reconstructing the hash data or mirror data.
- # Mostly happen when disk revive from offline or disk replaced.
- # Pool.status_info can contain progress of this reconstruction job.
STATUS_VERIFYING = 1 << 13
- # VERIFYING:
- # Array is running integrity check on data of current pool.
- # It might be started by administrator or array itself.
- # Pool.status_info can contain progress of this verification job.
STATUS_INITIALIZING = 1 << 14
- # INITIALIZING:
- # Pool is in initialing state.
- # Mostly shown when new pool created or array boot up.
STATUS_GROWING = 1 << 15
- # GROWING:
- # Pool is growing its size and doing internal jobs.
- # Pool.status_info can contain progress of this growing job.
STATUS_SHRINKING = 1 << 16
- # SHRINKING:
- # Pool is shrinking its size and doing internal jobs.
- # Pool.status_info can contain progress of this shrinking job.
STATUS_DESTROYING = 1 << 17
- # DESTROYING:
- # Array is removing current pool.
def __init__(self, _id, _name, _element_type, _total_space, _free_space,
_status, _status_info, _system_id, _plugin_data=None):
@@ -886,29 +740,6 @@ class Capabilities(IData):
EXPORT_REMOVE = 123
EXPORT_CUSTOM_PATH = 124
- #Pool
- POOL_CREATE = 130
- POOL_CREATE_FROM_DISKS = 131
- POOL_CREATE_FROM_POOL = 133
-
- POOL_CREATE_DISK_RAID_0 = 140
- POOL_CREATE_DISK_RAID_1 = 141
- POOL_CREATE_DISK_RAID_JBOD = 142
- POOL_CREATE_DISK_RAID_3 = 143
- POOL_CREATE_DISK_RAID_4 = 144
- POOL_CREATE_DISK_RAID_5 = 145
- POOL_CREATE_DISK_RAID_6 = 146
- POOL_CREATE_DISK_RAID_10 = 147
- POOL_CREATE_DISK_RAID_50 = 148
- POOL_CREATE_DISK_RAID_51 = 149
- POOL_CREATE_DISK_RAID_60 = 150
- POOL_CREATE_DISK_RAID_61 = 151
- POOL_CREATE_DISK_RAID_15 = 152
- POOL_CREATE_DISK_RAID_16 = 153
- POOL_CREATE_DISK_RAID_NOT_APPLICABLE = 154
-
- POOL_DELETE = 200
-
POOLS_QUICK_SEARCH = 210
VOLUMES_QUICK_SEARCH = 211
DISKS_QUICK_SEARCH = 212
diff --git a/python_binding/lsm/_iplugin.py b/python_binding/lsm/_iplugin.py
index e1f7d4c..5973df6 100644
--- a/python_binding/lsm/_iplugin.py
+++ b/python_binding/lsm/_iplugin.py
@@ -129,40 +129,6 @@ class IPlugin(object):
class IStorageAreaNetwork(IPlugin):
- def pool_create(self, system_id, pool_name, size_bytes,
- raid_type=Pool.RAID_TYPE_UNKNOWN,
- member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
- """
- Creates a pool letting the array pick the specifics
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
- def pool_create_from_disks(self, system_id, pool_name, member_ids,
- raid_type, flags=0):
- """
- Creates a pool letting the user select the disks
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
- def pool_create_from_pool(self, system_id, pool_name, member_id,
- size_bytes, flags=0):
- """
- Creates a pool from existing volumes
-
- Returns a tuple (job_id, re-sized_volume)
- Note: Tuple return values are mutually exclusive, when one
- is None the other must be valid.
- """
- raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported")
-
def volumes(self, search_key=None, search_value=None, flags=0):
"""
Returns an array of volume objects
diff --git a/test/plugin_test.py b/test/plugin_test.py
index ff4aafc..83f1b3b 100755
--- a/test/plugin_test.py
+++ b/test/plugin_test.py
@@ -107,12 +107,7 @@ def supported(cap, capability):
class TestProxy(object):
# Hash of all calls that can be async
- async_calls = {'pool_create': (unicode, lsm.Pool),
- 'pool_create_from_disks': (unicode, lsm.Pool),
- 'pool_create_from_volumes': (unicode, lsm.Pool),
- 'pool_create_from_pool': (unicode, lsm.Pool),
- 'pool_delete': (unicode,),
- 'volume_create': (unicode, lsm.Volume),
+ async_calls = {'volume_create': (unicode, lsm.Volume),
'volume_resize': (unicode, lsm.Volume),
'volume_replicate': (unicode, lsm.Volume),
'volume_replicate_range': (unicode,),
@@ -325,9 +320,6 @@ class TestPlugin(unittest.TestCase):
disks = self.c.disks()
self.assertTrue(len(disks) > 0, "We need at least 1 disk to test")
- def test_pool_create(self):
- pass
-
def _volume_create(self, system_id):
if system_id in self.pool_by_sys_id:
p = self._get_pool_by_usage(system_id,
diff --git a/test/tester.c b/test/tester.c
index 5ce641f..a0e23e3 100644
--- a/test/tester.c
+++ b/test/tester.c
@@ -1790,47 +1790,6 @@ START_TEST(test_invalid_input)
resized = NULL;
fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);
-
- /* Pool create */
- int raid_type = 65535;
- int member_type = 65535;
- uint64_t size = 0;
- int flags = 10;
-
- rc = lsm_pool_create(NULL, NULL, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, NULL, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, system, NULL, size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- size = 1024*1024*1024;
-
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- raid_type = LSM_POOL_RAID_TYPE_0;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- member_type = LSM_POOL_MEMBER_TYPE_DISK;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, NULL, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- lsm_pool *pcp = NULL;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, &pcp, NULL, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
- char *pcj = NULL;
- rc = lsm_pool_create(c, system, "pool name", size, raid_type, member_type, &pcp, &pcj, flags);
- fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);
-
-
rc = lsm_system_record_array_free(sys, num_systems);
fail_unless(LSM_ERR_OK == rc, "%d", rc);
@@ -2281,173 +2240,6 @@ START_TEST(test_nfs_export_funcs)
}
END_TEST
-START_TEST(test_pool_delete)
-{
- int rc = 0;
- char *job = NULL;
- lsm_volume *v = NULL;
-
- printf("Testing pool delete!\n");
-
- lsm_pool *test_pool = get_test_pool(c);
-
- fail_unless( test_pool != NULL );
-
- if( test_pool ) {
-
- rc = lsm_volume_create(c, test_pool, "lsm_volume_pool_remove_test",
- 10000000, LSM_PROVISION_DEFAULT,
- &v, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- v = wait_for_job_vol(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
-
- if( v ) {
-
- rc = lsm_pool_delete(c, test_pool, &job, LSM_FLAG_RSVD);
-
- fail_unless(LSM_ERR_EXISTS_VOLUME == rc, "rc %d", rc);
-
- if( LSM_ERR_EXISTS_VOLUME == rc ) {
-
- /* Delete the volume and try again */
- rc = lsm_volume_delete(c, v, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- wait_for_job(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
-
- rc = lsm_pool_delete(c, test_pool, &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- wait_for_job(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d", rc);
- }
- }
- }
-
- G(rc, lsm_pool_record_free, test_pool);
- test_pool = NULL;
- G(rc, lsm_volume_record_free, v);
- v = NULL;
- }
-}
-END_TEST
-
-START_TEST(test_pool_create)
-{
- int rc = 0;
- lsm_pool *pool = NULL;
- char *job = NULL;
- lsm_disk **disks = NULL;
- uint32_t num_disks = 0;
- lsm_pool *pool_one = NULL;
- lsm_system *system = get_system(c);
-
- /*
- * Test basic pool create option.
- */
- rc = lsm_pool_create(c, system, "pool_create_unit_test", 1024*1024*1024,
- LSM_POOL_RAID_TYPE_0, LSM_POOL_MEMBER_TYPE_DISK, &pool,
- &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "rc %d which_plugin %d", rc,
- which_plugin);
- }
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
-
- /*
- * Test pool creations from disks
- */
- rc = lsm_disk_list(c, NULL, NULL, &disks, &num_disks, LSM_FLAG_RSVD);
- lsm_disk *disks_to_use[128];
- uint32_t num_disks_to_use = 0;
-
- memset(disks_to_use, 0, sizeof(disks_to_use));
- fail_unless(LSM_ERR_OK == rc, "rc = %d", rc);
- if( LSM_ERR_OK == rc && num_disks ) {
- int i = 0;
-
- /* Python simulator one accepts same type and size */
- lsm_disk_type disk_type = lsm_disk_type_get(disks[num_disks-1]);
- uint64_t size = lsm_disk_number_of_blocks_get(disks[num_disks-1]);
-
- for( i = 0; i < num_disks; ++i ) {
- /* Only include disks of one type */
- if( lsm_disk_type_get(disks[i]) == disk_type &&
- size == lsm_disk_number_of_blocks_get(disks[i])) {
-
- disks_to_use[num_disks_to_use] = disks[i];
- num_disks_to_use += 1;
- }
- }
- }
-
- rc = lsm_pool_create_from_disks(c, system, "pool_create_from_disks",
- disks_to_use, num_disks_to_use,
- LSM_POOL_RAID_TYPE_0, &pool, &job,
- LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc, "lsmPoolCreateFromDisks %d (%s)", rc,
- error(lsm_error_last_get(c)));
- }
-
- G(rc, lsm_disk_record_array_free, disks, num_disks);
- memset(disks_to_use, 0, sizeof(disks_to_use));
-
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
-
- /* Test pool creation from pool */
- {
- if( pool_one ) {
- pool = NULL;
- job = NULL;
-
- rc = lsm_pool_create_from_pool(c, system, "New pool from pool",
- pool_one, 1024*1024*1024, &pool,
- &job, LSM_FLAG_RSVD);
-
- if( LSM_ERR_JOB_STARTED == rc ) {
- pool = wait_for_job_pool(c, &job);
- } else {
- fail_unless(LSM_ERR_OK == rc,
- "lsmPoolCreateFromVolumes %d (%s)",
- rc, error(lsm_error_last_get(c)));
- }
-
- G(rc, lsm_pool_record_free, pool);
- pool = NULL;
- }
- }
-
- if( pool_one ) {
- G(rc, lsm_pool_record_free, pool_one);
- pool_one = NULL;
- }
-
- if( system ) {
- G(rc, lsm_system_record_free, system);
- system = NULL;
- }
-}
-END_TEST
-
START_TEST(test_uri_parse)
{
const char uri_g[] = "sim://***@host:123/path/?namespace=root/uber";
@@ -2877,8 +2669,6 @@ Suite * lsm_suite(void)
tcase_add_test(basic, test_search_volumes);
tcase_add_test(basic, test_search_pools);
- tcase_add_test(basic, test_pool_delete);
- tcase_add_test(basic, test_pool_create);
tcase_add_test(basic, test_uri_parse);
tcase_add_test(basic, test_error_reporting);
diff --git a/tools/lsmcli/cmdline.py b/tools/lsmcli/cmdline.py
index 8d65abf..c7f9862 100644
--- a/tools/lsmcli/cmdline.py
+++ b/tools/lsmcli/cmdline.py
@@ -39,7 +39,6 @@ from lsm import (Client, Pool, VERSION, LsmError, Disk,
from lsm.lsmcli.data_display import (
DisplayData, PlugData, out,
- pool_raid_type_str_to_type, pool_member_type_str_to_type,
vol_provision_str_to_type, vol_rep_type_str_to_type,
ag_init_type_str_to_lsm)
@@ -133,30 +132,6 @@ replicate_help = "replication type: " + ", ".join(replicate_types)
size_help = 'Can use B, KiB, MiB, GiB, TiB, PiB postfix (IEC sizing)'
-member_types = ('DISK', 'VOLUME', 'POOL', 'DISK_ATA', 'DISK_SATA',
- 'DISK_SAS', 'DISK_FC', 'DISK_SOP', 'DISK_SCSI', 'DISK_NL_SAS',
- 'DISK_HDD', 'DISK_SSD', 'DISK_HYBRID')
-
-member_types_formatted = ''
-for i in range(0, len(member_types), 4):
- member_types_formatted += "\n "
- for member_type_str in member_types[i:i + 4]:
- member_types_formatted += "%-15s" % member_type_str
-
-member_help = "Valid member type: " + member_types_formatted
-
-raid_types = ('JBOD', 'RAID0', 'RAID1', 'RAID3', 'RAID4', 'RAID5', 'RAID6',
- 'RAID10', 'RAID50', 'RAID60', 'RAID51', 'RAID61',
- 'NOT_APPLICABLE')
-
-raid_types_formatted = ''
-for i in range(0, len(raid_types), 4):
- raid_types_formatted += "\n "
- for raid_type_str in raid_types[i:i + 4]:
- raid_types_formatted += "%-15s" % raid_type_str
-
-raid_help = "Valid RAID type:" + raid_types_formatted
-
sys_id_opt = dict(name='--sys', metavar='<SYS_ID>', help='System ID')
sys_id_filter_opt = sys_id_opt.copy()
sys_id_filter_opt['help'] = 'Search by System ID'
@@ -603,66 +578,6 @@ cmds = (
],
),
- dict(
- name='pool-create',
- help='Creates a storage pool',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(size_opt),
- ],
- optional=[
- dict(name="--raid-type", metavar='<RAID_TYPE>',
- help=raid_help,
- choices=raid_types,
- type=str.upper),
- dict(name="--member-type", metavar='<MEMBER_TYPE>',
- help=member_help,
- choices=member_types),
- ],
- ),
-
- dict(
- name='pool-create-from-disks',
- help='Creates a storage pool from disks',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(name="--member-id", metavar='<MEMBER_ID>',
- help='The ID of disks to create new pool\n'
- 'This is a repeatable argument',
- action='append'),
- dict(name="--raid-type", metavar='<RAID_TYPE>',
- help=raid_help,
- choices=raid_types,
- type=str.upper),
- ],
- ),
-
- dict(
- name='pool-create-from-pool',
- help='Creates a sub-pool from another storage pool',
- args=[
- dict(sys_id_opt),
- dict(name="--name", metavar="<POOL_NAME>",
- help="Human friendly name for new pool"),
- dict(name="--member-id", metavar='<POOL_ID>',
- help='The ID of pool to create new pool from\n',
- action='append'),
- dict(name="--size", metavar='<SIZE>',
- help='The size of new pool'),
- ],
- ),
-
- dict(
- name='pool-delete',
- help='Deletes a storage pool',
- args=[
- dict(pool_id_opt),
- ],
- ),
)
aliases = (
@@ -1389,95 +1304,6 @@ class CmdLine:
args.file),
None)
- ## Deletes a pool
- def pool_delete(self, args):
- pool = _get_item(self.c.pools(), args.pool, "pool id")
- if self.confirm_prompt(True):
- self._wait_for_it("pool-delete",
- self.c.pool_delete(pool),
- None)
-
- ## Creates a pool
- def pool_create(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- pool_name = args.name
- raid_type = Pool.RAID_TYPE_UNKNOWN
- member_type = Pool.MEMBER_TYPE_UNKNOWN
- size_bytes = self._size(self.args.size)
-
- if args.raid_type:
- raid_type = pool_raid_type_str_to_type(
- self.args.raid_type)
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- raise ArgError("Unknown RAID type specified: %s" %
- args.raid_type)
-
- if args.member_type:
- member_type = pool_member_type_str_to_type(
- args.member_type)
- if member_type == Pool.MEMBER_TYPE_UNKNOWN:
- raise ArgError("Unknown member type specified: %s" %
- args.member_type)
-
- pool = self._wait_for_it("pool-create",
- *self.c.pool_create(system,
- pool_name,
- size_bytes,
- raid_type,
- member_type,
- 0))
- self.display_data([pool])
-
- def pool_create_from_disks(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- if len(args.member_id) <= 0:
- raise ArgError("No disk ID was provided for new pool")
-
- member_ids = args.member_id
- disks_to_use = []
- disks = self.c.disks()
- disk_ids = dict((x.id, x) for x in disks)
- for member_id in member_ids:
- if member_id not in disk_ids:
- raise ArgError("Invalid Disk ID specified in " +
- "--member-id %s " % member_id)
- else:
- disks_to_use.append(disk_ids[member_id])
-
- raid_type = pool_raid_type_str_to_type(self.args.raid_type)
- if raid_type == Pool.RAID_TYPE_UNKNOWN:
- raise ArgError("Unknown RAID type specified: %s" %
- self.args.raid_type)
-
- pool_name = args.name
- pool = self._wait_for_it(
- "pool-create-from-disks",
- *self.c.pool_create_from_disks(
- system, pool_name, disks_to_use, raid_type, 0))
- self.display_data([pool])
-
- def pool_create_from_pool(self, args):
- system = _get_item(self.c.systems(), args.sys, "system id")
- if len(args.member_id) <= 0:
- raise ArgError("No volume ID was provided for new pool")
-
- member_ids = args.member_id
- if len(member_ids) > 1:
- raise ArgError("Two or more member defined, but creating pool " +
- "from pool only allow one member pool")
-
- member_id = member_ids[0]
- pool = _get_item(self.c.pools(), member_id, "pool id")
-
- size_bytes = self._size(self.args.size)
-
- pool_name = args.name
- pool = self._wait_for_it(
- "pool-create-from-pool",
- *self.c.pool_create_from_pool(
- system, pool_name, pool, size_bytes, 0))
- self.display_data([pool])
-
def _read_configfile(self):
"""
Set uri from config file. Will be overridden by cmdline option or
diff --git a/tools/lsmcli/data_display.py b/tools/lsmcli/data_display.py
index 9a18cd5..3c7a83d 100644
--- a/tools/lsmcli/data_display.py
+++ b/tools/lsmcli/data_display.py
@@ -131,81 +131,6 @@ def pool_element_type_to_str(element_type):
return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV)
-_POOL_RAID_TYPE_CONV = {
- Pool.RAID_TYPE_RAID0: 'RAID0', # stripe
- Pool.RAID_TYPE_RAID1: 'RAID1', # mirror
- Pool.RAID_TYPE_RAID3: 'RAID3', # byte-level striping with dedicated
- # parity
- Pool.RAID_TYPE_RAID4: 'RAID4', # block-level striping with dedicated
- # parity
- Pool.RAID_TYPE_RAID5: 'RAID5', # block-level striping with distributed
- # parity
- Pool.RAID_TYPE_RAID6: 'RAID6', # AKA, RAID-DP.
- Pool.RAID_TYPE_RAID10: 'RAID10', # stripe of mirrors
- Pool.RAID_TYPE_RAID15: 'RAID15', # parity of mirrors
- Pool.RAID_TYPE_RAID16: 'RAID16', # dual parity of mirrors
- Pool.RAID_TYPE_RAID50: 'RAID50', # stripe of parities
- Pool.RAID_TYPE_RAID60: 'RAID60', # stripe of dual parities
- Pool.RAID_TYPE_RAID51: 'RAID51', # mirror of parities
- Pool.RAID_TYPE_RAID61: 'RAID61', # mirror of dual parities
- Pool.RAID_TYPE_JBOD: 'JBOD', # Just Bunch of Disks
- Pool.RAID_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.RAID_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
- Pool.RAID_TYPE_MIXED: 'MIXED', # a Pool are having 2+ RAID groups with
- # different RAID type
-}
-
-
-def pool_raid_type_to_str(raid_type):
- return _enum_type_to_str(raid_type, _POOL_RAID_TYPE_CONV)
-
-
-def pool_raid_type_str_to_type(raid_type_str):
- return _str_to_enum(raid_type_str, _POOL_RAID_TYPE_CONV)
-
-
-_POOL_MEMBER_TYPE_CONV = {
- Pool.MEMBER_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.MEMBER_TYPE_DISK: 'DISK', # Pool was created from Disk(s).
- Pool.MEMBER_TYPE_DISK_MIX: 'DISK_MIX', # Has two or more types of disks.
- Pool.MEMBER_TYPE_DISK_ATA: 'DISK_ATA',
- Pool.MEMBER_TYPE_DISK_SATA: 'DISK_SATA',
- Pool.MEMBER_TYPE_DISK_SAS: 'DISK_SAS',
- Pool.MEMBER_TYPE_DISK_FC: 'DISK_FC',
- Pool.MEMBER_TYPE_DISK_SOP: 'DISK_SOP',
- Pool.MEMBER_TYPE_DISK_SCSI: 'DISK_SCSI',
- Pool.MEMBER_TYPE_DISK_NL_SAS: 'DISK_NL_SAS',
- Pool.MEMBER_TYPE_DISK_HDD: 'DISK_HDD',
- Pool.MEMBER_TYPE_DISK_SSD: 'DISK_SSD',
- Pool.MEMBER_TYPE_DISK_HYBRID: 'DISK_HYBRID',
- Pool.MEMBER_TYPE_POOL: 'POOL', # Pool was created from other Pool(s).
-}
-
-
-def pool_member_type_to_str(member_type):
- return _enum_type_to_str(member_type, _POOL_MEMBER_TYPE_CONV)
-
-
-def pool_member_type_str_to_type(member_type_str):
- return _str_to_enum(member_type_str, _POOL_MEMBER_TYPE_CONV)
-
-
-_POOL_THINP_TYPE_CONV = {
- Pool.THINP_TYPE_UNKNOWN: 'UNKNOWN',
- Pool.THINP_TYPE_THIN: 'THIN',
- Pool.THINP_TYPE_THICK: 'THICK',
- Pool.THINP_TYPE_NOT_APPLICABLE: 'NOT_APPLICABLE',
-}
-
-
-def pool_thinp_type_to_str(thinp_type):
- return _enum_type_to_str(thinp_type, _POOL_THINP_TYPE_CONV)
-
-
-def pool_thinp_type_str_to_type(thinp_type_str):
- return _str_to_enum(thinp_type_str, _POOL_THINP_TYPE_CONV)
-
-
_VOL_STATUS_CONV = {
Volume.STATUS_UNKNOWN: 'Unknown',
Volume.STATUS_OK: 'OK',
@@ -393,9 +318,6 @@ class DisplayData(object):
POOL_VALUE_CONV_ENUM = {
'status': pool_status_to_str,
- 'raid_type': pool_raid_type_to_str,
- 'member_type': pool_member_type_to_str,
- 'thinp_type': pool_thinp_type_to_str,
'element_type': pool_element_type_to_str,
}
--
1.8.3.1