Discussion:
[Libstoragemgmt-devel] [PATCH 0/7] Add pool create and delete support to NetApp ONTAP plugin
Gris Ge
2014-03-25 03:05:51 UTC
* Only tested against the NetApp ONTAP simulator 8.1.1.

TODO:
* Extend the error reporting on:
1. NetApp volume still exists when deleting aggregate.
2. NetApp LUN still exists when deleting volume.
3. Duplicate name for NetApp volume or aggregate when creating.

Gris Ge (7):
na.py: Allow deleting offlined NetApp volume
na.py: Allow creating NetApp volume when no NFS license
na.py: Allow listing offlined NetApp aggregate
ontap.py: improve disk_type detection
ontap.py: more strict NetApp volume name parsing
ontap.py: trivial clean up for raid type detection
ontap.py: Add support of pool_create() and pool_delete()

lsm/lsm/na.py | 76 +++++++++-
lsm/lsm/ontap.py | 439 +++++++++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 481 insertions(+), 34 deletions(-)
--
1.8.3.1
Gris Ge
2014-03-25 03:05:52 UTC
* Continue deleting the volume if it is already offlined.

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/na.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lsm/lsm/na.py b/lsm/lsm/na.py
index cfa545f..0422488 100644
--- a/lsm/lsm/na.py
+++ b/lsm/lsm/na.py
@@ -170,6 +170,7 @@ class Filer(object):
EAPILICENSE = 13008 # Unlicensed API
EFSDOESNOTEXIST = 13040 # FS does not exist
EFSOFFLINE = 13042 # FS is offline.
+ EVOLUMEOFFLINE = 13042 # The volume is offline.
EFSNAMEINVALID = 13044 # FS Name invalid
ESERVICENOTLICENSED = 13902 # Not licensed
ECLONE_LICENSE_EXPIRED = 14955 # Not licensed
@@ -380,7 +381,8 @@ class Filer(object):
self._invoke('volume-offline', {'name': vol_name})
online = True
except FilerError as fe:
- if fe.errno != Filer.EFSDOESNOTEXIST:
+ if fe.errno != Filer.EFSDOESNOTEXIST and \
+ fe.errno != Filer.EVOLUMEOFFLINE:
raise fe

try:
--
1.8.3.1
Gris Ge
2014-03-25 03:05:53 UTC
* Ignore error of no NFS license when creating NetApp volume.

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/na.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lsm/lsm/na.py b/lsm/lsm/na.py
index 0422488..e490f5e 100644
--- a/lsm/lsm/na.py
+++ b/lsm/lsm/na.py
@@ -359,7 +359,13 @@ class Filer(object):
'option-value': 'on', })

#Turn off auto export!
- self.nfs_export_remove(['/vol/' + vol_name])
+ try:
+ self.nfs_export_remove(['/vol/' + vol_name])
+ except FilerError as fe:
+ # Ignore error of no NFS license
+ if fe.errno != Filer.EAPILICENSE:
+ raise fe
+

def volume_clone(self, src_volume, dest_volume, snapshot=None):
"""
--
1.8.3.1
Gris Ge
2014-03-25 03:05:54 UTC
* Allow listing offlined NetApp aggregates (LSM Pools).
* Allow querying an aggregate by giving the aggregate name.

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/na.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/lsm/lsm/na.py b/lsm/lsm/na.py
index e490f5e..5927584 100644
--- a/lsm/lsm/na.py
+++ b/lsm/lsm/na.py
@@ -208,13 +208,15 @@ class Filer(object):
disks = self._invoke('disk-list-info')
return disks['disk-details']['disk-detail-info']

- def aggregates(self):
+ def aggregates(self, aggr_name=None):
"""
Return a list of aggregates
"""
- pools = self._invoke('aggr-list-info')
- tmp = pools['aggregates']['aggr-info']
- return [p for p in to_list(tmp) if p['mount-state'] == 'online']
+ if aggr_name is None:
+ pools = self._invoke('aggr-list-info')
+ else:
+ pools = self._invoke('aggr-list-info', {'aggregate': aggr_name})
+ return to_list(pools['aggregates']['aggr-info'])

def aggregate_volume_names(self, aggr_name):
"""
--
1.8.3.1
Gris Ge
2014-03-25 03:05:55 UTC
* Base the disk type conversion on the NetApp 'Physical Storage Management
Guide' document.
* Use 'effective-disk-type' for disk type detection. NetApp allows disks with
the same 'effective-disk-type' to be used together when creating an aggregate.
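
For illustration only (not part of the patch), here is a minimal standalone
sketch of the new lookup; the integer values merely stand in for the lsm
Disk.DISK_TYPE_* constants, and the disk record is a made-up sample shaped
like one entry of the 'disk-list-info' output:

# Sketch: stand-in values for the lsm Disk.DISK_TYPE_* constants.
DISK_TYPE_UNKNOWN, DISK_TYPE_SATA, DISK_TYPE_SAS, DISK_TYPE_SSD = 0, 1, 2, 3

NA_DISK_TYPE_TO_LSM = {
    'BSAS': DISK_TYPE_SATA,   # bridged SAS/SATA is now treated as SATA
    'SAS': DISK_TYPE_SAS,
    'SSD': DISK_TYPE_SSD,
}

def disk_type_of(na_disk):
    # Map na_disk['effective-disk-type'] to an LSM disk type.
    return NA_DISK_TYPE_TO_LSM.get(na_disk.get('effective-disk-type'),
                                   DISK_TYPE_UNKNOWN)

fake_na_disk = {'name': '0a.16', 'effective-disk-type': 'BSAS'}
print(disk_type_of(fake_na_disk))   # -> 1, i.e. SATA in this sketch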

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/ontap.py | 37 +++++++++++++++++++++++++------------
1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/lsm/lsm/ontap.py b/lsm/lsm/ontap.py
index d52b3e4..86a0577 100644
--- a/lsm/lsm/ontap.py
+++ b/lsm/lsm/ontap.py
@@ -144,6 +144,23 @@ class Ontap(IStorageAreaNetwork, INfs):
'restricted': 'volume is restricted to protocol accesses',
}

+ NA_DISK_TYPE_TO_LSM = {
+ 'ATA': Disk.DISK_TYPE_ATA,
+ 'BSAS': Disk.DISK_TYPE_SATA,
+ 'EATA': Disk.DISK_TYPE_ATA,
+ 'FCAL': Disk.DISK_TYPE_FC,
+ 'FSAS': Disk.DISK_TYPE_NL_SAS,
+ 'LUN': Disk.DISK_TYPE_OTHER,
+ 'MSATA': Disk.DISK_TYPE_SATA,
+ 'SAS': Disk.DISK_TYPE_SAS,
+ 'SATA': Disk.DISK_TYPE_SATA,
+ 'SCSI': Disk.DISK_TYPE_SCSI,
+ 'SSD': Disk.DISK_TYPE_SSD,
+ 'XATA': Disk.DISK_TYPE_ATA,
+ 'XSAS': Disk.DISK_TYPE_SAS,
+ 'unknown': Disk.DISK_TYPE_UNKNOWN,
+ }
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -211,17 +228,13 @@ class Ontap(IStorageAreaNetwork, INfs):
s['access-time'])

@staticmethod
- def _disk_type(netapp_disk_type):
- conv = {'ATA': Disk.DISK_TYPE_ATA, 'BSAS': Disk.DISK_TYPE_SAS,
- 'EATA': Disk.DISK_TYPE_ATA, 'FCAL': Disk.DISK_TYPE_FC,
- 'FSAS': Disk.DISK_TYPE_SAS, 'LUN': Disk.DISK_TYPE_OTHER,
- 'SAS': Disk.DISK_TYPE_SAS, 'SATA': Disk.DISK_TYPE_SATA,
- 'SCSI': Disk.DISK_TYPE_SCSI, 'SSD': Disk.DISK_TYPE_SSD,
- 'XATA': Disk.DISK_TYPE_ATA, 'XSAS': Disk.DISK_TYPE_SAS,
- 'unknown': Disk.DISK_TYPE_UNKNOWN}
-
- if netapp_disk_type in conv:
- return conv[netapp_disk_type]
+ def _disk_type_of(na_disk):
+ """
+ Convert na_disk['effective-disk-type'] to LSM disk type.
+ """
+ na_disk_type = na_disk['effective-disk-type']
+ if na_disk_type in Ontap.NA_DISK_TYPE_TO_LSM.keys():
+ return Ontap.NA_DISK_TYPE_TO_LSM[na_disk_type]
return Disk.DISK_TYPE_UNKNOWN

@staticmethod
@@ -287,7 +300,7 @@ class Ontap(IStorageAreaNetwork, INfs):

return Disk(self._disk_id(d),
d['name'],
- Ontap._disk_type(d['disk-type']),
+ Ontap._disk_type_of(d),
int(d['bytes-per-sector']),
int(d['physical-blocks']),
status,
--
1.8.3.1
Gris Ge
2014-03-25 03:05:56 UTC
* Return None if the NetApp volume name is not valid. Useful for checking
whether a given pool name is an aggregate name or a NetApp volume name.
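
As an illustration only (not part of the patch), the stricter parsing behaves
like this standalone sketch; the function name here is just for the example:

def na_vol_name_of_pool_name(pool_name):
    # Return 'vol_name' for '/vol/vol_name'; return None when the name
    # does not look like a NetApp volume path (e.g. a bare aggregate name).
    parts = pool_name.split('/')
    if len(parts) >= 3:
        return parts[-1]
    return None

print(na_vol_name_of_pool_name('/vol/lsm_lun_container'))   # 'lsm_lun_container'
print(na_vol_name_of_pool_name('aggr0'))                    # None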

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/ontap.py | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/lsm/lsm/ontap.py b/lsm/lsm/ontap.py
index 86a0577..436436e 100644
--- a/lsm/lsm/ontap.py
+++ b/lsm/lsm/ontap.py
@@ -405,8 +405,15 @@ class Ontap(IStorageAreaNetwork, INfs):
return "%s/%s" % (Ontap.VOLUME_PREFIX, na_vol['name'])

@staticmethod
- def _na_vol_name_of_pool(pool):
- return pool.name.split('/')[-1]
+ def _na_vol_name_of_pool(pool_name):
+ """
+ Convert "/vol/vol_name" to "vol_name".
+ Return None if not a valid NetApp volume name format.
+ """
+ tmp_list = pool_name.split('/')
+ if len(tmp_list) >= 3:
+ return tmp_list[-1]
+ return None

def _pool_from_na_vol(self, na_vol, na_aggrs, flags):
pool_id = self._pool_id(na_vol)
@@ -593,7 +600,7 @@ class Ontap(IStorageAreaNetwork, INfs):
na_vol_name = ''
if pool.name not in na_aggr_names:
# user defined a NetApp volume.
- na_vol_name = self._na_vol_name_of_pool(pool)
+ na_vol_name = self._na_vol_name_of_pool(pool.name)
else:
# user defined a NetApp Aggregate
v = self.f.volume_names()
--
1.8.3.1
Gris Ge
2014-03-25 03:05:57 UTC
* Just move value mapping out to constants for better maintenance.
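
For illustration only (not part of the patch), the lookup now works like this
standalone sketch; the integers are stand-ins for the lsm Pool.RAID_TYPE_*
constants, and the 'raid-status' sample value is assumed to look like ONTAP's
comma-separated status string:

# Sketch: stand-in values for the lsm Pool.RAID_TYPE_* constants.
RAID_TYPE_RAID0, RAID_TYPE_RAID4, RAID_TYPE_RAID6 = 0, 4, 6
RAID_TYPE_MIXED, RAID_TYPE_UNKNOWN = 13, 14

NA_RAID_TYPE_TO_LSM = {
    'raid0': RAID_TYPE_RAID0,
    'raid4': RAID_TYPE_RAID4,
    'raid_dp': RAID_TYPE_RAID6,
    'mixed_raid_type': RAID_TYPE_MIXED,
}

def raid_type_of_na_aggr(na_aggr):
    # 'raid-status' is a comma-separated list such as "raid_dp, aggr".
    statuses = [s.strip() for s in na_aggr['raid-status'].split(',')]
    for na_raid_type, lsm_raid_type in NA_RAID_TYPE_TO_LSM.items():
        if na_raid_type in statuses:
            return lsm_raid_type
    return RAID_TYPE_UNKNOWN

print(raid_type_of_na_aggr({'raid-status': 'raid_dp, aggr'}))   # -> 6 here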

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/ontap.py | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/lsm/lsm/ontap.py b/lsm/lsm/ontap.py
index 436436e..6877f2e 100644
--- a/lsm/lsm/ontap.py
+++ b/lsm/lsm/ontap.py
@@ -161,6 +161,13 @@ class Ontap(IStorageAreaNetwork, INfs):
'unknown': Disk.DISK_TYPE_UNKNOWN,
}

+ NA_RAID_TYPE_TO_LSM = {
+ 'raid0': Pool.RAID_TYPE_RAID0,
+ 'raid4': Pool.RAID_TYPE_RAID4,
+ 'raid_dp': Pool.RAID_TYPE_RAID6,
+ 'mixed_raid_type': Pool.RAID_TYPE_MIXED,
+ }
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -321,14 +328,9 @@ class Ontap(IStorageAreaNetwork, INfs):
@staticmethod
def _raid_type_of_na_aggr(na_aggr):
na_raid_statuses = na_aggr['raid-status'].split(',')
- if 'raid0' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID0
- if 'raid4' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID4
- if 'raid_dp' in na_raid_statuses:
- return Pool.RAID_TYPE_RAID6
- if 'mixed_raid_type' in na_raid_statuses:
- return Pool.RAID_TYPE_MIXED
+ for na_raid_type in Ontap.NA_RAID_TYPE_TO_LSM.keys():
+ if na_raid_type in na_raid_statuses:
+ return Ontap.NA_RAID_TYPE_TO_LSM[na_raid_type]
return Pool.RAID_TYPE_UNKNOWN

@staticmethod
--
1.8.3.1
Gris Ge
2014-03-25 03:05:58 UTC
* Support creating these two types of pool:
NetApp Aggregate -- LSM Disk Pool
NetApp Volume -- LSM Sub Pool
* Will fail if LUN still exists in pool.
* Support pool_delete()
* Tested in NetApp ONTAP simulator 8.1.1.
* PEP8 passed.
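
Not part of the patch, but to make the disk auto-selection concrete, here is
a rough standalone sketch of the capacity estimate used when choosing disks;
plain strings replace the Pool.RAID_TYPE_* constants and the sizes are
invented:

def size_of_raid(member_size, member_count, raid_type):
    # Rough usable capacity of an ONTAP aggregate as estimated in this
    # patch: RAID 4 loses 1 parity disk, RAID-DP (RAID 6) loses 2, and
    # about 15% is reserved (10% WAFL + 5% aggregate snapshot reserve).
    reserve_ratio = 0.15
    if raid_type == 'raid4':
        parity_count = 1
    elif raid_type == 'raid_dp':
        parity_count = 2
    else:
        return 0
    if member_count <= parity_count:
        return 0
    return int(member_size * (member_count - parity_count) *
               (1 - reserve_ratio))

one_tib = 2 ** 40
# 5 x 1 TiB disks as RAID-DP: 3 data disks * 85% ~= 2.55 TiB usable.
print(size_of_raid(one_tib, 5, 'raid_dp'))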

Signed-off-by: Gris Ge <***@redhat.com>
---
lsm/lsm/na.py | 54 ++++++++
lsm/lsm/ontap.py | 371 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 420 insertions(+), 5 deletions(-)

diff --git a/lsm/lsm/na.py b/lsm/lsm/na.py
index 5927584..2a37a41 100644
--- a/lsm/lsm/na.py
+++ b/lsm/lsm/na.py
@@ -171,11 +171,14 @@ class Filer(object):
EFSDOESNOTEXIST = 13040 # FS does not exist
EFSOFFLINE = 13042 # FS is offline.
EVOLUMEOFFLINE = 13042 # The volume is offline.
+ EAGGROFFLINE = 13042 # The aggregate is offline.
EFSNAMEINVALID = 13044 # FS Name invalid
ESERVICENOTLICENSED = 13902 # Not licensed
ECLONE_LICENSE_EXPIRED = 14955 # Not licensed
ECLONE_NOT_LICENSED = 14956 # Not licensed

+ POOL_CREATE_NAME_ALLOWED_REGX = r"^[a-zA-Z0-9_]+$"
+
def _invoke(self, command, parameters=None):

rc = netapp_filer(self.host, self.username, self.password,
@@ -232,6 +235,57 @@ class Filer(object):
vol_names = [e['name'] for e in to_list(vols)]
return vol_names

+ def aggregate_create(self, aggr_name, na_raid_type, na_disks):
+ """
+ Taking in_paras to create new aggregate.
+ in_paras = {
+ "aggregate": aggr_name,
+ "disks": {
+ 'disk-info': [
+ {'name': na_disk1['name']},
+ {'name': na_disk2['name']},
+ ]
+ },
+ "raid-type": na_raid_type,
+ }
+ Will raise FilerError if error found.
+ """
+ in_paras = {
+ 'aggregate': aggr_name,
+ 'raid-type': na_raid_type,
+ 'disks': [],
+ }
+ for na_disk in na_disks:
+ disk_info_dict = {'disk-info': {'name': na_disk['name']}}
+ in_paras['disks'].extend([disk_info_dict])
+
+ self._invoke("aggr-create", in_paras)
+
+ def aggregate_delete(self, aggr_name):
+ """
+ Delete an aggregate
+ """
+ online = False
+
+ try:
+ self._invoke('aggr-offline', {'aggregate': aggr_name})
+ online = True
+ except FilerError as fe:
+ if fe.errno != Filer.EFSDOESNOTEXIST and \
+ fe.errno != Filer.EAGGROFFLINE:
+ raise fe
+
+ try:
+ self._invoke('aggr-destroy', {'aggregate': aggr_name})
+ except FilerError as fe:
+ #If the volume was online, we will return it to same status
+ if online:
+ try:
+ self._invoke('aggr-online', {'aggregate': vol_name})
+ except FilerError:
+ pass
+ raise fe
+
def lun_build_name(self, volume_name, file_name):
"""
Given a volume name and file return full path"
diff --git a/lsm/lsm/ontap.py b/lsm/lsm/ontap.py
index 6877f2e..002020e 100644
--- a/lsm/lsm/ontap.py
+++ b/lsm/lsm/ontap.py
@@ -21,12 +21,13 @@ import traceback
import urllib2
import urlparse
import sys
+import re

import na
from lsm import (Volume, Initiator, FileSystem, Snapshot, NfsExport,
AccessGroup, System, Capabilities, Disk, Pool, OptionalData,
IStorageAreaNetwork, INfs, LsmError, ErrorNumber, JobStatus,
- md5, Error, VERSION)
+ md5, Error, VERSION, common)

#Maps na to lsm, this is expected to expand over time.
e_map = {
@@ -84,6 +85,8 @@ class Ontap(IStorageAreaNetwork, INfs):
(LSM_VOL_PREFIX, LSM_INIT_PREFIX) = ('lsm_lun_container', 'lsm_init_')

(SS_JOB, SPLIT_JOB) = ('ontap-ss-file-restore', 'ontap-clone-split')
+ AGGR_CREATE_JOB = 'aggr_create'
+ NA_VOL_CREATE_JOB = 'na_vol_create'

VOLUME_PREFIX = '/vol'

@@ -168,6 +171,10 @@ class Ontap(IStorageAreaNetwork, INfs):
'mixed_raid_type': Pool.RAID_TYPE_MIXED,
}

+ NA_POOL_CREATE_DEFAULT_RAID_TYPE = Pool.RAID_TYPE_RAID4
+
+ NA_AGGR_SYSTEM = 'aggr0'
+
def __init__(self):
self.f = None
self.sys_info = None
@@ -313,6 +320,12 @@ class Ontap(IStorageAreaNetwork, INfs):
status,
self.sys_info.id, opt_data)

+ @staticmethod
+ def _disk_total_space_of(na_disk):
+ return int(
+ int(na_disk['physical-blocks']) *
+ int(na_disk['bytes-per-sector']))
+
@handle_ontap_errors
def volumes(self, flags=0):
luns = self.f.luns_get_all()
@@ -353,11 +366,17 @@ class Ontap(IStorageAreaNetwork, INfs):
return Ontap.NA_AGGR_STATUS_TO_LSM_STATUS_INFO[na_aggr_state]
return ''

- def _pool_from_na_aggr(self, na_aggr, na_disks, flags):
+ @staticmethod
+ def _free_space_of_na_aggr(na_aggr):
+ return int(na_aggr['size-available'])
+
+ def _pool_from_na_aggr(self, na_aggr, na_disks=None, flags=None):
+ if na_disks is None:
+ na_disks = self.f.disks()
pool_id = self._pool_id(na_aggr)
pool_name = na_aggr['name']
total_space = int(na_aggr['size-total'])
- free_space = int(na_aggr['size-available'])
+ free_space = Ontap._free_space_of_na_aggr(na_aggr)
system_id = self.sys_info.id
status = self._status_of_na_aggr(na_aggr)
opt_data = OptionalData()
@@ -381,7 +400,7 @@ class Ontap(IStorageAreaNetwork, INfs):
Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME)
- if pool_name == 'aggr0':
+ if pool_name == Ontap.NA_AGGR_SYSTEM:
element_type = element_type | Pool.ELEMENT_TYPE_SYS_RESERVED
opt_data.set('element_type', element_type)

@@ -417,7 +436,9 @@ class Ontap(IStorageAreaNetwork, INfs):
return tmp_list[-1]
return None

- def _pool_from_na_vol(self, na_vol, na_aggrs, flags):
+ def _pool_from_na_vol(self, na_vol, na_aggrs=None, flags=None):
+ if na_aggrs is None:
+ na_aggrs = self.f.aggregates()
pool_id = self._pool_id(na_vol)
pool_name = self._pool_name_of_na_vol(na_vol)
total_space = int(na_vol['size-total'])
@@ -493,6 +514,16 @@ class Ontap(IStorageAreaNetwork, INfs):
cap.set(Capabilities.EXPORT_FS)
cap.set(Capabilities.EXPORT_REMOVE)
cap.set(Capabilities.EXPORT_CUSTOM_PATH)
+ # Pool creation
+ cap.set(Capabilities.POOL_CREATE)
+ cap.set(Capabilities.POOL_CREATE_MEMBER_TYPE_DISK)
+ cap.set(Capabilities.POOL_CREATE_MEMBER_TYPE_POOL)
+ # When changing POOL_CREATE_DISK_RAID_XXX, please change
+ # _na_aggr_create() also.
+ cap.set(Capabilities.POOL_CREATE_DISK_RAID_4)
+ cap.set(Capabilities.POOL_CREATE_DISK_RAID_6)
+ # Pool deletion
+ cap.set(Capabilities.POOL_DELETE)
return cap

@handle_ontap_errors
@@ -521,6 +552,332 @@ class Ontap(IStorageAreaNetwork, INfs):
return pools

@handle_ontap_errors
+ def pool_create(self, system_id, pool_name, size_bytes,
+ raid_type=Pool.RAID_TYPE_UNKNOWN,
+ member_type=Pool.MEMBER_TYPE_UNKNOWN, flags=0):
+ """
+ If member_type is Pool.MEMBER_TYPE_DISK_XXX, we create an aggregate.
+ If member_type is Pool.MEMBER_TYPE_POOL, we create a NetApp volume.
+ If member_type is Pool.MEMBER_TYPE_VOLUME, this is not supported.
+ If member_type is Pool.MEMBER_TYPE_UNKNOWN, we try in this sequence:
+ 1. Creating an aggregate using automatically chosen free disks.
+ 2. Creating a NetApp volume on a non-system aggregate.
+ 3. Creating a NetApp volume on the system aggregate.
+ """
+ if system_id != self.sys_info.id:
+ raise LsmError(ErrorNumber.INVALID_SYSTEM,
+ "Invalid system %s" % system_id)
+
+ if not re.match(na.Filer.POOL_CREATE_NAME_ALLOWED_REGX, pool_name):
+ raise LsmError(ErrorNumber.INVALID_NAME,
+ "Invalid pool name, only a-z, A-Z, 0-9 and - are"
+ "allowed")
+ raise_error = True
+ job_id = None
+ if Pool.member_type_is_disk(member_type):
+ job_id = self._na_aggr_create(
+ pool_name, size_bytes, raid_type, member_type, raise_error)[0]
+ elif member_type == Pool.MEMBER_TYPE_POOL:
+ if raid_type != Pool.RAID_TYPE_UNKNOWN and \
+ raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "Current plugin does not support create " +
+ "ONTAP traditional volume(RAIDed volume). " +
+ "Requested MEMBER_TYPE_POOL and raid %s(%d)." %
+ (Pool.raid_type_to_str(raid_type), raid_type))
+ job_id = self._na_vol_create(
+ pool_name, size_bytes, raise_error)[0]
+
+ elif member_type == Pool.MEMBER_TYPE_VOLUME:
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "NetApp ONTAP does not support creating pool "
+ "on LSM volumes(LUN)")
+ elif member_type == Pool.MEMBER_TYPE_UNKNOWN:
+ raise_error = False
+ if raid_type != Pool.RAID_TYPE_UNKNOWN and \
+ raid_type != Pool.RAID_TYPE_NOT_APPLICABLE:
+ # We can only create aggregate if RAID type defined.
+ raise_error = True
+ (job_id, errno, reason) = self._na_aggr_create(
+ pool_name, size_bytes, raid_type, member_type, raise_error)
+
+ if not job_id:
+ (job_id, errno2, reason2) = self._na_vol_create(
+ pool_name, size_bytes, raise_error)
+
+ if not job_id:
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Failed to create LSM pool via NetApp " +
+ "aggregate: error code(%d), reason: %s\n" %
+ (errno, reason) +
+ "Failed to create LSM pool via NetApp " +
+ "volume: error code(%d), reason: %s" %
+ (errno2, reason2))
+ if job_id:
+ return (job_id, None)
+
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "NetApp ONTAP does not support pool member_type: "
+ "%s(%d)" %
+ (Pool.member_type_to_str(member_type), member_type))
+
+ def _free_disks_list(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
+ """
+ Return a list of na_disk which is not used by any aggregate.
+ """
+ # In the NetApp ONTAP simulator 8.1.1, na_disk['aggregate'] might point
+ # to a non-existent aggregate. Here we get all existing aggregates.
+ na_aggrs = self.f.aggregates()
+ na_aggr_names = [na_aggr['name'] for na_aggr in na_aggrs]
+ na_disks = self.f.disks()
+ rc = []
+ for na_disk in na_disks:
+ if disk_type != Disk.DISK_TYPE_UNKNOWN and \
+ disk_type != self._disk_type_of(na_disk):
+ continue
+ if 'aggregate' not in na_disk or \
+ na_disk['aggregate'] not in na_aggr_names:
+ rc.extend([na_disk])
+ return rc
+
+ def _free_disks(self, disk_type=Disk.DISK_TYPE_UNKNOWN):
+ """
+ Usage:
+ Return a structure like this:
+ {
+ na_disk['effective-disk-type'] = {
+ Disk.total_space = [ na_disk, ],
+ },
+ }
+ When disk_type is defined, the returned structure will only contain
+ disks of that type.
+ Parameter:
+ disk_type # Disk.DISK_TYPE_XXX
+ Returns:
+ free_disks_t # Check above
+ or
+ dict() # Nothing found
+ Exceptions:
+ N/A
+ """
+ rc = dict()
+ free_na_disks = self._free_disks_list(disk_type)
+ if len(free_na_disks) == 0:
+ return rc
+ for na_disk in free_na_disks:
+ na_disk_type = na_disk['effective-disk-type']
+ disk_size = Ontap._disk_total_space_of(na_disk)
+ if disk_size == 0:
+ continue
+ if na_disk_type not in rc.keys():
+ rc[na_disk_type] = dict()
+ if disk_size not in rc[na_disk_type].keys():
+ rc[na_disk_type][disk_size] = []
+ rc[na_disk_type][disk_size].extend([na_disk])
+ return rc
+
+ def _auto_choose_disks(self, na_disks_t, size_bytes, raid_type):
+ """
+ Automatically choose the disks for given size_bytes, raid_type,
+ disk_type from na_disks_t.
+ The 'na_disks_t' was generated by self._free_disks(disk_type)
+ The 'raid_type' cannot be RAID_TYPE_UNKNOWN
+ """
+ for na_disk_type in na_disks_t.keys():
+ for disk_size in na_disks_t[na_disk_type].keys():
+ cur_na_disks = na_disks_t[na_disk_type][disk_size]
+ if len(cur_na_disks) < 2:
+ continue
+ for member_count in range(2, len(cur_na_disks) + 1):
+ raid_size = Ontap._size_of_raid(
+ disk_size, member_count, raid_type)
+ if raid_size >= size_bytes:
+ return cur_na_disks[0:member_count]
+ return []
+
+ def _auto_choose_aggr(self, size_bytes):
+ """
+ Automatically choose the na_aggr for given size_bytes.
+ The system aggregate -- aggr0 will be the last choice.
+ Return na_aggr if found.
+ Return None if not found
+ """
+ na_aggrs = self.f.aggregates()
+ sys_na_aggr = None
+ for na_aggr in na_aggrs:
+ if na_aggr['name'] == Ontap.NA_AGGR_SYSTEM:
+ sys_na_aggr = na_aggr
+ continue
+ if Ontap._free_space_of_na_aggr(na_aggr) >= size_bytes:
+ return na_aggr
+ if Ontap._free_space_of_na_aggr(sys_na_aggr) >= size_bytes:
+ return na_aggr
+ return None
+
+ @staticmethod
+ def _size_of_raid(member_size, member_count, raid_type):
+ """
+ Only check RAID 4 and RAID 6, as those are all ONTAP supports for
+ creating an aggregate.
+ Return 0 on error:
+ 1. Incorrect count of disks for the RAID type. RAID 6 needs 3+ disks.
+ 2. Unknown RAID type.
+ By default, NetApp ONTAP will reserve 15% of the space:
+ 1. WAFL (ONTAP file system) reserve of 10%
+ 2. Aggregate snapshot reserve of 5%
+ """
+ reserve_ratio = 0.15
+ if raid_type == Pool.RAID_TYPE_RAID4:
+ if member_count < 2:
+ return 0
+ return int(member_size * (member_count - 1) * (1 - reserve_ratio))
+
+ elif raid_type == Pool.RAID_TYPE_RAID6:
+ if member_count < 3:
+ return 0
+ return int(member_size * (member_count - 2) * (1 - reserve_ratio))
+ else:
+ return 0
+
+ def _na_aggr_create(self, pool_name, size_bytes, raid_type, member_type,
+ raise_error):
+ """
+ Return (job_id, errno, reason)
+ NetApp ONTAP "aggr-create" require defining a list of disks to use
+ or define a count of disks to use. To meet the LSM API design,
+ we will provide a list of disks auto chose by ourselves.
+ """
+ in_paras = dict()
+ in_paras['aggregate'] = pool_name
+
+ disk_type = Disk.DISK_TYPE_UNKNOWN
+ if member_type != Pool.MEMBER_TYPE_UNKNOWN:
+ disk_type = Pool.member_type_to_disk_type(member_type)
+
+ if raid_type == Pool.RAID_TYPE_UNKNOWN:
+ raid_type = Ontap.NA_POOL_CREATE_DEFAULT_RAID_TYPE
+
+ if raid_type != Pool.RAID_TYPE_RAID4 and \
+ raid_type != Pool.RAID_TYPE_RAID6:
+ errno = ErrorNumber.NO_SUPPORT
+ reason = "NetApp ONTAP does not support RAID type: %s(%d)" % \
+ (Pool.raid_type_to_str(raid_type), raid_type)
+ if not raise_error:
+ return (None, errno, reason)
+ raise LsmError(errno, reason)
+
+ na_disks_t = self._free_disks(disk_type)
+
+ chose_disks = self._auto_choose_disks(na_disks_t, size_bytes,
+ raid_type)
+
+ if len(chose_disks) == 0:
+ errno = ErrorNumber.DISK_BUSY
+ reason = "No enough free disks to create NetApp aggregate " + \
+ "as LSM pool"
+ if not raise_error:
+ return (None, errno, reason)
+ raise LsmError(errno, reason)
+
+ chose_na_raid_type = None
+ for na_raid_type in Ontap.NA_RAID_TYPE_TO_LSM.keys():
+ if Ontap.NA_RAID_TYPE_TO_LSM[na_raid_type] == raid_type:
+ chose_na_raid_type = na_raid_type
+ break
+ # We already checked the RAID type, so chose_na_raid_type will surely
+ # be defined.
+
+ try:
+ na_aggr = self.f.aggregate_create(
+ pool_name, chose_na_raid_type, chose_disks)
+ except na.FilerError as fe:
+ if raise_error:
+ raise LsmError(ErrorNumber.PLUGIN_ERROR,
+ "Failed to create NetApp aggregate with vendor "
+ "error number %d and message: %s" %
+ (fe.errno, fe.reason))
+ else:
+ return (None, fe.errno, fe.reason)
+
+ return ("%s@%s" % (Ontap.AGGR_CREATE_JOB, pool_name), None, None)
+
+ def _na_vol_create(self, pool_name, size_bytes, raise_error):
+ """
+ Return (job_id, errno, reason)
+ """
+ na_aggr = self._auto_choose_aggr(size_bytes)
+ if not na_aggr:
+ errno = ErrorNumber.SIZE_INSUFFICIENT_SPACE
+ reason = ("No free aggregate space to create new " +
+ "NetApp volume(LSM Pool) with size: %s(%d)" %
+ (common.size_bytes_2_size_human(size_bytes),
+ size_bytes))
+ if raise_error:
+ raise LsmError(errno, reason)
+ else:
+ return (None, errno, reason)
+ try:
+ na_vol = self.f.volume_create(
+ na_aggr['name'], pool_name, size_bytes)
+ except na.FilerError as fe:
+ if raise_error:
+ raise LsmError(ErrorNumber.PLUGIN_ERROR,
+ "Failed to create NetApp volume with vendor "
+ "error number %d and message: %s" %
+ (fe.errno, fe.reason))
+ else:
+ return (None, fe.errno, fe.reason)
+ return "%s@%s" % (Ontap.NA_VOL_CREATE_JOB, pool_name)
+
+ def _job_status_create_na_aggr(self, aggr_name):
+ na_aggrs = self.f.aggregates(aggr_name)
+ if len(na_aggrs) == 0:
+ raise LsmError(
+ ErrorNumber.INVALID_JOB,
+ "Invalid JOB ID %s: cannot find NetApp aggregate %s" %
+ ("%s@%s" % (Ontap.AGGR_CREATE_JOB, aggr_name),
+ aggr_name))
+ na_aggr = na_aggrs[0]
+ if Ontap._status_of_na_aggr(na_aggr) & Pool.STATUS_STARTING:
+ # NetApp does not expose the progress of aggregate creation.
+ # We just report 1%.
+ return JobStatus.INPROGRESS, 1, None
+ else:
+ pool = self._pool_from_na_aggr(
+ na_aggr, flags=Pool.RETRIEVE_FULL_INFO)
+ return JobStatus.COMPLETE, 100, pool
+
+ def _job_status_create_na_vol(self, na_vol_name):
+ na_vols = self.f.volumes(na_vol_name)
+ if len(na_vols) == 0:
+ raise LsmError(ErrorNumber.INVALID_JOB,
+ "Invalid JOB ID %s: cannot find NetApp volume %s" %
+ ("%s@%s" % (Ontap.NA_VOL_CREATE_JOB, na_vol_name),
+ na_vol_name))
+ na_vol = na_vols[0]
+ if Ontap._status_of_na_vol(na_vol) & Pool.STATUS_STARTING:
+ # NetApp does not expose the progress of volume creation.
+ # We just report 1%.
+ return JobStatus.INPROGRESS, 1, None
+ else:
+ pool = self._pool_from_na_vol(
+ na_vol, flags=Pool.RETRIEVE_FULL_INFO)
+ return JobStatus.COMPLETE, 100, pool
+
+ @handle_ontap_errors
+ def pool_delete(self, pool, flags=0):
+ na_vol_name = Ontap._na_vol_name_of_pool(pool.name)
+ # Since we already got the pool object, assume the volume/aggregate
+ # is valid.
+ if na_vol_name:
+ # Deleting a volume
+ self.f.volume_delete(na_vol_name)
+ else:
+ # Deleting an aggregate
+ self.f.aggregate_delete(pool.name)
+ return None
+
+ @handle_ontap_errors
def systems(self, flags=0):
return [self.sys_info]

@@ -901,6 +1258,10 @@ class Ontap(IStorageAreaNetwork, INfs):
return self._restore_file_status(int(job[1]))
elif job[0] == Ontap.SPLIT_JOB:
return self._clone_split_status(job[1])
+ elif job[0] == Ontap.AGGR_CREATE_JOB:
+ return self._job_status_create_na_aggr(job[1])
+ elif job[0] == Ontap.NA_VOL_CREATE_JOB:
+ return self._job_status_create_na_vol(job[1])

raise LsmError(ErrorNumber.INVALID_JOB, "Invalid job")
--
1.8.3.1
Tony Asleson
2014-03-25 21:28:15 UTC
I'm guessing you have your other patch set (which wouldn't apply for me)
before this set, and maybe your tree is not up to date with the latest on sf?

Please pull, rebase, and double check.

See comments below.

Thanks,
Tony
Post by Gris Ge
NetApp Aggregate -- LSM Disk Pool
NetApp Volume -- LSM Sub Pool
* Will fail if LUN still exists in pool.
* Support pool_delete()
* Tested in NetApp ONTAP simulator 8.1.1.
* PEP8 passed.
---
lsm/lsm/na.py | 54 ++++++++
lsm/lsm/ontap.py | 371 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 420 insertions(+), 5 deletions(-)
diff --git a/lsm/lsm/na.py b/lsm/lsm/na.py
index 5927584..2a37a41 100644
--- a/lsm/lsm/na.py
+++ b/lsm/lsm/na.py
EFSDOESNOTEXIST = 13040 # FS does not exist
EFSOFFLINE = 13042 # FS is offline.
EVOLUMEOFFLINE = 13042 # The volume is offline.
+ EAGGROFFLINE = 13042 # The aggregate is offline.
EFSNAMEINVALID = 13044 # FS Name invalid
ESERVICENOTLICENSED = 13902 # Not licensed
ECLONE_LICENSE_EXPIRED = 14955 # Not licensed
ECLONE_NOT_LICENSED = 14956 # Not licensed
+ POOL_CREATE_NAME_ALLOWED_REGX = r"^[a-zA-Z0-9_]+$"
+
rc = netapp_filer(self.host, self.username, self.password,
vol_names = [e['name'] for e in to_list(vols)]
return vol_names
+ """
+ Taking in_paras to create new aggregate.
+ in_paras = {
+ "aggregate": aggr_name,
+ "disks": {
+ 'disk-info': [
+ {'name': na_disk1['name']},
+ {'name': na_disk2['name']},
+ ]
+ },
+ "raid-type": na_raid_type,
+ }
+ Will raise FilerError if error found.
+ """
+ in_paras = {
+ 'aggregate': aggr_name,
+ 'raid-type': na_raid_type,
+ 'disks': [],
+ }
+ disk_info_dict = {'disk-info': {'name': na_disk['name']}}
+ in_paras['disks'].extend([disk_info_dict])
+
+ self._invoke("aggr-create", in_paras)
+
+ """
+ Delete an aggregate
+ """
+ online = False
+
+ self._invoke('aggr-offline', {'aggregate': aggr_name})
+ online = True
+ if fe.errno != Filer.EFSDOESNOTEXIST and \
+ raise fe
+
+ self._invoke('aggr-destroy', {'aggregate': aggr_name})
+ #If the volume was online, we will return it to same status
+ self._invoke('aggr-online', {'aggregate': vol_name})
This should be aggr_name, not vol_name
Post by Gris Ge
+ pass
+ raise fe
+
"""
Given a volume name and file return full path"
diff --git a/lsm/lsm/ontap.py b/lsm/lsm/ontap.py
index 6877f2e..002020e 100644
--- a/lsm/lsm/ontap.py
+++ b/lsm/lsm/ontap.py
@@ -21,12 +21,13 @@ import traceback
import urllib2
import urlparse
import sys
+import re
import na
from lsm import (Volume, Initiator, FileSystem, Snapshot, NfsExport,
AccessGroup, System, Capabilities, Disk, Pool, OptionalData,
IStorageAreaNetwork, INfs, LsmError, ErrorNumber, JobStatus,
- md5, Error, VERSION)
+ md5, Error, VERSION, common)
This should be _common; fix all references to it below as well.
Post by Gris Ge
#Maps na to lsm, this is expected to expand over time.
e_map = {
(LSM_VOL_PREFIX, LSM_INIT_PREFIX) = ('lsm_lun_container', 'lsm_init_')
(SS_JOB, SPLIT_JOB) = ('ontap-ss-file-restore', 'ontap-clone-split')
+ AGGR_CREATE_JOB = 'aggr_create'
+ NA_VOL_CREATE_JOB = 'na_vol_create'
VOLUME_PREFIX = '/vol'
'mixed_raid_type': Pool.RAID_TYPE_MIXED,
}
+ NA_POOL_CREATE_DEFAULT_RAID_TYPE = Pool.RAID_TYPE_RAID4
+
+ NA_AGGR_SYSTEM = 'aggr0'
+
self.f = None
self.sys_info = None
status,
self.sys_info.id, opt_data)
+ return int(
+ int(na_disk['physical-blocks']) *
+ int(na_disk['bytes-per-sector']))
+
@handle_ontap_errors
luns = self.f.luns_get_all()
return Ontap.NA_AGGR_STATUS_TO_LSM_STATUS_INFO[na_aggr_state]
return ''
+ return int(na_aggr['size-available'])
+
+ na_disks = self.f.disks()
pool_id = self._pool_id(na_aggr)
pool_name = na_aggr['name']
total_space = int(na_aggr['size-total'])
- free_space = int(na_aggr['size-available'])
+ free_space = Ontap._free_space_of_na_aggr(na_aggr)
system_id = self.sys_info.id
status = self._status_of_na_aggr(na_aggr)
opt_data = OptionalData()
Pool.ELEMENT_TYPE_POOL |
Pool.ELEMENT_TYPE_FS |
Pool.ELEMENT_TYPE_VOLUME)
element_type = element_type | Pool.ELEMENT_TYPE_SYS_RESERVED
opt_data.set('element_type', element_type)
return tmp_list[-1]
return None
+ na_aggrs = self.f.aggregates()
pool_id = self._pool_id(na_vol)
pool_name = self._pool_name_of_na_vol(na_vol)
total_space = int(na_vol['size-total'])
cap.set(Capabilities.EXPORT_FS)
cap.set(Capabilities.EXPORT_REMOVE)
cap.set(Capabilities.EXPORT_CUSTOM_PATH)
+ # Pool creation
+ cap.set(Capabilities.POOL_CREATE)
+ cap.set(Capabilities.POOL_CREATE_MEMBER_TYPE_DISK)
+ cap.set(Capabilities.POOL_CREATE_MEMBER_TYPE_POOL)
Neither of these exists in Capabilities.
Post by Gris Ge
+ # When changing POOL_CREATE_DISK_RAID_XXX, please change
+ # _na_aggr_create() also.
+ cap.set(Capabilities.POOL_CREATE_DISK_RAID_4)
+ cap.set(Capabilities.POOL_CREATE_DISK_RAID_6)
+ # Pool deletion
+ cap.set(Capabilities.POOL_DELETE)
return cap
@handle_ontap_errors
return pools
@handle_ontap_errors
+ def pool_create(self, system_id, pool_name, size_bytes,
+ raid_type=Pool.RAID_TYPE_UNKNOWN,
+ """
+ If member_type is Pool.MEMBER_TYPE_DISK_XXX, we create an aggregate.
+ If member_type is Pool.MEMBER_TYPE_POOL, we create a NetApp volume.
+ If member_type is Pool.MEMBER_TYPE_VOLUME, this is not supported.
+ 1. Creating an aggregate using automatically chosen free disks.
+ 2. Creating a NetApp volume on a non-system aggregate.
+ 3. Creating a NetApp volume on the system aggregate.
+ """
+ raise LsmError(ErrorNumber.INVALID_SYSTEM,
+ "Invalid system %s" % system_id)
+
+ raise LsmError(ErrorNumber.INVALID_NAME,
+ "Invalid pool name, only a-z, A-Z, 0-9 and - are"
+ "allowed")
+ raise_error = True
+ job_id = None
+ job_id = self._na_aggr_create(
+ pool_name, size_bytes, raid_type, member_type, raise_error)[0]
+ if raid_type != Pool.RAID_TYPE_UNKNOWN and \
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "Current plugin does not support create " +
+ "ONTAP traditional volume(RAIDed volume). " +
+ "Requested MEMBER_TYPE_POOL and raid %s(%d)." %
+ (Pool.raid_type_to_str(raid_type), raid_type))
+ job_id = self._na_vol_create(
+ pool_name, size_bytes, raise_error)[0]
+
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "NetApp ONTAP does not support creating pool "
+ "on LSM volumes(LUN)")
+ raise_error = False
+ if raid_type != Pool.RAID_TYPE_UNKNOWN and \
+ # We can only create aggregate if RAID type defined.
+ raise_error = True
+ (job_id, errno, reason) = self._na_aggr_create(
+ pool_name, size_bytes, raid_type, member_type, raise_error)
+
+ (job_id, errno2, reason2) = self._na_vol_create(
+ pool_name, size_bytes, raise_error)
+
+ raise LsmError(ErrorNumber.SIZE_INSUFFICIENT_SPACE,
+ "Failed to create LSM pool via NetApp " +
+ "aggregate: error code(%d), reason: %s\n" %
+ (errno, reason) +
+ "Failed to create LSM pool via NetApp " +
+ "volume: error code(%d), reason: %s" %
+ (errno2, reason2))
+ return (job_id, None)
+
+ raise LsmError(ErrorNumber.NO_SUPPORT,
+ "NetApp ONTAP does not support pool member_type: "
+ "%s(%d)" %
+ (Pool.member_type_to_str(member_type), member_type))
+
+ """
+ Return a list of na_disk which is not used by any aggregate.
+ """
+ # In the NetApp ONTAP simulator 8.1.1, na_disk['aggregate'] might point
+ # to a non-existent aggregate. Here we get all existing aggregates.
+ na_aggrs = self.f.aggregates()
+ na_aggr_names = [na_aggr['name'] for na_aggr in na_aggrs]
+ na_disks = self.f.disks()
+ rc = []
+ if disk_type != Disk.DISK_TYPE_UNKNOWN and \
+ continue
+ if 'aggregate' not in na_disk or \
+ rc.extend([na_disk])
+ return rc
+
+ """
+ {
+ na_disk['effective-disk-type'] = {
+ Disk.total_space = [ na_disk, ],
+ },
+ }
+ When disk_type is defined, the returned structure will only contain
+ disks of that type.
+ disk_type # Disk.DISK_TYPE_XXX
+ free_disks_t # Check above
+ or
+ dict() # Nothing found
+ N/A
+ """
+ rc = dict()
+ free_na_disks = self._free_disks_list(disk_type)
+ return rc
+ na_disk_type = na_disk['effective-disk-type']
+ disk_size = Ontap._disk_total_space_of(na_disk)
+ continue
+ rc[na_disk_type] = dict()
+ rc[na_disk_type][disk_size] = []
+ rc[na_disk_type][disk_size].extend([na_disk])
+ return rc
+
+ """
+ Automatically choose the disks for given size_bytes, raid_type,
+ disk_type from na_disks_t.
+ The 'na_disks_t' was generated by self._free_disks(disk_type)
+ The 'raid_type' cannot be RAID_TYPE_UNKNOWN
+ """
+ cur_na_disks = na_disks_t[na_disk_type][disk_size]
+ continue
+ raid_size = Ontap._size_of_raid(
+ disk_size, member_count, raid_type)
+ return cur_na_disks[0:member_count]
+ return []
+
+ """
+ Automatically choose the na_aggr for given size_bytes.
+ The system aggregate -- aggr0 will be the last choice.
+ Return na_aggr if found.
+ Return None if not found
+ """
+ na_aggrs = self.f.aggregates()
+ sys_na_aggr = None
+ sys_na_aggr = na_aggr
+ continue
+ return na_aggr
+ return na_aggr
Should the above two lines be indented by 1?
Post by Gris Ge
+ return None
+
+ """
+ Only check RAID 4 and RAID 6, as those are all ONTAP supports for
+ creating an aggregate.
+ 1. Incorrect count of disks for the RAID type. RAID 6 needs 3+ disks.
+ 2. Unknown RAID type.
+ 1. WAFL (ONTAP file system) reserve of 10%
+ 2. Aggregate snapshot reserve of 5%
+ """
+ reserve_ratio = 0.15
+ return 0
+ return int(member_size * (member_count - 1) * (1 - reserve_ratio))
+
+ return 0
+ return int(member_size * (member_count - 2) * (1 - reserve_ratio))
+ return 0
+
+ def _na_aggr_create(self, pool_name, size_bytes, raid_type, member_type,
+ """
+ Return (job_id, errno, reason)
+ NetApp ONTAP "aggr-create" require defining a list of disks to use
+ or define a count of disks to use. To meet the LSM API design,
+ we will provide a list of disks auto chose by ourselves.
+ """
+ in_paras = dict()
+ in_paras['aggregate'] = pool_name
+
+ disk_type = Disk.DISK_TYPE_UNKNOWN
+ disk_type = Pool.member_type_to_disk_type(member_type)
+
+ raid_type = Ontap.NA_POOL_CREATE_DEFAULT_RAID_TYPE
+
+ if raid_type != Pool.RAID_TYPE_RAID4 and \
+ errno = ErrorNumber.NO_SUPPORT
+ reason = "NetApp ONTAP does not support RAID type: %s(%d)" % \
+ (Pool.raid_type_to_str(raid_type), raid_type)
+ return (None, errno, reason)
+ raise LsmError(errno, reason)
+
+ na_disks_t = self._free_disks(disk_type)
+
+ chose_disks = self._auto_choose_disks(na_disks_t, size_bytes,
+ raid_type)
+
+ errno = ErrorNumber.DISK_BUSY
+ reason = "No enough free disks to create NetApp aggregate " + \
+ "as LSM pool"
+ return (None, errno, reason)
+ raise LsmError(errno, reason)
+
+ chose_na_raid_type = None
+ chose_na_raid_type = na_raid_type
+ break
+ # We already checked the RAID type, so chose_na_raid_type will surely
+ # be defined.
+
+ na_aggr = self.f.aggregate_create(
+ pool_name, chose_na_raid_type, chose_disks)
+ raise LsmError(ErrorNumber.PLUGIN_ERROR,
+ "Failed to create NetApp aggregate with vendor "
+ "error number %d and message: %s" %
+ (fe.errno, fe.reason))
+ return (None, fe.errno, fe.reason)
+
+
+ """
+ Return (job_id, errno, reason)
+ """
+ na_aggr = self._auto_choose_aggr(size_bytes)
+ errno = ErrorNumber.SIZE_INSUFFICIENT_SPACE
+ reason = ("No free aggregate space to create new " +
+ "NetApp volume(LSM Pool) with size: %s(%d)" %
+ (common.size_bytes_2_size_human(size_bytes),
+ size_bytes))
+ raise LsmError(errno, reason)
+ return (None, errno, reason)
+ na_vol = self.f.volume_create(
+ na_aggr['name'], pool_name, size_bytes)
+ raise LsmError(ErrorNumber.PLUGIN_ERROR,
+ "Failed to create NetApp volume with vendor "
+ "error number %d and message: %s" %
+ (fe.errno, fe.reason))
+ return (None, fe.errno, fe.reason)
+
+ na_aggrs = self.f.aggregates(aggr_name)
+ raise LsmError(
+ ErrorNumber.INVALID_JOB,
+ "Invalid JOB ID %s: cannot find NetApp aggregate %s" %
+ aggr_name))
+ na_aggr = na_aggrs[0]
+ # NetApp does not expose the progress of aggregate creation.
+ # We just report 1%.
+ return JobStatus.INPROGRESS, 1, None
+ pool = self._pool_from_na_aggr(
+ na_aggr, flags=Pool.RETRIEVE_FULL_INFO)
+ return JobStatus.COMPLETE, 100, pool
+
+ na_vols = self.f.volumes(na_vol_name)
+ raise LsmError(ErrorNumber.INVALID_JOB,
+ "Invalid JOB ID %s: cannot find NetApp volume %s" %
+ na_vol_name))
+ na_vol = na_vols[0]
+ # NetApp does not expose the progress of volume creation.
+ # We just report 1%.
+ return JobStatus.INPROGRESS, 1, None
+ pool = self._pool_from_na_vol(
+ na_vol, flags=Pool.RETRIEVE_FULL_INFO)
+ return JobStatus.COMPLETE, 100, pool
+
+ na_vol_name = Ontap._na_vol_name_of_pool(pool.name)
+ # Since we already got the pool object, assume the volume/aggregate
+ # is valid.
+ # Deleting a volume
+ self.f.volume_delete(na_vol_name)
+ # Deleting an aggregate
+ self.f.aggregate_delete(pool.name)
+ return None
+
return [self.sys_info]
return self._restore_file_status(int(job[1]))
return self._clone_split_status(job[1])
+ return self._job_status_create_na_aggr(job[1])
+ return self._job_status_create_na_vol(job[1])
raise LsmError(ErrorNumber.INVALID_JOB, "Invalid job")