diff --git a/delfin/drivers/hpe/hpe_msa/__init__.py b/delfin/drivers/hpe/hpe_msa/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/drivers/hpe/hpe_msa/consts.py b/delfin/drivers/hpe/hpe_msa/consts.py
new file mode 100644
index 000000000..f93ea19ea
--- /dev/null
+++ b/delfin/drivers/hpe/hpe_msa/consts.py
@@ -0,0 +1,59 @@
+from delfin.common import constants
+
+
+class AlertOIDNumber:
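+    # Variable OIDs from the connUnitEventTable of the Fibre Alliance
+    # FC-MGMT MIB (experimental subtree 1.3.6.1.3.94), which the MSA uses
+    # for its SNMP event traps.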
+ OID_ERR_ID = '1.3.6.1.3.94.1.11.1.1'
+ OID_EVENT_TYPE = '1.3.6.1.3.94.1.11.1.7'
+ OID_LAST_TIME = '1.3.6.1.3.94.1.11.1.4'
+ OID_EVENT_DESC = '1.3.6.1.3.94.1.11.1.9'
+ OID_EVENT_ID = '1.3.6.1.3.94.1.11.1.3'
+ OID_SEVERITY = '1.3.6.1.3.94.1.11.1.6'
+
+
+class StorageVendor:
+ HPE_MSA_VENDOR = "HPE"
+
+
+class TrapSeverity:
+ TRAP_SEVERITY_MAP = {
+ '1': 'unknown',
+ '2': 'emergency',
+ '3': 'alert',
+ '4': constants.Severity.CRITICAL,
+ '5': 'error',
+ '6': constants.Severity.WARNING,
+ '7': 'notify',
+ '8': constants.Severity.INFORMATIONAL,
+ '9': 'debug',
+ '10': 'mark'
+ }
+
+ SEVERITY_MAP = {"warning": "Warning",
+ "informational": "Informational",
+ "error": "Major"
+ }
+
+
+class SecondsNumber:
+ SECONDS_TO_MS = 1000
+
+
+class RpmSpeed:
+ RPM_SPEED = 1000
+
+
+class DiskPhysicalType:
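+    # Keyed by the 'description' value reported by the 'show disks' CLI
+    # command.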
+ DISK_PHYSICAL_TYPE = {
+ 'fc': constants.DiskPhysicalType.FC,
+ 'SAS': constants.DiskPhysicalType.SAS
+ }
+
+
+class InitiatorType:
+ ISCSI_INITIATOR_TYPE = "9"
+ FC_INITIATOR_TYPE = "6"
+ SAS_INITIATOR_TYPE = "8"
+ ISCSI_INITIATOR_DESCRIPTION = 'iSCSI Initiator'
+ FC_INITIATOR_DESCRIPTION = 'FC Initiator'
+ IB_INITIATOR_DESCRIPTION = 'IB Initiator'
+ UNKNOWN_INITIATOR_DESCRIPTION = 'Unknown Initiator'
diff --git a/delfin/drivers/hpe/hpe_msa/hpe_msastor.py b/delfin/drivers/hpe/hpe_msa/hpe_msastor.py
new file mode 100644
index 000000000..7d117b939
--- /dev/null
+++ b/delfin/drivers/hpe/hpe_msa/hpe_msastor.py
@@ -0,0 +1,48 @@
+from delfin.drivers import driver
+from delfin.drivers.hpe.hpe_msa import ssh_handler
+from delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler
+
+
+class HpeMsaStorDriver(driver.StorageDriver):
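+    """Driver for HPE MSA storage arrays.
+
+    All resource queries run over SSH against the MSA CLI through
+    SSHHandler; trap parsing is delegated to SSHHandler.parse_alert.
+    Expects the standard delfin access_info kwargs with an 'ssh' block
+    (host, port, username, password), as exercised in the unit tests.
+    """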
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.ssh_handler = ssh_handler.SSHHandler(**kwargs)
+
+ def reset_connection(self, context, **kwargs):
+ self.ssh_handler.login()
+
+ def get_storage(self, context):
+ return self.ssh_handler.get_storage(self.storage_id)
+
+ def list_storage_pools(self, context):
+ return self.ssh_handler.list_storage_pools(self.storage_id)
+
+ def list_volumes(self, context):
+ return self.ssh_handler.list_storage_volume(self.storage_id)
+
+ def list_controllers(self, context):
+        return self.ssh_handler.list_storage_controller(self.storage_id)
+
+ def list_ports(self, context):
+ return self.ssh_handler.list_storage_ports(self.storage_id)
+
+ def list_disks(self, context):
+ return self.ssh_handler.list_storage_disks(self.storage_id)
+
+ def list_alerts(self, context, query_para=None):
+ return self.ssh_handler.list_alerts(query_para)
+
+ def add_trap_config(self, context, trap_config):
+ pass
+
+ def remove_trap_config(self, context, trap_config):
+ pass
+
+ @staticmethod
+ def parse_alert(context, alert):
+ return SSHHandler.parse_alert(alert)
+
+ def clear_alert(self, context, alert):
+ pass
diff --git a/delfin/drivers/hpe/hpe_msa/ssh_handler.py b/delfin/drivers/hpe/hpe_msa/ssh_handler.py
new file mode 100644
index 000000000..a752a435b
--- /dev/null
+++ b/delfin/drivers/hpe/hpe_msa/ssh_handler.py
@@ -0,0 +1,481 @@
+import hashlib
+import time
+
+import six
+from oslo_log import log as logging
+from operator import itemgetter
+from itertools import groupby
+from delfin import exception
+from delfin.common import constants, alert_util
+from delfin.drivers.utils.ssh_client import SSHPool
+from delfin.drivers.utils.tools import Tools
+from delfin.drivers.hpe.hpe_msa import consts
+
+try:
+ import xml.etree.cElementTree as Et
+except ImportError:
+ import xml.etree.ElementTree as Et
+
+LOG = logging.getLogger(__name__)
+
+
+class SSHHandler(object):
+
+ def __init__(self, **kwargs):
+ self.ssh_pool = SSHPool(**kwargs)
+
+ def login(self):
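+        # Issue a lightweight CLI query to verify the SSH credentials; any
+        # failure is logged and re-raised to the caller.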
+ try:
+ self.ssh_pool.do_exec('show pools')
+ except Exception as e:
+ LOG.error("Failed to login msa %s" %
+ (six.text_type(e)))
+ raise e
+
+ def get_storage(self, storage_id):
+ try:
+ system_info = self.ssh_pool.do_exec('show system')
+ system_data = self.handle_xml_to_dict(system_info, 'system')
+ version_info = self.ssh_pool.do_exec('show version')
+ version_arr = self.handle_xml_to_json(version_info, 'versions')
+ version_id = ""
+ if version_arr:
+ version_id = version_arr[0].get('bundle-version')
+ if system_data:
+ pools_list = self.list_storage_pools(storage_id)
+ total_capacity = 0
+ if pools_list:
+ for pool in pools_list:
+ total_capacity += int(pool.get('total_capacity'))
+ disks_list = self.list_storage_disks(storage_id)
+ raw_capacity = 0
+ if disks_list:
+ for disk in disks_list:
+ raw_capacity += int(disk.get('capacity'))
+ volumes_list = self.list_storage_volume(storage_id)
+ volume_all_size = 0
+ if volumes_list:
+ for volume in volumes_list:
+ volume_all_size += int(volume.get('total_capacity'))
+ health = system_data.get('health')
+ status = constants.StorageStatus.OFFLINE
+ if health == 'OK':
+ status = constants.StorageStatus.NORMAL
+ elif health == 'Degraded':
+ status = constants.StorageStatus.DEGRADED
+ serial_num = system_data.get('midplane-serial-number')
+ storage_map = {
+ 'name': system_data.get('system-name'),
+ 'vendor': consts.StorageVendor.HPE_MSA_VENDOR,
+ 'model': system_data.get('product-id'),
+ 'status': status,
+ 'serial_number': serial_num,
+ 'firmware_version': version_id,
+ 'location': system_data.get('system-location'),
+ 'raw_capacity': int(raw_capacity),
+ 'total_capacity': int(total_capacity),
+ 'used_capacity': int(volume_all_size),
+ 'free_capacity': int(total_capacity - volume_all_size)
+ }
+ return storage_map
+ except Exception as e:
+ err_msg = "Failed to get system info : %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ def list_storage_disks(self, storage_id):
+ try:
+ disk_info = self.ssh_pool.do_exec('show disks')
+ disk_detail = self.handle_xml_to_json(disk_info, 'drives')
+ disks_arr = []
+ if disk_detail:
+ for data in disk_detail:
+ health = data.get('health')
+ status = constants.StoragePoolStatus.OFFLINE
+ if health == 'OK':
+ status = constants.StoragePoolStatus.NORMAL
+ size = self.parse_string_to_bytes(data.get('size'))
+                    physical_type = consts.DiskPhysicalType.\
+                        DISK_PHYSICAL_TYPE.get(
+                            data.get('description'),
+                            constants.DiskPhysicalType.UNKNOWN)
+ rpm = data.get('rpm')
+ if rpm:
+ rpm = int(rpm) * consts.RpmSpeed.RPM_SPEED
+ data_map = {
+ 'native_disk_id': data.get('location'),
+ 'name': data.get('location'),
+ 'physical_type': physical_type,
+ 'status': status,
+ 'storage_id': storage_id,
+ 'native_disk_group_id': data.get('disk-group'),
+ 'serial_number': data.get('serial-number'),
+ 'manufacturer': data.get('vendor'),
+ 'model': data.get('model'),
+ 'speed': rpm,
+ 'capacity': int(size),
+ 'health_score': status
+ }
+ disks_arr.append(data_map)
+ return disks_arr
+ except Exception as e:
+ err_msg = "Failed to get storage disk: %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ def list_storage_ports(self, storage_id):
+ try:
+ ports_info = self.ssh_pool.do_exec('show ports')
+ ports_split = ports_info.split('\n')
+ ports_array = ports_split[1:len(ports_split) - 1]
+ ports_xml_data = ''.join(ports_array)
+ xml_element = Et.fromstring(ports_xml_data)
+ ports_json = []
+ for element_data in xml_element.iter('OBJECT'):
+ property_name = element_data.get('basetype')
+ if property_name != 'status':
+ msg = {}
+ for child in element_data.iter('PROPERTY'):
+ msg[child.get('name')] = child.text
+ ports_json.append(msg)
+ ports_elements_info = []
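+            # Each port appears as two consecutive OBJECT records in the
+            # XML (the base port entry and its protocol-specific
+            # companion); merge each adjacent pair into one dict.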
+ for i in range(0, len(ports_json) - 1, 2):
+ port_element = ports_json[i].copy()
+ port_element.update(ports_json[i + 1])
+ ports_elements_info.append(port_element)
+ list_ports = []
+ for data in ports_elements_info:
+ status = constants.PortHealthStatus.NORMAL
+ conn_status = constants.PortConnectionStatus.CONNECTED
+ if data.get('health') != 'OK':
+ status = constants.PortHealthStatus.ABNORMAL
+ conn_status = constants.PortConnectionStatus.\
+ DISCONNECTED
+ wwn = None
+ port_type = constants.PortType.FC
+ location_port_type = data.get('port-type')
+ if location_port_type:
+ location_port_type = location_port_type.upper()
+ if location_port_type == 'ISCSI':
+ port_type = constants.PortType.ETH
+ else:
+ target_id = data.get('target-id')
+ if target_id:
+ wwn = target_id
+ location = '%s_%s' % (data.get('port'),
+ location_port_type)
+ speed = data.get('configured-speed', None)
+ max_speed = 0
+ if speed != 'Auto' and speed is not None:
+ max_speed = self.parse_string_to_bytes(speed)
+ data_map = {
+ 'native_port_id': data.get('durable-id'),
+ 'name': data.get('port'),
+ 'type': port_type,
+ 'connection_status': conn_status,
+ 'health_status': status,
+ 'location': location,
+ 'storage_id': storage_id,
+ 'speed': max_speed,
+ 'max_speed': max_speed,
+ 'mac_address': data.get('mac-address'),
+ 'ipv4': data.get('ip-address'),
+ 'wwn': wwn
+ }
+ list_ports.append(data_map)
+ return list_ports
+ except Exception as e:
+ err_msg = "Failed to get storage ports: %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ def list_storage_controller(self, storage_id):
+ try:
+            controller_info = self.ssh_pool.do_exec('show controllers')
+ controller_detail = self.handle_xml_to_json(
+ controller_info, 'controllers')
+ controller_arr = []
+ for data in controller_detail:
+ health = data.get('health')
+ status = constants.StoragePoolStatus.OFFLINE
+ if health == 'OK':
+ status = constants.StoragePoolStatus.NORMAL
+ cpu_info = data.get('sc-cpu-type')
+ memory_size = data.get('system-memory-size')
+ if memory_size is not None:
+ memory_size += "MB"
+ system_memory_size = self.parse_string_to_bytes(
+ memory_size)
+ data_map = {
+ 'native_controller_id': data.get('controller-id'),
+ 'name': data.get('durable-id'),
+ 'storage_id': storage_id,
+ 'status': status,
+ 'location': data.get('position'),
+ 'soft_version': data.get('sc-fw'),
+ 'cpu_info': cpu_info,
+ 'memory_size': int(system_memory_size)
+ }
+ controller_arr.append(data_map)
+ return controller_arr
+ except Exception as e:
+ err_msg = "Failed to get storage controllers: %s"\
+ % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ def list_storage_volume(self, storage_id):
+ try:
+ volume_infos = self.ssh_pool.do_exec('show volumes')
+ volume_detail = self.handle_xml_to_json(volume_infos, 'volumes')
+ pools_info = self.ssh_pool.do_exec('show pools')
+ pool_detail = self.handle_xml_to_json(pools_info, 'pools')
+ list_volumes = []
+ for data in volume_detail:
+ health = data.get('health')
+ status = constants.StoragePoolStatus.OFFLINE
+ if health == 'OK':
+ status = constants.StoragePoolStatus.NORMAL
+ total_size = self.parse_string_to_bytes(data.get('total-size'))
+ total_avail = self.parse_string_to_bytes(
+ data.get('allocated-size'))
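+                # Tie the volume to its pool by matching
+                # 'virtual-disk-name' against the pool name, defaulting to
+                # the first pool when no name matches.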
+ native_storage_pool_id = ''
+ if pool_detail:
+                    native_storage_pool_id = \
+                        pool_detail[0].get('serial-number')
+                    for pools in pool_detail:
+                        if data.get('virtual-disk-name') == \
+                                pools.get('name'):
+                            native_storage_pool_id = \
+                                pools.get('serial-number')
+                blocks = data.get('blocks')
+                blocks = int(blocks) if blocks is not None else 0
+ volume_map = {
+ 'name': data.get('volume-name'),
+ 'storage_id': storage_id,
+ 'description': data.get('volume-name'),
+ 'status': status,
+ 'native_volume_id': str(data.get('durable-id')),
+ 'native_storage_pool_id': native_storage_pool_id,
+ 'wwn': str(data.get('wwn')),
+ 'type': data.get('volume-type'),
+ 'total_capacity': int(total_size),
+                    'free_capacity': int(total_size - total_avail),
+ 'used_capacity': int(total_avail),
+                    'blocks': blocks,
+ 'compressed': True,
+ 'deduplicated': True
+ }
+ list_volumes.append(volume_map)
+ return list_volumes
+ except Exception as e:
+ err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ def list_storage_pools(self, storage_id):
+ try:
+ pool_infos = self.ssh_pool.do_exec('show pools')
+ pool_detail = self.handle_xml_to_json(pool_infos, 'pools')
+ volume_list = self.list_storage_volume(storage_id)
+ pools_list = []
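+            # A pool's used capacity is the sum of the capacities of the
+            # volumes it hosts; the summed 'blocks' feed
+            # subscribed_capacity.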
+ if pool_detail:
+ for data in pool_detail:
+ volume_size = 0
+ blocks = 0
+ if volume_list:
+ for volume in volume_list:
+ if volume.get('native_storage_pool_id') == data.\
+ get('serial-number'):
+ volume_size += volume.get('total_capacity')
+ blocks += volume.get('blocks')
+ health = data.get('health')
+ status = constants.StoragePoolStatus.OFFLINE
+ if health == 'OK':
+ status = constants.StoragePoolStatus.NORMAL
+ total_size = self.parse_string_to_bytes(
+ data.get('total-size'))
+ pool_map = {
+ 'name': data.get('name'),
+ 'storage_id': storage_id,
+ 'native_storage_pool_id': data.get('serial-number'),
+ 'status': status,
+ 'storage_type': constants.StorageType.BLOCK,
+ 'total_capacity': int(total_size),
+ 'subscribed_capacity': int(blocks),
+ 'used_capacity': volume_size,
+ 'free_capacity': int(total_size - volume_size)
+ }
+ pools_list.append(pool_map)
+ return pools_list
+ except Exception as e:
+ err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ @staticmethod
+ def parse_string_to_bytes(value):
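+        # Values arrive either as a bare digit string (already bytes) or
+        # as '<number><unit>' such as '599.9GB'; the suffixed form is
+        # converted through the shared capacity helper.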
+        capacity = 0
+        if value:
+            if value.isdigit():
+                capacity = float(value)
+            elif value != '0B':
+                unit = value[-2:]
+                capacity = float(value[:-2]) * int(
+                    Tools.change_capacity_to_bytes(unit))
+        return capacity
+
+ @staticmethod
+ def handle_xml_to_json(detail_info, element):
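+        # The CLI response wraps the XML document with an echoed command
+        # line before it and a prompt line after it; keep only the lines
+        # in between before parsing.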
+ detail_arr = []
+ detail_data = detail_info.split('\n')
+ detail = detail_data[1:len(detail_data) - 1]
+ detail_xml = ''.join(detail)
+ xml_element = Et.fromstring(detail_xml)
+ for children in xml_element.iter('OBJECT'):
+ property_name = children.get('basetype')
+ if element == property_name:
+ msg = {}
+ for child in children.iter('PROPERTY'):
+ msg[child.get('name')] = child.text
+ detail_arr.append(msg)
+ return detail_arr
+
+ def list_alerts(self, query_para):
+ alert_list = []
+ try:
+ alert_infos = self.ssh_pool.do_exec('show events error')
+ alert_json = self.handle_xml_to_json(alert_infos, 'events')
+ for alert_map in alert_json:
+ now = time.time()
+ occur_time = int(round(now * consts.SecondsNumber
+ .SECONDS_TO_MS))
+ time_stamp = alert_map.get('time-stamp-numeric')
+ if time_stamp is not None:
+ occur_time = int(time_stamp) * consts.SecondsNumber\
+ .SECONDS_TO_MS
+ if not alert_util.is_alert_in_time_range(query_para,
+ occur_time):
+ continue
+ event_code = alert_map.get('event-code')
+ event_id = alert_map.get('event-id')
+ location = alert_map.get('message')
+ resource_type = alert_map.get('event-code')
+ severity = alert_map.get('severity')
+ additional_info = str(alert_map.get('additional-information'))
+                match_key = ''
+                if event_code:
+                    match_key += event_code
+ if severity:
+ match_key += severity
+ if location:
+ match_key += location
+ description = None
+ if additional_info:
+ description = additional_info
+ if severity == 'Informational' or severity == 'RESOLVED':
+ continue
+ alert_model = {
+ 'alert_id': event_id,
+ 'alert_name': event_code,
+ 'severity': severity,
+ 'category': constants.Category.FAULT,
+ 'type': 'EquipmentAlarm',
+ 'sequence_number': event_id,
+ 'occur_time': occur_time,
+ 'description': description,
+ 'resource_type': resource_type,
+ 'location': location,
+ 'match_key': hashlib.md5(match_key.encode()).hexdigest()
+ }
+ alert_list.append(alert_model)
+ alert_list_data = SSHHandler.get_last_alert_data(alert_list)
+ return alert_list_data
+ except Exception as e:
+ err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
+ LOG.error(err_msg)
+ raise e
+
+ @staticmethod
+ def get_last_alert_data(alert_json):
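+        # Collapse duplicate events: group alerts sharing the same name,
+        # location and severity, and keep one representative per group.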
+ alert_list = []
+ alert_json.sort(key=itemgetter('alert_name', 'location', 'severity'))
+        for _, item in groupby(alert_json, key=itemgetter(
+                'alert_name', 'location', 'severity')):
+            alert_list.append(next(item))
+ return alert_list
+
+ @staticmethod
+ def parse_alert(alert):
+ try:
+ alert_model = dict()
+ alert_id = None
+ description = None
+ severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get('8')
+ sequence_number = None
+ event_type = None
+ for alert_key, alert_value in alert.items():
+ if consts.AlertOIDNumber.OID_ERR_ID in alert_key:
+ alert_id = str(alert_value)
+ elif consts.AlertOIDNumber.OID_EVENT_TYPE in alert_key:
+ event_type = alert_value
+ elif consts.AlertOIDNumber.OID_EVENT_DESC in alert_key:
+ description = alert_value
+ elif consts.AlertOIDNumber.OID_SEVERITY in alert_key:
+                    severity = consts.TrapSeverity.TRAP_SEVERITY_MAP.get(
+                        alert_value, constants.Severity.INFORMATIONAL)
+ elif consts.AlertOIDNumber.OID_EVENT_ID in alert_key:
+ sequence_number = alert_value
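+            # Prefer an alert id parsed from the description: take its
+            # first comma-separated field and keep the text after the ':'.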
+ if description:
+ desc_arr = description.split(",")
+ if desc_arr:
+ alert_id = SSHHandler.split_by_char_and_number(
+ desc_arr[0], ":", 1)
+ alert_model['alert_id'] = str(alert_id)
+ alert_model['alert_name'] = event_type
+ alert_model['severity'] = severity
+ alert_model['category'] = constants.Category.FAULT
+ alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
+ alert_model['sequence_number'] = sequence_number
+ now = time.time()
+ alert_model['occur_time'] = int(round(now * consts.
+ SecondsNumber.SECONDS_TO_MS))
+ alert_model['description'] = description
+ alert_model['location'] = description
+ return alert_model
+ except Exception as e:
+ LOG.error(e)
+ msg = "Failed to build alert model: %s." % (six.text_type(e))
+ raise exception.InvalidResults(msg)
+
+ @staticmethod
+ def split_by_char_and_number(split_str, split_char, arr_number):
+ split_value = ''
+ if split_str:
+ tmp_value = split_str.split(split_char, 1)
+ if arr_number == 1 and len(tmp_value) > 1:
+ split_value = tmp_value[arr_number].strip()
+ elif arr_number == 0:
+ split_value = tmp_value[arr_number].strip()
+ return split_value
+
+ @staticmethod
+ def handle_xml_to_dict(xml_info, element):
+ msg = {}
+ xml_split = xml_info.split('\n')
+ xml_data = xml_split[1:len(xml_split) - 1]
+ detail_xml = ''.join(xml_data)
+ xml_element = Et.fromstring(detail_xml)
+ for children in xml_element.iter('OBJECT'):
+ property_name = children.get('basetype')
+ if element == property_name:
+ for child in children.iter('PROPERTY'):
+ msg[child.get('name')] = child.text
+ return msg
diff --git a/delfin/tests/unit/drivers/hpe/hpe_msa/__init__.py b/delfin/tests/unit/drivers/hpe/hpe_msa/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py b/delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py
new file mode 100644
index 000000000..10ef2a5f2
--- /dev/null
+++ b/delfin/tests/unit/drivers/hpe/hpe_msa/test_constans.py
@@ -0,0 +1,551 @@
+LIST_CONTROLLERS = """
+
+
+
+
+
+
+
+"""
+
+LIST_SYSTEM = """
+
+
+
+
+
+"""
+
+LIST_VERSION = """
+
+
+
+
+
+"""
+
+LIST_PORTS = """
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+LIST_POOLS = """
+
+
+
+
+
+"""
+
+LIST_VOLUMES = """
+
+
+
+
+
+
+
+"""
+
+LIST_DISKS = """
+
+
+
+
+
+
+
+
+
+
+
+"""
+LIST_ERROR = """
+
+
+
+
+
+"""
+
+error_result = [
+ {
+ 'alert_id': 'A891',
+ 'alert_name': '557',
+ 'category': 'Fault',
+ 'description': 'Management',
+ 'location': 'An Enclosure Management Processor(EMP)',
+ 'match_key': 'd0317252aed04fd8b68e79d7eab08277',
+ 'occur_time': 1636704980000,
+ 'resource_type': '557',
+ 'sequence_number': 'A891',
+ 'severity': 'ERROR',
+ 'type': 'EquipmentAlarm'
+ }
+]
+
+volume_result = [
+ {
+ 'name': 'Vol0001',
+ 'storage_id': 'kkk',
+ 'description': 'Vol0001',
+ 'status': 'normal',
+ 'native_volume_id': 'V1',
+ 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
+ 'wwn': '600C0FF00026C4EAFA80546101000000',
+ 'type': 'base',
+ 'total_capacity': 107266808217,
+        'free_capacity': 107266808217,
+ 'used_capacity': 0,
+ 'blocks': 195305472,
+ 'compressed': True,
+ 'deduplicated': True
+ }, {
+ 'name': 'Vol0002',
+ 'storage_id': 'kkk',
+ 'description': 'Vol0002',
+ 'status': 'normal',
+ 'native_volume_id': 'V2',
+ 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
+ 'wwn': '600C0FF00026C4EA0A81546101000000',
+ 'type': 'base',
+ 'total_capacity': 107266808217,
+        'free_capacity': 107266808217,
+ 'used_capacity': 0,
+ 'blocks': 195305472,
+ 'compressed': True,
+ 'deduplicated': True
+ }
+]
+
+pools_result = [
+ {
+ 'name': 'A',
+ 'storage_id': 'kkk',
+ 'native_storage_pool_id': '00c0ff26c4ea0000d980546101000000',
+ 'status': 'normal',
+ 'storage_type': 'block',
+ 'total_capacity': 1285054214963,
+ 'subscribed_capacity': 390610944,
+ 'used_capacity': 214533616434,
+ 'free_capacity': 1070520598529
+ }
+]
+
+ports_result = [
+ {
+ 'native_port_id': 'hostport_A1',
+ 'name': 'A1', 'type': 'fc',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'A1_FC',
+ 'storage_id': 'kkk',
+ 'speed': 8589934592.0,
+ 'max_speed': 8589934592.0,
+ 'mac_address': None,
+ 'ipv4': None,
+ 'wwn': '207000c0ff26dcb0'
+ }, {
+ 'native_port_id': 'hostport_A2',
+ 'name': 'A2',
+ 'type': 'fc',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'A2_FC',
+ 'storage_id': 'kkk',
+ 'speed': 8589934592.0,
+ 'max_speed': 8589934592.0,
+ 'mac_address': None,
+ 'ipv4': None,
+ 'wwn': '217000c0ff26dcb0'
+ }, {
+ 'native_port_id': 'hostport_A3',
+ 'name': 'A3',
+ 'type': 'eth',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'A3_ISCSI',
+ 'storage_id': 'kkk',
+ 'speed': 0,
+ 'max_speed': 0,
+ 'mac_address': '00:C0:FF:35:BD:64',
+ 'ipv4': '0.0.0.0',
+ 'wwn': None
+ }, {
+ 'native_port_id': 'hostport_A4',
+ 'name': 'A4',
+ 'type': 'eth',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'A4_ISCSI',
+ 'storage_id': 'kkk',
+ 'speed': 0,
+ 'max_speed': 0,
+ 'mac_address': '00:C0:FF:35:BD:65',
+ 'ipv4': '0.0.0.0',
+ 'wwn': None
+ }, {
+ 'native_port_id': 'hostport_B1',
+ 'name': 'B1',
+ 'type': 'fc',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'B1_FC',
+ 'storage_id': 'kkk',
+ 'speed': 8589934592.0,
+ 'max_speed': 8589934592.0,
+ 'mac_address': None,
+ 'ipv4': None,
+ 'wwn': '247000c0ff26dcb0'
+ }, {
+ 'native_port_id': 'hostport_B2',
+ 'name': 'B2',
+ 'type': 'fc',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'B2_FC',
+ 'storage_id': 'kkk',
+ 'speed': 8589934592.0,
+ 'max_speed': 8589934592.0,
+ 'mac_address': None,
+ 'ipv4': None,
+ 'wwn': '257000c0ff26dcb0'
+ }, {
+ 'native_port_id': 'hostport_B3',
+ 'name': 'B3',
+ 'type': 'eth',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'B3_ISCSI', 'storage_id': 'kkk',
+ 'speed': 0,
+ 'max_speed': 0,
+ 'mac_address': '00:C0:FF:35:BA:BC',
+ 'ipv4': '0.0.0.0',
+ 'wwn': None
+ }, {
+ 'native_port_id': 'hostport_B4',
+ 'name': 'B4',
+ 'type': 'eth',
+ 'connection_status': 'disconnected',
+ 'health_status': 'abnormal',
+ 'location': 'B4_ISCSI',
+ 'storage_id': 'kkk',
+ 'speed': 0,
+ 'max_speed': 0,
+ 'mac_address': '00:C0:FF:35:BA:BD',
+ 'ipv4': '0.0.0.0',
+ 'wwn': None
+ }]
+
+disks_result = [
+ {
+ 'native_disk_id': '1.1',
+ 'name': '1.1',
+ 'physical_type': 'sas',
+ 'status': 'normal',
+ 'storage_id': 'kkk',
+ 'native_disk_group_id': 'dgA01',
+ 'serial_number': '6SL9CD560000N51404EF',
+ 'manufacturer': 'SEAGATE',
+ 'model': 'ST3600057SS',
+ 'speed': 15000,
+ 'capacity': 644352468582,
+ 'health_score': 'normal'
+ }, {
+ 'native_disk_id': '1.2',
+ 'name': '1.2',
+ 'physical_type': 'sas',
+ 'status': 'normal',
+ 'storage_id': 'kkk',
+ 'native_disk_group_id': 'dgA01',
+ 'serial_number': '6SL7X4RE0000B42601SF',
+ 'manufacturer': 'SEAGATE',
+ 'model': 'ST3600057SS',
+ 'speed': 15000,
+ 'capacity': 644352468582,
+ 'health_score': 'normal'
+ }, {
+ 'native_disk_id': '1.3',
+ 'name': '1.3',
+ 'physical_type': 'sas',
+ 'status': 'normal',
+ 'storage_id': 'kkk',
+ 'native_disk_group_id': 'dgA01',
+ 'serial_number': '6SL9QR5T0000N52120SK',
+ 'manufacturer': 'SEAGATE',
+ 'model': 'ST3600057SS',
+ 'speed': 15000, 'capacity': 644352468582,
+ 'health_score': 'normal'
+ }, {
+ 'native_disk_id': '1.4',
+ 'name': '1.4',
+ 'physical_type': 'sas',
+ 'status': 'normal',
+ 'storage_id': 'kkk',
+ 'native_disk_group_id': 'dgA01',
+ 'serial_number': '3SL0WT7G00009051YBTF',
+ 'manufacturer': 'SEAGATE',
+ 'model': 'ST3600057SS',
+ 'speed': 15000,
+ 'capacity': 644352468582,
+ 'health_score': 'normal'
+ }
+]
+
+system_info = {
+ 'name': 'msa2040',
+ 'vendor': 'HPE',
+ 'model': 'MSA 2040 SAN',
+ 'status': 'normal',
+ 'serial_number': '00C0FF26DCB0',
+ 'firmware_version': 'GL210R004',
+ 'location': 'Uninitialized Location',
+ 'raw_capacity': 2577409874328,
+ 'total_capacity': 1285054214963,
+ 'used_capacity': 214533616434,
+ 'free_capacity': 1070520598529
+}
+
+controller_result = [
+ {
+ 'native_controller_id': 'A',
+ 'name': 'controller_a',
+ 'storage_id': 'kkk',
+ 'status': 'normal',
+ 'location': 'Top',
+ 'soft_version': 'GLS210R04-01',
+ 'cpu_info': 'Gladden',
+ 'memory_size': 6442450944
+ },
+ {
+ 'native_controller_id': 'B',
+ 'name': 'controller_b',
+ 'storage_id': 'kkk',
+ 'status': 'normal',
+ 'location': 'Bottom',
+ 'soft_version': 'GLS210R04-01',
+ 'cpu_info': 'Gladden',
+ 'memory_size': 6442450944
+ }
+]
diff --git a/delfin/tests/unit/drivers/hpe/hpe_msa/test_hpe_msastor.py b/delfin/tests/unit/drivers/hpe/hpe_msa/test_hpe_msastor.py
new file mode 100644
index 000000000..7d6637893
--- /dev/null
+++ b/delfin/tests/unit/drivers/hpe/hpe_msa/test_hpe_msastor.py
@@ -0,0 +1,95 @@
+import sys
+import paramiko
+
+from delfin import context
+from unittest import TestCase, mock
+from delfin.tests.unit.drivers.hpe.hpe_msa import test_constans
+from delfin.drivers.utils.ssh_client import SSHPool
+from delfin.drivers.hpe.hpe_msa.ssh_handler import SSHHandler
+from delfin.drivers.hpe.hpe_msa.hpe_msastor import HpeMsaStorDriver
+
+sys.modules['delfin.cryptor'] = mock.Mock()
+
+ACCESS_INFO = {
+ "storage_id": "kkk",
+ "ssh": {
+ "host": "110.143.132.231",
+ "port": 22,
+ "username": "user",
+ "password": "pass",
+ "pub_key": "ddddddddddddddddddddddddd"
+ }
+}
+
+
+class TestHpeMsaStorageDriver(TestCase):
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ def test_list_ports(self, mock_ssh_get, mock_control):
+ mock_ssh_get.return_value = {paramiko.SSHClient()}
+ mock_control.side_effect = [test_constans.LIST_PORTS]
+ ports = HpeMsaStorDriver(**ACCESS_INFO).list_ports(context)
+ self.assertEqual(ports, test_constans.ports_result)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ def test_list_disks(self, mock_ssh_get, mock_control):
+ mock_ssh_get.return_value = {paramiko.SSHClient()}
+ mock_control.side_effect = [test_constans.LIST_DISKS]
+ disks = HpeMsaStorDriver(**ACCESS_INFO).list_disks(context)
+ self.assertEqual(disks, test_constans.disks_result)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ def test_list_controllers(self, mock_ssh_get, mock_control):
+ mock_ssh_get.return_value = {paramiko.SSHClient()}
+ mock_control.side_effect = [test_constans.LIST_CONTROLLERS]
+ controller = HpeMsaStorDriver(**ACCESS_INFO).\
+ list_controllers(context)
+ self.assertEqual(controller, test_constans.controller_result)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ def test_list_volumes(self, mock_ssh_get, mock_control):
+ mock_ssh_get.return_value = {paramiko.SSHClient()}
+ mock_control.side_effect = [test_constans.LIST_VOLUMES,
+ test_constans.LIST_POOLS]
+ volumes = HpeMsaStorDriver(**ACCESS_INFO).list_volumes(context)
+ self.assertEqual(volumes, test_constans.volume_result)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ @mock.patch.object(SSHHandler, 'list_storage_pools')
+ @mock.patch.object(SSHHandler, 'list_storage_disks')
+ @mock.patch.object(SSHHandler, 'list_storage_volume')
+    def test_list_storage(self, mock_volume, mock_disks,
+                          mock_pools, mock_ssh_get, mock_exec):
+        # Mock arguments map bottom-up to the decorators above.
+        mock_exec.side_effect = [test_constans.LIST_SYSTEM,
+                                 test_constans.LIST_VERSION]
+        mock_ssh_get.return_value = {paramiko.SSHClient()}
+        mock_pools.side_effect = [test_constans.pools_result]
+        mock_disks.side_effect = [test_constans.disks_result]
+        mock_volume.side_effect = [test_constans.volume_result]
+ system = HpeMsaStorDriver(**ACCESS_INFO).get_storage(context)
+ self.assertEqual(system, test_constans.system_info)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ @mock.patch.object(SSHHandler, 'list_storage_volume')
+    def test_list_storage_pools(self, mock_volume, mock_ssh_get,
+                                mock_exec):
+        mock_volume.return_value = test_constans.volume_result
+        mock_ssh_get.return_value = {paramiko.SSHClient()}
+        mock_exec.side_effect = [test_constans.LIST_POOLS]
+ pools = HpeMsaStorDriver(**ACCESS_INFO).list_storage_pools(context)
+ self.assertEqual(pools, test_constans.pools_result)
+
+ @mock.patch.object(SSHPool, 'do_exec')
+ @mock.patch.object(SSHPool, 'get')
+ def test_list_alerts(self, mock_ssh_get, mock_control):
+ query_para = None
+ mock_ssh_get.return_value = {paramiko.SSHClient()}
+ mock_control.side_effect = [test_constans.LIST_ERROR]
+        alerts = HpeMsaStorDriver(**ACCESS_INFO).list_alerts(context,
+                                                             query_para)
+ self.assertEqual(alerts, test_constans.error_result)
diff --git a/setup.py b/setup.py
index 4f60ac4b1..224f498f8 100644
--- a/setup.py
+++ b/setup.py
@@ -45,6 +45,7 @@
'dellemc vplex = delfin.drivers.dell_emc.vplex.vplex_stor:VplexStorageDriver',
'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver',
'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver',
+ 'hpe msa = delfin.drivers.hpe.hpe_msa.hpe_msastor:HpeMsaStorDriver',
'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver',
'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver',
'netapp cmode = delfin.drivers.netapp.dataontap.cluster_mode:NetAppCmodeDriver',