From 2d71a43e49af858203982fec58e2b1cbfed8e3e0 Mon Sep 17 00:00:00 2001
From: Divyanshu Kumar
Date: Thu, 7 Oct 2021 17:11:37 +0000
Subject: [PATCH 01/24] Changed link on line 84 (#735)

The previous link is replaced with the correct one, i.e.
https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc.
---
 installer/precheck | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/installer/precheck b/installer/precheck
index 8a8bb8c0f..5b668ecb0 100755
--- a/installer/precheck
+++ b/installer/precheck
@@ -81,7 +81,7 @@ check_install_rabbitmq(){
     else
         #TODO check erlang
         # Import rabbitMQ
-        ret=$(wget -O- https://dl.bintray.com/rabbitmq/Keys/rabbitmq-release-signing-key.asc | sudo apt-key add -)
+        ret=$(wget -O- https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc | sudo apt-key add -)
         if [ $? -eq 0 ]; then
             ret=$(wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add -)
             if [ $? -ne 0 ]; then

From 121caad6031d9df99730e64d0f771a2254ef16de Mon Sep 17 00:00:00 2001
From: tanjy
Date: Tue, 25 Jan 2022 11:42:39 +0800
Subject: [PATCH 02/24] vsp change disk status

---
 delfin/drivers/hitachi/vsp/vsp_stor.py          | 15 ++++++++++++---
 .../drivers/hitachi/vsp/test_hitachi_vspstor.py |  8 ++++----
 2 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py
index f9f554813..8619f5f6d 100644
--- a/delfin/drivers/hitachi/vsp/vsp_stor.py
+++ b/delfin/drivers/hitachi/vsp/vsp_stor.py
@@ -64,6 +64,16 @@ class HitachiVspDriver(driver.StorageDriver):
                      "HNASS": constants.PortType.OTHER,
                      "HNASU": constants.PortType.OTHER
                      }
+    DISK_STATUS_TYPE = {"NML": constants.DiskStatus.NORMAL,
+                        "CPY": constants.DiskStatus.NORMAL,
+                        "CPI": constants.DiskStatus.NORMAL,
+                        "RSV": constants.DiskStatus.NORMAL,
+                        "FAI": constants.DiskStatus.ABNORMAL,
+                        "BLK": constants.DiskStatus.ABNORMAL,
+                        "WAR": constants.DiskStatus.ABNORMAL,
+                        "UNK": constants.DiskStatus.ABNORMAL,
+                        "Unknown": constants.DiskStatus.ABNORMAL
+                        }
 
     TIME_PATTERN = '%Y-%m-%dT%H:%M:%S'
     AUTO_PORT_SPEED = 8 * units.Gi
@@ -359,9 +369,8 @@ def list_disks(self, context):
             if disks is not None:
                 disk_entries = disks.get('data')
                 for disk in disk_entries:
-                    status = constants.DiskStatus.ABNORMAL
-                    if disk.get('status' == 'NML'):
-                        status = constants.DiskStatus.NORMAL
+                    status = HitachiVspDriver.DISK_STATUS_TYPE.get(
+                        disk.get('status'), constants.DiskStatus.NORMAL)
                     physical_type = \
                         HitachiVspDriver.DISK_PHYSICAL_TYPE_MAP.get(
                             disk.get('driveTypeName'),
diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py
index 52df5aaef..dc1f58ce5 100644
--- a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py
+++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py
@@ -327,7 +327,7 @@ def __init__(self):
         'serial_number': '123456789012345678901',
         'speed': 10000,
         'capacity': 644245094400,
-        'status': 'abnormal',
+        'status': 'normal',
         'physical_type': 'sas',
         'logical_type': 'member',
         'native_disk_group_id': '1-6',
@@ -339,7 +339,7 @@ def __init__(self):
         'serial_number': '123456789012345678902',
         'speed': 10000,
         'capacity': 644245094400,
-        'status': 'abnormal',
+        'status': 'normal',
         'physical_type': 'sas',
         'logical_type': 'member',
         'native_disk_group_id': '1-6',
@@ -351,7 +351,7 @@ def __init__(self):
         'serial_number': '123456789012345678903',
         'speed': 10000,
         'capacity': 644245094400,
-        'status': 'abnormal',
+
'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member', 'native_disk_group_id': '1-6', @@ -363,7 +363,7 @@ def __init__(self): 'serial_number': '123456789012345678904', 'speed': 10000, 'capacity': 644245094400, - 'status': 'abnormal', + 'status': 'normal', 'physical_type': 'sas', 'logical_type': 'member', 'native_disk_group_id': '1-6', From c7512ccaa2182b653c499b7fc36346e2c92df0d3 Mon Sep 17 00:00:00 2001 From: tanjy Date: Tue, 25 Jan 2022 12:12:01 +0800 Subject: [PATCH 03/24] vsp change disk status --- delfin/drivers/hitachi/vsp/vsp_stor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 8619f5f6d..631ed9d44 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -71,8 +71,8 @@ class HitachiVspDriver(driver.StorageDriver): "FAI": constants.DiskStatus.ABNORMAL, "BLK": constants.DiskStatus.ABNORMAL, "WAR": constants.DiskStatus.ABNORMAL, - "UNK": constants.DiskStatus.ABNORMAL, - "Unknown": constants.DiskStatus.ABNORMAL + "UNK": constants.DiskStatus.NORMAL, + "Unknown": constants.DiskStatus.NORMAL } TIME_PATTERN = '%Y-%m-%dT%H:%M:%S' From 991dc641e09b7176e684c4b8204137d09f8390e9 Mon Sep 17 00:00:00 2001 From: tanjy Date: Tue, 25 Jan 2022 12:23:31 +0800 Subject: [PATCH 04/24] vsp change disk status --- delfin/drivers/hitachi/vsp/vsp_stor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 631ed9d44..d4d47d5f0 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -74,7 +74,6 @@ class HitachiVspDriver(driver.StorageDriver): "UNK": constants.DiskStatus.NORMAL, "Unknown": constants.DiskStatus.NORMAL } - TIME_PATTERN = '%Y-%m-%dT%H:%M:%S' AUTO_PORT_SPEED = 8 * units.Gi From 7208a005479465d042d2f509923aa456457aa440 Mon Sep 17 00:00:00 2001 From: tanjy Date: Tue, 25 Jan 2022 16:43:09 +0800 Subject: [PATCH 05/24] vsp change disk status --- delfin/drivers/hitachi/vsp/rest_handler.py | 34 +++++++++++++++++++ delfin/drivers/hitachi/vsp/vsp_stor.py | 14 +++++--- .../hitachi/vsp/test_hitachi_vspstor.py | 4 +-- 3 files changed, 46 insertions(+), 6 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index 55e34fc0b..ff5a465ea 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -261,3 +261,37 @@ def get_alerts(self, param, start, end): param, start, end) result_json = self.get_rest_info(url) return result_json + + def get_all_host_groups(self): + url = '%s/%s/host-groups' % \ + (RestHandler.COMM_URL, self.storage_device_id) + result_json = self.get_rest_info(url) + return result_json + + def get_specific_host_group(self, group_id): + url = '%s/%s/host-groups/%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, group_id) + result_json = self.get_rest_info(url) + return result_json + + def get_host_wwn(self, port_id, group_number): + url = '%s/%s/host-wwns?portId=%s&hostGroupNumber=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json + + def get_iscsi_name(self, port_id, group_number): + url = '%s/%s/host-iscsis?portId=%s&hostGroupNumber=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json + + def get_lun_path(self, 
port_id, group_number): + url = '%s/%s/luns?portId=%s&hostGroupNumber=%s&' \ + 'isBasicLunInformation=true' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index d4d47d5f0..82bcdfb92 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -370,10 +370,16 @@ def list_disks(self, context): for disk in disk_entries: status = HitachiVspDriver.DISK_STATUS_TYPE.get( disk.get('status'), constants.DiskStatus.NORMAL) - physical_type = \ - HitachiVspDriver.DISK_PHYSICAL_TYPE_MAP.get( - disk.get('driveTypeName'), - constants.DiskPhysicalType.UNKNOWN) + if disk.get('driveTypeName'): + type_name = 'SSD' if 'SSD' in \ + disk.get('driveTypeName').upper()\ + else disk.get('driveTypeName') + physical_type = \ + HitachiVspDriver.DISK_PHYSICAL_TYPE_MAP.get( + type_name, + constants.DiskPhysicalType.UNKNOWN) + else: + physical_type = constants.DiskPhysicalType.UNKNOWN logical_type = HitachiVspDriver.DISK_LOGIC_TYPE_MAP.get( disk.get('usageType'), constants.DiskLogicalType.UNKNOWN) diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py index dc1f58ce5..e235501ac 100644 --- a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py +++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py @@ -146,7 +146,7 @@ def __init__(self): "data": [ { "driveLocationId": "0-0", - "driveTypeName": "SAS", + "driveTypeName": "SSD(FMC)", "driveSpeed": 10000, "totalCapacity": 600, "driveType": "DKR5D-J600SS", @@ -328,7 +328,7 @@ def __init__(self): 'speed': 10000, 'capacity': 644245094400, 'status': 'normal', - 'physical_type': 'sas', + 'physical_type': 'ssd', 'logical_type': 'member', 'native_disk_group_id': '1-6', 'location': '0-0' From 183d0be9bb44546ea1b856b14880b53031b86411 Mon Sep 17 00:00:00 2001 From: tanjy Date: Thu, 10 Feb 2022 16:24:48 +0800 Subject: [PATCH 06/24] vsp change disk status --- delfin/drivers/hitachi/vsp/rest_handler.py | 34 ---------------------- 1 file changed, 34 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index ff5a465ea..55e34fc0b 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -261,37 +261,3 @@ def get_alerts(self, param, start, end): param, start, end) result_json = self.get_rest_info(url) return result_json - - def get_all_host_groups(self): - url = '%s/%s/host-groups' % \ - (RestHandler.COMM_URL, self.storage_device_id) - result_json = self.get_rest_info(url) - return result_json - - def get_specific_host_group(self, group_id): - url = '%s/%s/host-groups/%s' % \ - (RestHandler.COMM_URL, self.storage_device_id, group_id) - result_json = self.get_rest_info(url) - return result_json - - def get_host_wwn(self, port_id, group_number): - url = '%s/%s/host-wwns?portId=%s&hostGroupNumber=%s' % \ - (RestHandler.COMM_URL, self.storage_device_id, port_id, - group_number) - result_json = self.get_rest_info(url) - return result_json - - def get_iscsi_name(self, port_id, group_number): - url = '%s/%s/host-iscsis?portId=%s&hostGroupNumber=%s' % \ - (RestHandler.COMM_URL, self.storage_device_id, port_id, - group_number) - result_json = self.get_rest_info(url) - return result_json - - def get_lun_path(self, port_id, group_number): - url = 
'%s/%s/luns?portId=%s&hostGroupNumber=%s&' \ - 'isBasicLunInformation=true' % \ - (RestHandler.COMM_URL, self.storage_device_id, port_id, - group_number) - result_json = self.get_rest_info(url) - return result_json From cf6bb3b82e58d8f4cfe83a87a4fb4a04a30a4a55 Mon Sep 17 00:00:00 2001 From: qinwang-murphy <59107077+qinwang-murphy@users.noreply.github.com> Date: Fri, 18 Feb 2022 17:45:33 +0800 Subject: [PATCH 07/24] Adding host oss and dependency package. (#825) Co-authored-by: Erik --- delfin/common/constants.py | 4 +++- requirements.txt | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 64079316a..02d852c6d 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -262,10 +262,12 @@ class HostOSTypes(object): WINDOWS_SERVER_2012 = 'Windows Server 2012' ORACLE_VM = 'Oracle VM' OPEN_VMS = 'Open VMS' + MAC_OS = 'Mac OS' UNKNOWN = 'Unknown' ALL = (LINUX, WINDOWS, SOLARIS, HP_UX, AIX, XEN_SERVER, VMWARE_ESX, - LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, UNKNOWN) + LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, MAC_OS, + UNKNOWN) class InitiatorStatus(object): diff --git a/requirements.txt b/requirements.txt index 957a01ff6..003d402b0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,3 +43,4 @@ importlib-metadata==3.7.0; python_version < "3.8" tenacity==6.3.1 tzlocal<3.0 scp>=0.13.0 +defusedxml==0.6.0 From 0f352bba76fa141ed38db1fd056a61aed3cfca50 Mon Sep 17 00:00:00 2001 From: JiuyunZhao <46752751+JiuyunZhao@users.noreply.github.com> Date: Mon, 21 Feb 2022 19:11:08 +0800 Subject: [PATCH 08/24] Add an enumerated value "degraded" for disk status. (#829) Co-authored-by: enxichen --- delfin/common/constants.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 02d852c6d..7483242e2 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -144,9 +144,10 @@ class PortLogicalType(object): class DiskStatus(object): NORMAL = 'normal' ABNORMAL = 'abnormal' + DEGRADED = 'degraded' OFFLINE = 'offline' - ALL = (NORMAL, ABNORMAL, OFFLINE) + ALL = (NORMAL, ABNORMAL, DEGRADED, OFFLINE) class DiskPhysicalType(object): From 7037a59ba3043b4ee0172103df03f0fba98f43b8 Mon Sep 17 00:00:00 2001 From: JiuyunZhao <46752751+JiuyunZhao@users.noreply.github.com> Date: Thu, 24 Feb 2022 10:04:25 +0800 Subject: [PATCH 09/24] Add a performance metric "ioSize" of filesystem. 
(#834) --- delfin/api/schemas/storage_capabilities_schema.py | 9 +++++++++ delfin/drivers/driver.py | 4 +++- delfin/drivers/fake_storage/__init__.py | 7 ++++++- delfin/tests/unit/api/fakes.py | 7 ++++++- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/delfin/api/schemas/storage_capabilities_schema.py b/delfin/api/schemas/storage_capabilities_schema.py index 7c7f68e9f..f99ff1472 100644 --- a/delfin/api/schemas/storage_capabilities_schema.py +++ b/delfin/api/schemas/storage_capabilities_schema.py @@ -565,6 +565,15 @@ 'maxLength': 255} }, }, + 'ioSize': { + 'type': 'object', + 'properties': { + 'unit': {'type': 'string', 'enum': ["KB"]}, + 'description': {'type': 'string', + 'minLength': 1, + 'maxLength': 255} + }, + }, 'readIoSize': { 'type': 'object', 'properties': { diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py index 8df9ea381..580ee88f7 100644 --- a/delfin/drivers/driver.py +++ b/delfin/drivers/driver.py @@ -176,7 +176,9 @@ def list_shares(self, context): @staticmethod def get_capabilities(context, filters=None): - """Get capability of driver""" + """Get capability of driver, please refer to + STORAGE_CAPABILITIES_SCHEMA + in api/schemas/storage_capabilities_schema.py.""" pass def list_storage_host_initiators(self, context): diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index d94ba77ee..fd2a3a179 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -840,6 +840,11 @@ def get_capabilities(context, filters=None): "unit": "IOPS", "description": "Write operations per second" }, + "ioSize": { + "unit": "KB", + "description": "The average size of IO requests " + "in KB." + }, "readIoSize": { "unit": "KB", "description": "The average size of read IO requests " @@ -847,7 +852,7 @@ def get_capabilities(context, filters=None): }, "writeIoSize": { "unit": "KB", - "description": "The average size of read IO requests" + "description": "The average size of write IO requests" " in KB." }, }, diff --git a/delfin/tests/unit/api/fakes.py b/delfin/tests/unit/api/fakes.py index 6f3ce224b..d61e62a45 100644 --- a/delfin/tests/unit/api/fakes.py +++ b/delfin/tests/unit/api/fakes.py @@ -741,6 +741,11 @@ def fake_get_capabilities(context, storage_id): "unit": "IOPS", "description": "Write operations per second" }, + "ioSize": { + "unit": "KB", + "description": "The average size of IO requests " + "in KB." + }, "readIoSize": { "unit": "KB", "description": "The average size of read IO requests " @@ -748,7 +753,7 @@ def fake_get_capabilities(context, storage_id): }, "writeIoSize": { "unit": "KB", - "description": "The average size of read IO requests" + "description": "The average size of write IO requests" " in KB." 
}, }, From 92b31fa5d3a11916c2399356139efc19fde1f825 Mon Sep 17 00:00:00 2001 From: Joseph Vazhappilly Date: Wed, 12 Jan 2022 15:08:48 +0530 Subject: [PATCH 10/24] Add hostmapping support to VMAX driver --- delfin/drivers/dell_emc/vmax/client.py | 204 +++++++ delfin/drivers/dell_emc/vmax/rest.py | 197 ++++++ delfin/drivers/dell_emc/vmax/vmax.py | 18 + .../unit/drivers/dell_emc/vmax/test_vmax.py | 564 +++++++++++++++++- 4 files changed, 982 insertions(+), 1 deletion(-) diff --git a/delfin/drivers/dell_emc/vmax/client.py b/delfin/drivers/dell_emc/vmax/client.py index b5635e1cf..7b45c4eae 100644 --- a/delfin/drivers/dell_emc/vmax/client.py +++ b/delfin/drivers/dell_emc/vmax/client.py @@ -427,6 +427,210 @@ def list_disks(self, storage_id): LOG.error("Failed to get disk details from VMAX") raise + def list_storage_host_initiators(self, storage_id): + try: + # Get list of initiators + initiators = self.rest.get_initiator_list(self.array_id, + self.uni_version) + + initiator_list = [] + for initiator in initiators: + initiator_info = self.rest.get_initiator( + self.array_id, self.uni_version, initiator) + type_string = initiator_info.get('type', '').upper() + initiator_type = constants.InitiatorType.UNKNOWN + if 'FIBRE' in type_string: + initiator_type = constants.InitiatorType.FC + if 'ISCSI' in type_string: + initiator_type = constants.InitiatorType.ISCSI + + initiator_status = constants.InitiatorStatus.ONLINE + if not initiator_info.get('on_fabric', False): + initiator_status = constants.InitiatorStatus.OFFLINE + + initiator_item = { + 'name': initiator, + 'storage_id': storage_id, + 'native_storage_host_initiator_id': initiator, + 'alias': initiator_info.get('alias'), + 'wwn': initiator_info.get('initiatorId'), + 'type': initiator_type, + 'status': initiator_status, + 'native_storage_host_id': initiator_info.get('host'), + } + initiator_list.append(initiator_item) + return initiator_list + + except Exception: + LOG.error("Failed to get host initiator details from VMAX") + raise + + def list_storage_hosts(self, storage_id): + try: + # Get list of storage hosts + hosts = self.rest.get_host_list(self.array_id, + self.uni_version) + host_list = [] + for host in hosts: + host_info = self.rest.get_host( + self.array_id, self.uni_version, host) + + host_item = { + 'storage_id': storage_id, + 'native_storage_host_id': host_info.get('hostId'), + 'name': host_info.get('hostId'), + 'os_type': constants.HostOSTypes.UNKNOWN, + 'status': constants.HostStatus.NORMAL, + } + host_list.append(host_item) + return host_list + + except Exception: + LOG.error("Failed to get storage host details from VMAX") + raise + + def list_storage_host_groups(self, storage_id): + try: + # Get list of storage host groups + host_groups = self.rest.get_host_group_list(self.array_id, + self.uni_version) + host_group_list = [] + storage_host_grp_relation_list = [] + for host_group in host_groups: + host_group_info = self.rest.get_host_group( + self.array_id, self.uni_version, host_group) + host_group_item = { + 'name': host_group, + 'storage_id': storage_id, + 'native_storage_host_group_id': host_group, + } + host_group_list.append(host_group_item) + + for storage_host in host_group_info['host']: + storage_host_group_relation = { + 'storage_id': storage_id, + 'native_storage_host_group_id': host_group, + 'native_storage_host_id': storage_host.get('hostId') + } + storage_host_grp_relation_list \ + .append(storage_host_group_relation) + + result = { + 'storage_host_groups': host_group_list, + 'storage_host_grp_host_rels': 
storage_host_grp_relation_list + } + + return result + + except Exception: + LOG.error("Failed to get storage host group details from VMAX") + raise + + def list_port_groups(self, storage_id): + try: + # Get list of port groups + port_groups = self.rest.get_port_group_list(self.array_id, + self.uni_version) + port_group_list = [] + port_group_relation_list = [] + for port_group in port_groups: + port_group_info = self.rest.get_port_group( + self.array_id, self.uni_version, port_group) + port_group_item = { + 'name': port_group, + 'storage_id': storage_id, + 'native_port_group_id': port_group, + } + port_group_list.append(port_group_item) + + for port in port_group_info['symmetrixPortKey']: + port_name = port['directorId'] + ':' + port['portId'] + port_group_relation = { + 'storage_id': storage_id, + 'native_port_group_id': port_group, + 'native_port_id': port_name + } + port_group_relation_list.append(port_group_relation) + result = { + 'port_groups': port_group_list, + 'port_grp_port_rels': port_group_relation_list + } + return result + + except Exception: + LOG.error("Failed to get port group details from VMAX") + raise + + def list_volume_groups(self, storage_id): + try: + # Get list of volume groups + volume_groups = self.rest.get_volume_group_list(self.array_id, + self.uni_version) + volume_group_list = [] + volume_group_relation_list = [] + for volume_group in volume_groups: + # volume_group_info = self.rest.get_volume_group( + # self.array_id, self.uni_version, volume_group) + + volume_group_item = { + 'name': volume_group, + 'storage_id': storage_id, + 'native_volume_group_id': volume_group, + } + volume_group_list.append(volume_group_item) + + # List all volumes except data volumes + volumes = self.rest.get_volume_list( + self.array_id, version=self.uni_version, + params={'data_volume': 'false', + 'storageGroupId': volume_group}) + if not volumes: + continue + for volume in volumes: + volume_group_relation = { + 'storage_id': storage_id, + 'native_volume_group_id': volume_group, + 'native_volume_id': volume + } + volume_group_relation_list.append(volume_group_relation) + + result = { + 'volume_groups': volume_group_list, + 'vol_grp_vol_rels': volume_group_relation_list + } + return result + + except Exception: + LOG.error("Failed to get volume group details from VMAX") + raise + + def list_masking_views(self, storage_id): + try: + # Get list of masking_views + masking_views = self.rest.get_masking_view_list(self.array_id, + self.uni_version) + masking_view_list = [] + for masking_view in masking_views: + mv_info = self.rest.get_masking_view( + self.array_id, self.uni_version, masking_view) + + masking_view_item = { + 'name': masking_view, + 'storage_id': storage_id, + 'native_masking_view_id': mv_info['maskingViewId'], + 'native_storage_host_id': mv_info.get('hostId'), + 'native_storage_host_group_id': mv_info.get( + 'hostGroupId'), + 'native_volume_group_id': mv_info.get('storageGroupId'), + 'native_port_group_id': mv_info.get('portGroupId'), + } + masking_view_list.append(masking_view_item) + return masking_view_list + + except Exception: + LOG.error("Failed to get masking views details from VMAX") + raise + def list_alerts(self, query_para): """Get all alerts from an array.""" return self.rest.get_alerts(query_para, version=self.uni_version, diff --git a/delfin/drivers/dell_emc/vmax/rest.py b/delfin/drivers/dell_emc/vmax/rest.py index b7f27d1ed..a1e92cd1d 100644 --- a/delfin/drivers/dell_emc/vmax/rest.py +++ b/delfin/drivers/dell_emc/vmax/rest.py @@ -733,6 +733,203 @@ def 
get_disk_list(self, array, version, params=None):
             array, SYSTEM, 'disk', version=version, params=params)
         return disk_dict_list.get('disk_ids', [])
 
+    def get_initiator(self, array, version, initiator_id):
+        """Get a VMax initiator from array.
+        :param array: the array serial number
+        :param version: the unisphere version -- int
+        :param initiator_id: the initiator id
+        :returns: initiator dict
+        :raises: StorageHostInitiatorNotFound
+        """
+        initiator_dict = self.get_resource(
+            array, SLOPROVISIONING, 'initiator', resource_name=initiator_id,
+            version=version)
+        if not initiator_dict:
+            exception_message = (_("Initiator %(initiator_id)s not found.")
+                                 % {'initiator_id': initiator_id})
+            LOG.error(exception_message)
+            raise exception.StorageHostInitiatorNotFound(initiator_id)
+        return initiator_dict
+
+    def get_initiator_list(self, array, version, params=None):
+        """Get a filtered list of VMax initiators from array.
+        Filter parameters are required as the unfiltered initiator list
+        could be very large and could affect performance if called often.
+        :param array: the array serial number
+        :param version: the unisphere version
+        :param params: filter parameters
+        :returns: initiatorId -- list
+        """
+        initiator_dict_list = self.get_resource(
+            array, SLOPROVISIONING, 'initiator',
+            version=version, params=params)
+        return initiator_dict_list.get('initiatorId', [])
+
+    def get_host(self, array, version, host_id):
+        """Get a VMax host from array.
+        :param array: the array serial number
+        :param version: the unisphere version -- int
+        :param host_id: the host id
+        :returns: host dict
+        :raises: StorageHostNotFound
+        """
+        host_dict = self.get_resource(
+            array, SLOPROVISIONING, 'host',
+            resource_name=host_id,
+            version=version)
+        if not host_dict:
+            exception_message = (_("Host %(host_id)s not found.")
+                                 % {'host_id': host_id})
+            LOG.error(exception_message)
+            raise exception.StorageHostNotFound(host_id)
+        return host_dict
+
+    def get_host_list(self, array, version, params=None):
+        """Get a filtered list of VMax hosts from array.
+        Filter parameters are required as the unfiltered host list
+        could be very large and could affect performance if called often.
+        :param array: the array serial number
+        :param version: the unisphere version
+        :param params: filter parameters
+        :returns: hostId -- list
+        """
+        host_dict_list = self.get_resource(
+            array, SLOPROVISIONING, 'host',
+            version=version, params=params)
+        return host_dict_list.get('hostId', [])
+
+    def get_host_group(self, array, version, host_group_id):
+        """Get a VMax host group from array.
+        :param array: the array serial number
+        :param version: the unisphere version -- int
+        :param host_group_id: the host group id
+        :returns: host group dict
+        :raises: StorageHostGroupNotFound
+        """
+        host_group_dict = self.get_resource(
+            array, SLOPROVISIONING, 'hostgroup',
+            resource_name=host_group_id,
+            version=version)
+        if not host_group_dict:
+            exception_message = (_("HostGroup %(host_group_id)s not found.")
+                                 % {'host_group_id': host_group_id})
+            LOG.error(exception_message)
+            raise exception.StorageHostGroupNotFound(host_group_id)
+        return host_group_dict
+
+    def get_host_group_list(self, array, version, params=None):
+        """Get a filtered list of VMax host groups from array.
+        Filter parameters are required as the unfiltered host group list
+        could be very large and could affect performance if called often.
+        :param array: the array serial number
+        :param version: the unisphere version
+        :param params: filter parameters
+        :returns: hostGroupId -- list
+        """
+        host_group_dict_list = self.get_resource(
+            array, SLOPROVISIONING, 'hostgroup',
+            version=version, params=params)
+        return host_group_dict_list.get('hostGroupId', [])
+
+    def get_port_group(self, array, version, port_group_id):
+        """Get a VMax port group from array.
+        :param array: the array serial number
+        :param version: the unisphere version -- int
+        :param port_group_id: the port group id
+        :returns: port group dict
+        :raises: PortGroupNotFound
+        """
+        port_group_dict = self.get_resource(
+            array, SLOPROVISIONING, 'portgroup',
+            resource_name=port_group_id,
+            version=version)
+        if not port_group_dict:
+            exception_message = (_("PortGroup %(port_group_id)s not found.")
+                                 % {'port_group_id': port_group_id})
+            LOG.error(exception_message)
+            raise exception.PortGroupNotFound(port_group_id)
+        return port_group_dict
+
+    def get_port_group_list(self, array, version, params=None):
+        """Get a filtered list of VMax port groups from array.
+        Filter parameters are required as the unfiltered port group list
+        could be very large and could affect performance if called often.
+        :param array: the array serial number
+        :param version: the unisphere version
+        :param params: filter parameters
+        :returns: portGroupId -- list
+        """
+        port_group_dict_list = self.get_resource(
+            array, SLOPROVISIONING, 'portgroup',
+            version=version, params=params)
+        return port_group_dict_list.get('portGroupId', [])
+
+    def get_volume_group(self, array, version, storage_group_id):
+        """Get a VMax storage/volume group from array.
+        :param array: the array serial number
+        :param version: the unisphere version -- int
+        :param storage_group_id: the storage group id
+        :returns: volume group dict
+        :raises: VolumeGroupNotFound
+        """
+        storage_group_dict = self.get_resource(
+            array, SLOPROVISIONING, 'storagegroup',
+            resource_name=storage_group_id,
+            version=version)
+        if not storage_group_dict:
+            exception_message = (_("StorageGroup %(id)s not found.")
+                                 % {'id': storage_group_id})
+            LOG.error(exception_message)
+            raise exception.VolumeGroupNotFound(storage_group_id)
+        return storage_group_dict
+
+    def get_volume_group_list(self, array, version, params=None):
+        """Get a filtered list of VMax storage groups from array.
+        Filter parameters are required as the unfiltered storage group list
+        could be very large and could affect performance if called often.
+        :param array: the array serial number
+        :param version: the unisphere version
+        :param params: filter parameters
+        :returns: storageGroupId -- list
+        """
+        storage_group_dict_list = self.get_resource(
+            array, SLOPROVISIONING, 'storagegroup',
+            version=version, params=params)
+        return storage_group_dict_list.get('storageGroupId', [])
+
+    def get_masking_view(self, array, version, masking_view_id):
+        """Get a VMax masking view from array.
+ :param array: the array serial number + :param version: the unisphere version -- int + :param masking_view_id: the masking view id + :returns: masking view dict + :raises: MaskingViewNotFound + """ + masking_view_dict = self.get_resource( + array, SLOPROVISIONING, 'maskingview', + resource_name=masking_view_id, + version=version) + if not masking_view_dict: + exception_message = (_("Masking View %(id)s not found.") + % {'id': masking_view_id}) + LOG.error(exception_message) + raise exception.MaskingViewNotFound(masking_view_id) + return masking_view_dict + + def get_masking_view_list(self, array, version, params=None): + """Get a filtered list of VMax masking views from array. + Filter parameters are required as the unfiltered initiator list + could bevery large and could affect performance if called often. + :param array: the array serial number + :param version: the unisphere version + :param params: filter parameters + :returns: maskingViewId -- list + """ + masking_view_dict_list = self.get_resource( + array, SLOPROVISIONING, 'maskingview', + version=version, params=params) + return masking_view_dict_list.get('maskingViewId', []) + def post_request(self, target_uri, payload): """Generate a POST request. :param target_uri: the uri to query from unipshere REST API diff --git a/delfin/drivers/dell_emc/vmax/vmax.py b/delfin/drivers/dell_emc/vmax/vmax.py index f3cc7161b..c92f718cc 100644 --- a/delfin/drivers/dell_emc/vmax/vmax.py +++ b/delfin/drivers/dell_emc/vmax/vmax.py @@ -83,6 +83,24 @@ def list_ports(self, context): def list_disks(self, context): return self.client.list_disks(self.storage_id) + def list_storage_host_initiators(self, context): + return self.client.list_storage_host_initiators(self.storage_id) + + def list_storage_hosts(self, context): + return self.client.list_storage_hosts(self.storage_id) + + def list_storage_host_groups(self, context): + return self.client.list_storage_host_groups(self.storage_id) + + def list_port_groups(self, context): + return self.client.list_port_groups(self.storage_id) + + def list_volume_groups(self, context): + return self.client.list_volume_groups(self.storage_id) + + def list_masking_views(self, context): + return self.client.list_masking_views(self.storage_id) + def add_trap_config(self, context, trap_config): pass diff --git a/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py b/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py index 066810565..553cf4d21 100644 --- a/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py +++ b/delfin/tests/unit/drivers/dell_emc/vmax/test_vmax.py @@ -624,7 +624,6 @@ def test_list_disks(self, mock_unisphere_version, self.assertEqual(driver.storage_id, "12345") self.assertEqual(driver.client.array_id, "00112233") ret = driver.list_disks(context) - print("return", ret) self.assertDictEqual(ret[0], expected[0]) self.assertDictEqual(ret[1], expected[1]) @@ -644,6 +643,569 @@ def test_list_disks(self, mock_unisphere_version, self.assertIn('Exception from Storage Backend', str(exc.exception)) + @mock.patch.object(VMaxRest, 'get_initiator') + @mock.patch.object(VMaxRest, 'get_initiator_list') + @mock.patch.object(VMaxRest, 'get_array_detail') + @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_storage_host_initiators(self, mock_unisphere_version, + mock_version, mock_array, + mock_initiators, mock_initiator): + expected = \ + [ + { + 'name': '1001', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '1001', + 'alias': 'I1', + 
'wwn': '1001', + 'type': 'fc', + 'status': 'online', + 'native_storage_host_id': 'host1', + }, + { + 'name': '1002', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '1002', + 'alias': 'I2', + 'wwn': '1002', + 'type': 'iscsi', + 'status': 'offline', + 'native_storage_host_id': 'host2', + }, + { + 'name': '1003', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '1003', + 'alias': 'I3', + 'wwn': '1003', + 'type': 'fc', + 'status': 'offline', + 'native_storage_host_id': 'host3', + } + ] + init_1 = { + 'initiatorId': '1001', + 'wwn': '1001', + 'alias': 'I1', + 'host': 'host1', + 'on_fabric': True, + 'type': 'FIBRE' + } + init_2 = { + 'initiatorId': '1002', + 'wwn': '1002', + 'alias': 'I2', + 'host': 'host2', + 'type': 'ISCSI' + } + init_3 = { + 'initiatorId': '1003', + 'wwn': '1003', + 'alias': 'I3', + 'host': 'host3', + 'type': 'FIBRE' + } + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = {'symmetrixId': ['00112233']} + mock_initiators.side_effect = [['1001', '1002', '1003']] + mock_initiator.side_effect = [init_1, init_2, init_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_storage_host_initiators(context) + self.assertDictEqual(ret[0], expected[0]) + self.assertDictEqual(ret[1], expected[1]) + self.assertDictEqual(ret[2], expected[2]) + + mock_initiators.side_effect = [['1001']] + mock_initiator.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_storage_host_initiators(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_initiators.side_effect = [exception.StorageBackendException] + mock_initiator.side_effect = [init_1] + with self.assertRaises(Exception) as exc: + driver.list_storage_host_initiators(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + @mock.patch.object(VMaxRest, 'get_host') + @mock.patch.object(VMaxRest, 'get_host_list') + @mock.patch.object(VMaxRest, 'get_array_detail') + @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_storage_hosts(self, mock_unisphere_version, + mock_version, mock_array, + mock_hosts, mock_host): + expected = \ + [ + { + 'storage_id': '12345', + 'name': 'h1', + 'native_storage_host_id': 'h1', + 'os_type': 'Unknown', + 'status': 'normal', + }, + { + 'storage_id': '12345', + 'name': 'h2', + 'native_storage_host_id': 'h2', + 'os_type': 'Unknown', + 'status': 'normal', + }, + { + 'storage_id': '12345', + 'name': 'h3', + 'native_storage_host_id': 'h3', + 'os_type': 'Unknown', + 'status': 'normal', + } + ] + host_1 = { + 'hostId': 'h1', + } + host_2 = { + 'hostId': 'h2', + } + host_3 = { + 'hostId': 'h3', + } + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = {'symmetrixId': ['00112233']} + mock_hosts.side_effect = [['h1', 'h2', 'h3']] + mock_host.side_effect = [host_1, host_2, host_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_storage_hosts(context) + self.assertDictEqual(ret[0], expected[0]) + self.assertDictEqual(ret[1], expected[1]) + self.assertDictEqual(ret[2], 
expected[2]) + + mock_hosts.side_effect = [['h1']] + mock_host.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_storage_hosts(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_hosts.side_effect = [exception.StorageBackendException] + mock_host.side_effect = [host_1] + with self.assertRaises(Exception) as exc: + driver.list_storage_hosts(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + @mock.patch.object(VMaxRest, 'get_host_group') + @mock.patch.object(VMaxRest, 'get_host_group_list') + @mock.patch.object(VMaxRest, 'get_array_detail') + @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_storage_host_groups(self, mock_unisphere_version, + mock_version, mock_array, + mock_host_groups, mock_host_group): + expected = \ + [ + { + 'name': 'hg1', + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg1', + }, + { + 'name': 'hg2', + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg2', + }, + { + 'name': 'hg3', + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg3', + } + ] + expected_rel = [ + { + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg1', + 'native_storage_host_id': 'h1', + }, + { + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg1', + 'native_storage_host_id': 'h2', + }, + { + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg2', + 'native_storage_host_id': 'h2', + }, + { + 'storage_id': '12345', + 'native_storage_host_group_id': 'hg3', + 'native_storage_host_id': 'h1', + }, + ] + hg_1 = { + 'hostGroupId': 'hg1', + 'host': [{'hostId': 'h1'}, {'hostId': 'h2'}], + } + hg_2 = { + 'hostGroupId': 'hg2', + 'host': [{'hostId': 'h2'}], + } + hg_3 = { + 'hostGroupId': 'hg3', + 'host': [{'hostId': 'h1'}], + } + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = {'symmetrixId': ['00112233']} + mock_host_groups.side_effect = [['hg1', 'hg2', 'hg3']] + mock_host_group.side_effect = [hg_1, hg_2, hg_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_storage_host_groups(context) + ret_hgs = ret['storage_host_groups'] + ret_hg_rels = ret['storage_host_grp_host_rels'] + self.assertDictEqual(ret_hgs[0], expected[0]) + self.assertDictEqual(ret_hgs[1], expected[1]) + self.assertDictEqual(ret_hgs[2], expected[2]) + self.assertDictEqual(ret_hg_rels[0], expected_rel[0]) + self.assertDictEqual(ret_hg_rels[1], expected_rel[1]) + self.assertDictEqual(ret_hg_rels[2], expected_rel[2]) + self.assertDictEqual(ret_hg_rels[3], expected_rel[3]) + + mock_host_groups.side_effect = [['hg1']] + mock_host_group.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_storage_host_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_host_groups.side_effect = [exception.StorageBackendException] + mock_host_group.side_effect = [hg_1] + with self.assertRaises(Exception) as exc: + driver.list_storage_host_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + @mock.patch.object(VMaxRest, 'get_port_group') + @mock.patch.object(VMaxRest, 'get_port_group_list') + @mock.patch.object(VMaxRest, 'get_array_detail') 
+ @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_port_groups(self, mock_unisphere_version, + mock_version, mock_array, + mock_port_groups, mock_port_group): + expected = \ + [ + { + 'name': 'pg1', + 'storage_id': '12345', + 'native_port_group_id': 'pg1', + }, + { + 'name': 'pg2', + 'storage_id': '12345', + 'native_port_group_id': 'pg2', + }, + { + 'name': 'pg3', + 'storage_id': '12345', + 'native_port_group_id': 'pg3', + } + ] + expected_rel = [ + { + 'storage_id': '12345', + 'native_port_group_id': 'pg1', + 'native_port_id': 'FA-1D:1', + }, + { + 'storage_id': '12345', + 'native_port_group_id': 'pg1', + 'native_port_id': 'FA-1D:2', + }, + { + 'storage_id': '12345', + 'native_port_group_id': 'pg2', + 'native_port_id': 'FA-2D:2', + }, + { + 'storage_id': '12345', + 'native_port_group_id': 'pg3', + 'native_port_id': 'FA-3D:1', + }, + ] + pg_1 = { + 'hostGroupId': 'hg1', + 'symmetrixPortKey': [ + { + "directorId": "FA-1D", + "portId": "1" + }, + { + "directorId": "FA-1D", + "portId": "2" + } + ], + } + pg_2 = { + 'hostGroupId': 'hg2', + 'symmetrixPortKey': [ + { + "directorId": "FA-2D", + "portId": "2" + } + ], + } + pg_3 = { + 'hostGroupId': 'hg3', + 'symmetrixPortKey': [ + { + "directorId": "FA-3D", + "portId": "1" + }, + ], + } + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = {'symmetrixId': ['00112233']} + mock_port_groups.side_effect = [['pg1', 'pg2', 'pg3']] + mock_port_group.side_effect = [pg_1, pg_2, pg_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_port_groups(context) + ret_pgs = ret['port_groups'] + ret_pg_rels = ret['port_grp_port_rels'] + self.assertDictEqual(ret_pgs[0], expected[0]) + self.assertDictEqual(ret_pgs[1], expected[1]) + self.assertDictEqual(ret_pgs[2], expected[2]) + self.assertDictEqual(ret_pg_rels[0], expected_rel[0]) + self.assertDictEqual(ret_pg_rels[1], expected_rel[1]) + self.assertDictEqual(ret_pg_rels[2], expected_rel[2]) + self.assertDictEqual(ret_pg_rels[3], expected_rel[3]) + + mock_port_groups.side_effect = [['pg1']] + mock_port_group.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_port_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_port_groups.side_effect = [exception.StorageBackendException] + mock_port_group.side_effect = [pg_1] + with self.assertRaises(Exception) as exc: + driver.list_port_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + @mock.patch.object(VMaxRest, 'get_volume_list') + @mock.patch.object(VMaxRest, 'get_volume_group_list') + @mock.patch.object(VMaxRest, 'get_array_detail') + @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_volume_groups(self, mock_unisphere_version, + mock_version, mock_array, + mock_volume_groups, mock_volumes): + expected = \ + [ + { + 'name': 'vg1', + 'storage_id': '12345', + 'native_volume_group_id': 'vg1', + }, + { + 'name': 'vg2', + 'storage_id': '12345', + 'native_volume_group_id': 'vg2', + }, + { + 'name': 'vg3', + 'storage_id': '12345', + 'native_volume_group_id': 'vg3', + } + ] + expected_rel = [ + { + 'storage_id': '12345', + 'native_volume_group_id': 'vg1', + 'native_volume_id': 
'volume1', + }, + { + 'storage_id': '12345', + 'native_volume_group_id': 'vg1', + 'native_volume_id': 'volume2', + }, + { + 'storage_id': '12345', + 'native_volume_group_id': 'vg2', + 'native_volume_id': 'volume2', + }, + { + 'storage_id': '12345', + 'native_volume_group_id': 'vg3', + 'native_volume_id': 'volume1', + }, + ] + v_1 = ['volume1', 'volume2'] + v_2 = ['volume2'] + v_3 = ['volume1'] + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = {'symmetrixId': ['00112233']} + mock_volume_groups.side_effect = [['vg1', 'vg2', 'vg3']] + mock_volumes.side_effect = [v_1, v_2, v_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_volume_groups(context) + ret_vgs = ret['volume_groups'] + ret_vg_rels = ret['vol_grp_vol_rels'] + self.assertDictEqual(ret_vgs[0], expected[0]) + self.assertDictEqual(ret_vgs[1], expected[1]) + self.assertDictEqual(ret_vgs[2], expected[2]) + self.assertDictEqual(ret_vg_rels[0], expected_rel[0]) + self.assertDictEqual(ret_vg_rels[1], expected_rel[1]) + self.assertDictEqual(ret_vg_rels[2], expected_rel[2]) + self.assertDictEqual(ret_vg_rels[3], expected_rel[3]) + + mock_volume_groups.side_effect = [['vg1']] + mock_volumes.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_volume_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_volume_groups.side_effect = [exception.StorageBackendException] + mock_volumes.side_effect = [v_1] + with self.assertRaises(Exception) as exc: + driver.list_volume_groups(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + @mock.patch.object(VMaxRest, 'get_masking_view') + @mock.patch.object(VMaxRest, 'get_masking_view_list') + @mock.patch.object(VMaxRest, 'get_array_detail') + @mock.patch.object(VMaxRest, 'get_uni_version') + @mock.patch.object(VMaxRest, 'get_unisphere_version') + def test_list_masking_views(self, mock_unisphere_version, + mock_version, mock_array, + mock_masking_views, mock_masking_view): + expected = \ + [ + { + 'storage_id': '12345', + 'native_storage_host_id': 'host1', + 'native_storage_host_group_id': 'hg1', + 'native_volume_group_id': 'sg1', + 'native_port_group_id': 'pg1', + 'native_masking_view_id': 'mv1', + 'name': 'mv1', + }, + { + 'storage_id': '12345', + 'native_storage_host_id': 'host2', + 'native_storage_host_group_id': 'hg2', + 'native_volume_group_id': 'sg2', + 'native_port_group_id': 'pg2', + 'native_masking_view_id': 'mv2', + 'name': 'mv2', + }, + { + 'storage_id': '12345', + 'native_storage_host_id': 'host3', + 'native_storage_host_group_id': 'hg3', + 'native_volume_group_id': 'sg3', + 'native_port_group_id': 'pg3', + 'native_masking_view_id': 'mv3', + 'name': 'mv3', + } + ] + mv_1 = { + 'maskingViewId': 'mv1', + 'hostId': 'host1', + 'hostGroupId': 'hg1', + 'storageGroupId': 'sg1', + 'portGroupId': 'pg1', + } + mv_2 = { + 'maskingViewId': 'mv2', + 'hostId': 'host2', + 'hostGroupId': 'hg2', + 'storageGroupId': 'sg2', + 'portGroupId': 'pg2', + } + mv_3 = { + 'maskingViewId': 'mv3', + 'hostId': 'host3', + 'hostGroupId': 'hg3', + 'storageGroupId': 'sg3', + 'portGroupId': 'pg3', + } + + kwargs = VMAX_STORAGE_CONF + mock_version.return_value = ['V9.2.2.7', '92'] + mock_unisphere_version.return_value = ['V9.2.2.7', '92'] + mock_array.return_value = 
{'symmetrixId': ['00112233']} + mock_masking_views.side_effect = [['mv1', 'mv2', 'mv3']] + mock_masking_view.side_effect = [mv_1, mv_2, mv_3] + + driver = VMAXStorageDriver(**kwargs) + self.assertEqual(driver.storage_id, "12345") + self.assertEqual(driver.client.array_id, "00112233") + ret = driver.list_masking_views(context) + self.assertDictEqual(ret[0], expected[0]) + self.assertDictEqual(ret[1], expected[1]) + self.assertDictEqual(ret[2], expected[2]) + + mock_masking_views.side_effect = [['mv1']] + mock_masking_view.side_effect = [exception.StorageBackendException] + with self.assertRaises(Exception) as exc: + driver.list_masking_views(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + + mock_masking_views.side_effect = [exception.StorageBackendException] + mock_masking_view.side_effect = [mv_1] + with self.assertRaises(Exception) as exc: + driver.list_masking_views(context) + + self.assertIn('Exception from Storage Backend', + str(exc.exception)) + @mock.patch.object(Session, 'request') @mock.patch.object(VMaxRest, 'get_array_detail') @mock.patch.object(VMaxRest, 'get_uni_version') From ae88bc2bf088a7ac61eb613ccc7c2ba64b65e80c Mon Sep 17 00:00:00 2001 From: Skm26 <72231348+Skm26@users.noreply.github.com> Date: Mon, 21 Mar 2022 16:20:43 +0530 Subject: [PATCH 11/24] Update README.md --- installer/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/installer/README.md b/installer/README.md index 5a84b3317..238d59eba 100644 --- a/installer/README.md +++ b/installer/README.md @@ -189,3 +189,5 @@ The logs can be uniquely identified based upon the timestamp. 4. [Optional] If prometheus is configured, monitor the performance metrics on prometheus server at default location http://localhost:9090/graph + +# Limitation From d14f5b30c179688c18f9395672faed374290abe9 Mon Sep 17 00:00:00 2001 From: Skm26 <72231348+Skm26@users.noreply.github.com> Date: Mon, 21 Mar 2022 16:22:48 +0530 Subject: [PATCH 12/24] Update README.md --- installer/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/installer/README.md b/installer/README.md index 238d59eba..d26f74387 100644 --- a/installer/README.md +++ b/installer/README.md @@ -191,3 +191,4 @@ The logs can be uniquely identified based upon the timestamp. http://localhost:9090/graph # Limitation +Local installation, unlike Ansible installer, does not support SODA Dashboard integration. 
From a391b5e18721debbed0d585ff271d11e2f8db0bb Mon Sep 17 00:00:00 2001 From: UtkarshShah0 <93548048+UtkarshShah0@users.noreply.github.com> Date: Thu, 24 Mar 2022 01:24:15 +0530 Subject: [PATCH 13/24] Updated quick-start link Readme fixed issue #838 sodacode2022 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4880ed3a6..84063ab09 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,11 @@ This is one of the SODA Core Projects and is maintained by SODA Foundation direc ## Quick Start - To Use/Experience -[https://docs.sodafoundation.io](https://docs.sodafoundation.io/) +[https://docs.sodafoundation.io/guides/user-guides/delfin](https://docs.sodafoundation.io/guides/user-guides/delfin/) ## Quick Start - To Develop -[https://docs.sodafoundation.io](https://docs.sodafoundation.io/) +[https://docs.sodafoundation.io/guides/developer-guides/delfin](https://docs.sodafoundation.io/guides/developer-guides/delfin/) ## Latest Releases From f3f5510be08af06b8d630108a1447b3b35db3395 Mon Sep 17 00:00:00 2001 From: UtkarshShah0 <93548048+UtkarshShah0@users.noreply.github.com> Date: Thu, 21 Apr 2022 22:36:47 +0530 Subject: [PATCH 14/24] Fixed issue #873 Fixed the return value in the function get_remote_file_to_xml --- delfin/drivers/utils/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/delfin/drivers/utils/tools.py b/delfin/drivers/utils/tools.py index d77bf4e12..93a172995 100644 --- a/delfin/drivers/utils/tools.py +++ b/delfin/drivers/utils/tools.py @@ -136,4 +136,4 @@ def get_remote_file_to_xml(ssh, file, local_path, remote_path): finally: if os.path.exists(local_file): Tools.remove_file_with_same_type(file, local_path) - return root_node + return root_node From 77fb689d4664e8f0e17e31940271375017a28ec8 Mon Sep 17 00:00:00 2001 From: Yash <93548927+code4Y@users.noreply.github.com> Date: Thu, 21 Apr 2022 23:08:03 +0530 Subject: [PATCH 15/24] Removed inappropriate operator (And) #872 And operator and duplicate host was removed --- delfin/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/delfin/test.py b/delfin/test.py index 39e6bc5b3..cac4802e9 100644 --- a/delfin/test.py +++ b/delfin/test.py @@ -153,7 +153,7 @@ def flags(self, **kw): CONF.set_override(k, v) def start_service(self, name, host=None, **kwargs): - host = host and host or uuidutils.generate_uuid() + host = host or uuidutils.generate_uuid() kwargs.setdefault('host', host) kwargs.setdefault('binary', 'delfin-%s' % name) svc = service.Service.create(**kwargs) From 2f3bccd14ddba89fd32c029d443f5f72de1ab6f5 Mon Sep 17 00:00:00 2001 From: nikita15p Date: Fri, 22 Apr 2022 11:06:57 +0530 Subject: [PATCH 16/24] fixed 871 Signed-off-by: nikita15p --- delfin/tests/unit/api/fakes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/delfin/tests/unit/api/fakes.py b/delfin/tests/unit/api/fakes.py index d61e62a45..00b928658 100644 --- a/delfin/tests/unit/api/fakes.py +++ b/delfin/tests/unit/api/fakes.py @@ -329,7 +329,7 @@ def fake_access_infos_show_all(context): return [access_info] -def fake_update_access_info(self, context, access_info): +def fake_update_access_info(self, context): access_info = models.AccessInfo() access_info.updated_at = '2020-06-15T09:50:31.698956' From ac3ec46b40c0e264953f6469dfa4e2c0b3c98eeb Mon Sep 17 00:00:00 2001 From: UtkarshShah0 <93548048+UtkarshShah0@users.noreply.github.com> Date: Fri, 22 Apr 2022 15:32:32 +0530 Subject: [PATCH 17/24] updated_post_install_verification Fixed issue #840 Added post 
install verification step --- installer/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/installer/README.md b/installer/README.md index d26f74387..2dc362620 100644 --- a/installer/README.md +++ b/installer/README.md @@ -131,6 +131,13 @@ $ installer/install Note: Multiple instances of exporter and api is not allowed currently. +#### Post install verification +After delfin installation use the following command to verify all process +of delfin are running. +```sh +ps -aux | grep delfin +``` + # Uninstall Running the uninstall script will stop all delfin processes and do cleanup ```sh From 5286cd7b42cd3b76f70e22b7e7b68972743d8657 Mon Sep 17 00:00:00 2001 From: Navaneetha167 <103172664+Navaneetha167@users.noreply.github.com> Date: Tue, 26 Apr 2022 19:31:07 +0530 Subject: [PATCH 18/24] the updated swagger file error is in snmp_config --- openapi-spec/swagger.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openapi-spec/swagger.yaml b/openapi-spec/swagger.yaml index 538ac832e..619e772ad 100644 --- a/openapi-spec/swagger.yaml +++ b/openapi-spec/swagger.yaml @@ -3539,7 +3539,7 @@ components: description: Response for all access infos configuration. type: object properties: - snmp_configs: + access_infos: type: array description: the list of access info items: From 5618992adf54cc3beef0dc0896564744a556539b Mon Sep 17 00:00:00 2001 From: Yash <93548927+code4Y@users.noreply.github.com> Date: Wed, 27 Apr 2022 10:57:10 +0530 Subject: [PATCH 19/24] Comment updated for Fakedriver - for issue #888 Added comments explaining why Fakedriver methods are empty : add_trap_config remove_trap_config parse_alert clear_alert --- delfin/drivers/fake_storage/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index fd2a3a179..db58d3e5f 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -422,17 +422,17 @@ def list_shares(self, ctx): return share_list def add_trap_config(self, context, trap_config): - pass + pass # Fakedriver do not require to add trap config def remove_trap_config(self, context, trap_config): - pass + pass # Fakedriver do not require to remove trap config @staticmethod def parse_alert(context, alert): - pass + pass # Fakedriver do not require to parse alert def clear_alert(self, context, alert): - pass + pass # Fakedriver do not require to clear alert def list_alerts(self, context, query_para=None): alert_list = [{ From a001e742d82125bc094cad7dcb9678645d5223f6 Mon Sep 17 00:00:00 2001 From: Yash <93548927+code4Y@users.noreply.github.com> Date: Wed, 27 Apr 2022 17:58:02 +0530 Subject: [PATCH 20/24] Added spaces before comments Added two spaces before comments --- delfin/drivers/fake_storage/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index db58d3e5f..fe43ec95c 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -422,17 +422,17 @@ def list_shares(self, ctx): return share_list def add_trap_config(self, context, trap_config): - pass # Fakedriver do not require to add trap config + pass # Fakedriver do not require to add trap config def remove_trap_config(self, context, trap_config): - pass # Fakedriver do not require to remove trap config + pass # Fakedriver do not require to remove trap config @staticmethod def 
parse_alert(context, alert): - pass # Fakedriver do not require to parse alert + pass # Fakedriver do not require to parse alert def clear_alert(self, context, alert): - pass # Fakedriver do not require to clear alert + pass # Fakedriver do not require to clear alert def list_alerts(self, context, query_para=None): alert_list = [{ From e8066026ff278ce53ba89b750b7ccacb6beaf468 Mon Sep 17 00:00:00 2001 From: tanjiangyu-ghca <79631193+tanjiangyu-ghca@users.noreply.github.com> Date: Thu, 28 Apr 2022 15:57:18 +0800 Subject: [PATCH 21/24] ds8000 add host mapping (#822) --- delfin/drivers/ibm/ds8k/consts.py | 2 + delfin/drivers/ibm/ds8k/ds8k.py | 86 ++++++++++++++++- .../drivers/ibm/ibm_ds8k/test_ibm_ds8k.py | 96 +++++++++++++++++++ 3 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 delfin/drivers/ibm/ds8k/consts.py diff --git a/delfin/drivers/ibm/ds8k/consts.py b/delfin/drivers/ibm/ds8k/consts.py new file mode 100644 index 000000000..d7d907371 --- /dev/null +++ b/delfin/drivers/ibm/ds8k/consts.py @@ -0,0 +1,2 @@ +HOST_PORT_URL = '/api/v1/host_ports' +HOST_URL = '/api/v1/hosts' diff --git a/delfin/drivers/ibm/ds8k/ds8k.py b/delfin/drivers/ibm/ds8k/ds8k.py index 41b25fe09..550109626 100644 --- a/delfin/drivers/ibm/ds8k/ds8k.py +++ b/delfin/drivers/ibm/ds8k/ds8k.py @@ -18,7 +18,7 @@ from delfin import exception from delfin.common import constants from delfin.drivers import driver -from delfin.drivers.ibm.ds8k import rest_handler, alert_handler +from delfin.drivers.ibm.ds8k import rest_handler, alert_handler, consts LOG = log.getLogger(__name__) @@ -35,6 +35,10 @@ class DS8KDriver(driver.StorageDriver): 'fenced': constants.PortHealthStatus.UNKNOWN, 'quiescing': constants.PortHealthStatus.UNKNOWN } + INITIATOR_STATUS_MAP = {'logged in': constants.InitiatorStatus.ONLINE, + 'logged out': constants.InitiatorStatus.OFFLINE, + 'unconfigured': constants.InitiatorStatus.UNKNOWN + } def __init__(self, **kwargs): super().__init__(**kwargs) @@ -259,3 +263,83 @@ def clear_alert(self, context, alert): @staticmethod def get_access_url(): return 'https://{ip}:{port}' + + def list_storage_hosts(self, context): + try: + host_list = [] + hosts = self.rest_handler.get_rest_info(consts.HOST_URL) + if not hosts: + return host_list + host_entries = hosts.get('data', {}).get('hosts', []) + for host in host_entries: + status = constants.HostStatus.NORMAL if \ + host.get('state') == 'online' else \ + constants.HostStatus.OFFLINE + os_type = constants.HostOSTypes.VMWARE_ESX if \ + host.get('hosttype') == 'VMware' else \ + constants.HostOSTypes.UNKNOWN + host_result = { + "name": host.get('name'), + "storage_id": self.storage_id, + "native_storage_host_id": host.get('name'), + "os_type": os_type, + "status": status + } + host_list.append(host_result) + return host_list + except Exception as e: + LOG.error("Failed to get hosts from ds8k") + raise e + + def list_masking_views(self, context): + try: + view_list = [] + hosts = self.rest_handler.get_rest_info(consts.HOST_URL) + if not hosts: + return view_list + host_entries = hosts.get('data', {}).get('hosts', []) + for host in host_entries: + view_url = '%s/%s/mappings' % (consts.HOST_URL, + host.get('name')) + views = self.rest_handler.get_rest_info(view_url) + if not views: + continue + view_entries = views.get('data', {}).get('mappings', []) + for view in view_entries: + view_id = '%s_%s' % (view.get('lunid'), host.get('name')) + view_result = { + "name": view_id, + "native_storage_host_id": host.get('name'), + "storage_id": self.storage_id, + 
"native_volume_id": view.get('volume', {}).get('id'), + "native_masking_view_id": view_id, + } + view_list.append(view_result) + return view_list + except Exception as e: + LOG.error("Failed to get views from ds8k") + raise e + + def list_storage_host_initiators(self, context): + try: + initiator_list = [] + host_ports = self.rest_handler.get_rest_info(consts.HOST_PORT_URL) + if not host_ports: + return initiator_list + port_entries = host_ports.get('data', {}).get('host_ports', []) + for port in port_entries: + status = DS8KDriver.INITIATOR_STATUS_MAP.get(port.get('state')) + init_result = { + "name": port.get('wwpn'), + "storage_id": self.storage_id, + "native_storage_host_initiator_id": port.get('wwpn'), + "wwn": port.get('wwpn'), + "status": status, + "type": constants.InitiatorType.UNKNOWN, + "native_storage_host_id": port.get('host', {}).get('name') + } + initiator_list.append(init_result) + return initiator_list + except Exception as e: + LOG.error("Failed to get initiators from ds8k") + raise e diff --git a/delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py b/delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py index e3dcd7936..145608c03 100644 --- a/delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py +++ b/delfin/tests/unit/drivers/ibm/ibm_ds8k/test_ibm_ds8k.py @@ -344,6 +344,80 @@ 'resource_type': 'Storage', 'location': 'eeeeeeeee' } +GET_INITORATORS = { + "data": { + "host_ports": + [ + { + "wwpn": "50050763030813A2", + "state": "logged in", + "hosttype": "VMware", + "addrdiscovery": "lunpolling", + "lbs": "512", + "host": { + "name": "myhost" + } + } + ] + } +} +INIT_RESULT = [ + { + 'name': '50050763030813A2', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '50050763030813A2', + 'wwn': '50050763030813A2', + 'status': 'online', + 'type': 'unknown', + 'native_storage_host_id': 'myhost' + } +] +GET_ALL_HOSTS = { + "data": { + "hosts": + [ + { + "name": "test_host", + "state": "online", + "hosttype": "VMware", + "addrmode": "SCSI mask", + "addrdiscovery": "lunpolling", + "lbs": "512" + } + ] + } +} +HOST_RESULT = [ + { + 'name': 'test_host', + 'storage_id': '12345', + 'native_storage_host_id': 'test_host', + 'os_type': 'VMware ESX', + 'status': 'normal' + } +] +GET_HOST_MAPPING = { + "data": { + "mappings": + [ + { + "lunid": "00", + "volume": { + "id": "0005" + } + } + ] + } +} +VIEW_RESULT = [ + { + 'name': '00_test_host', + 'native_storage_host_id': 'test_host', + 'storage_id': '12345', + 'native_volume_id': '0005', + 'native_masking_view_id': '00_test_host' + } +] class TestDS8KDriver(TestCase): @@ -401,3 +475,25 @@ def test_list_list_controllers(self, mock_contrl): mock_contrl.return_value = GET_ALL_CONTROLLERS controller = DS8KDriver(**ACCESS_INFO).list_controllers(context) self.assertEqual(controller, contrl_result) + + @mock.patch.object(RestHandler, 'get_rest_info') + def test_host_initiators(self, mock_init): + RestHandler.login = mock.Mock(return_value=None) + mock_init.return_value = GET_INITORATORS + initiators = DS8KDriver( + **ACCESS_INFO).list_storage_host_initiators(context) + self.assertEqual(initiators, INIT_RESULT) + + @mock.patch.object(RestHandler, 'get_rest_info') + def test_hosts(self, mock_host): + RestHandler.login = mock.Mock(return_value=None) + mock_host.return_value = GET_ALL_HOSTS + hosts = DS8KDriver(**ACCESS_INFO).list_storage_hosts(context) + self.assertEqual(hosts, HOST_RESULT) + + @mock.patch.object(RestHandler, 'get_rest_info') + def test_masking_views(self, mock_view): + RestHandler.login = 
mock.Mock(return_value=None) + mock_view.side_effect = [GET_ALL_HOSTS, GET_HOST_MAPPING] + views = DS8KDriver(**ACCESS_INFO).list_masking_views(context) + self.assertEqual(views, VIEW_RESULT) From f5a82ab9928aa4db93fe8fab7504824ce0f48833 Mon Sep 17 00:00:00 2001 From: tanjiangyu-ghca <79631193+tanjiangyu-ghca@users.noreply.github.com> Date: Thu, 28 Apr 2022 16:18:44 +0800 Subject: [PATCH 22/24] vsp add host view mapping (#810) --- delfin/drivers/hitachi/vsp/consts.py | 2 +- delfin/drivers/hitachi/vsp/rest_handler.py | 42 ++- delfin/drivers/hitachi/vsp/vsp_stor.py | 267 ++++++++++++++++++ .../hitachi/vsp/test_hitachi_vspstor.py | 161 +++++++++++ 4 files changed, 467 insertions(+), 5 deletions(-) diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py index b8538606e..29bd9b98e 100644 --- a/delfin/drivers/hitachi/vsp/consts.py +++ b/delfin/drivers/hitachi/vsp/consts.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -SOCKET_TIMEOUT = 90 +SOCKET_TIMEOUT = 180 ERROR_SESSION_INVALID_CODE = 403 ERROR_SESSION_IS_BEING_USED_CODE = 409 BLOCK_SIZE = 512 diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py index 55e34fc0b..7d85fc397 100644 --- a/delfin/drivers/hitachi/vsp/rest_handler.py +++ b/delfin/drivers/hitachi/vsp/rest_handler.py @@ -177,14 +177,14 @@ def get_device_id(self): system_info = storage_systems.get('data') for system in system_info: succeed = True - if system.get('model') in consts.SUPPORTED_VSP_SERIES: - if system.get('ctl1Ip') == self.rest_host or \ - system.get('ctl2Ip') == self.rest_host: + if system.get('svpIp'): + if system.get('svpIp') == self.rest_host: self.storage_device_id = system.get('storageDeviceId') self.device_model = system.get('model') self.serial_number = system.get('serialNumber') break - elif system.get('svpIp') == self.rest_host: + elif system.get('ctl1Ip') == self.rest_host or \ + system.get('ctl2Ip') == self.rest_host: self.storage_device_id = system.get('storageDeviceId') self.device_model = system.get('model') self.serial_number = system.get('serialNumber') @@ -261,3 +261,37 @@ def get_alerts(self, param, start, end): param, start, end) result_json = self.get_rest_info(url) return result_json + + def get_all_host_groups(self): + url = '%s/%s/host-groups' % \ + (RestHandler.COMM_URL, self.storage_device_id) + result_json = self.get_rest_info(url) + return result_json + + def get_specific_host_group(self, port_id): + url = '%s/%s/host-groups?portId=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id) + result_json = self.get_rest_info(url) + return result_json + + def get_host_wwn(self, port_id, group_number): + url = '%s/%s/host-wwns?portId=%s&hostGroupNumber=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json + + def get_iscsi_name(self, port_id, group_number): + url = '%s/%s/host-iscsis?portId=%s&hostGroupNumber=%s' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json + + def get_lun_path(self, port_id, group_number): + url = '%s/%s/luns?portId=%s&hostGroupNumber=%s&' \ + 'isBasicLunInformation=true' % \ + (RestHandler.COMM_URL, self.storage_device_id, port_id, + group_number) + result_json = self.get_rest_info(url) + return result_json diff 
--git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py index 82bcdfb92..87f4f9de3 100644 --- a/delfin/drivers/hitachi/vsp/vsp_stor.py +++ b/delfin/drivers/hitachi/vsp/vsp_stor.py @@ -64,6 +64,18 @@ class HitachiVspDriver(driver.StorageDriver): "HNASS": constants.PortType.OTHER, "HNASU": constants.PortType.OTHER } + OS_TYPE_MAP = {"HP-UX": constants.HostOSTypes.HP_UX, + "SOLARIS": constants.HostOSTypes.SOLARIS, + "AIX": constants.HostOSTypes.AIX, + "WIN": constants.HostOSTypes.WINDOWS, + "LINUX/IRIX": constants.HostOSTypes.LINUX, + "TRU64": constants.HostOSTypes.UNKNOWN, + "OVMS": constants.HostOSTypes.OPEN_VMS, + "NETWARE": constants.HostOSTypes.UNKNOWN, + "VMWARE": constants.HostOSTypes.VMWARE_ESX, + "VMWARE_EX": constants.HostOSTypes.VMWARE_ESX, + "WIN_EX": constants.HostOSTypes.WINDOWS + } DISK_STATUS_TYPE = {"NML": constants.DiskStatus.NORMAL, "CPY": constants.DiskStatus.NORMAL, "CPI": constants.DiskStatus.NORMAL, @@ -501,3 +513,258 @@ def parse_alert(context, alert): def clear_alert(self, context, alert): pass + + @staticmethod + def handle_group_with_port(group_info): + group_list = {} + if not group_info: + return group_list + group_entries = group_info.get('data') + for group in group_entries: + if group_list.get(group.get('portId')): + group_list[group.get('portId')].append( + group.get('hostGroupNumber')) + else: + group_list[group.get('portId')] = [] + group_list[group.get('portId')].append( + group.get('hostGroupNumber')) + return group_list + + @staticmethod + def get_host_info(data, storage_id, host_list, type, os_type): + if data: + host_entries = data.get('data') + if not host_entries: + return True + for host in host_entries: + if type == 'iscsi': + host_id = host.get('hostIscsiId') + host_name = host.get('iscsiNickname') if \ + host.get('iscsiNickname') != '-' \ + else host.get('iscsiName') + else: + host_id = host.get('hostWwnId') + host_name = host.get('wwnNickname') if \ + host.get('wwnNickname') != '-' \ + else host.get('hostWwn') + host_result = { + "name": host_name, + "storage_id": storage_id, + "native_storage_host_id": host_id.replace(",", "_"), + "os_type": os_type, + "status": constants.HostStatus.NORMAL + } + host_list.append(host_result) + return True + + def list_storage_hosts(self, context): + try: + host_groups = self.rest_handler.get_all_host_groups() + host_list = [] + if not host_groups: + return host_list + group_with_port = HitachiVspDriver.handle_group_with_port( + host_groups) + for port in group_with_port: + kwargs = { + 'method': 'host', + 'port': port, + 'result': host_list + } + self.handle_san_info(**kwargs) + return host_list + except Exception as e: + LOG.error("Failed to get host from vsp") + raise e + + @staticmethod + def get_initiator_from_host(data, storage_id, initiator_list, type): + if data: + host_entries = data.get('data') + if not host_entries: + return True + for host in host_entries: + if type == 'iscsi': + initiator_id = host.get('hostIscsiId') + init_type = constants.InitiatorType.ISCSI + init_name = host.get('iscsiName') + else: + initiator_id = host.get('hostWwnId') + init_type = constants.InitiatorType.FC + init_name = host.get('hostWwn') + for initiator in initiator_list: + if initiator.get('wwn') == init_name: + continue + init_result = { + "name": init_name, + "storage_id": storage_id, + "native_storage_host_initiator_id": init_name, + "wwn": init_name, + "status": constants.InitiatorStatus.ONLINE, + "type": init_type, + "alias": host.get('portId'), + "native_storage_host_id": 
initiator_id.replace(",", "_") + } + initiator_list.append(init_result) + return True + + def list_storage_host_initiators(self, context): + try: + initiator_list = [] + host_groups = self.rest_handler.get_all_host_groups() + if not host_groups: + return initiator_list + group_with_port = HitachiVspDriver.handle_group_with_port( + host_groups) + for port in group_with_port: + kwargs = { + 'method': 'initator', + 'port': port, + 'result': initiator_list + } + self.handle_san_info(**kwargs) + return initiator_list + except Exception as e: + LOG.error("Failed to get initiators from vsp") + raise e + + @staticmethod + def get_host_ids(data, target, host_ids, host_grp_relation_list, + storage_id, group_id): + if data: + host_entries = data.get('data') + if not host_entries: + return True + for host in host_entries: + if host.get(target): + host_ids.append(host.get(target).replace(",", "_")) + relation = { + 'storage_id': storage_id, + 'native_storage_host_group_id': group_id, + 'native_storage_host_id': + host.get(target).replace(",", "_") + } + host_grp_relation_list.append(relation) + + def list_storage_host_groups(self, context): + try: + host_groups = self.rest_handler.get_all_host_groups() + host_group_list = [] + host_grp_relation_list = [] + if not host_groups: + return host_group_list + group_with_port = HitachiVspDriver.handle_group_with_port( + host_groups) + for port in group_with_port: + kwargs = { + 'method': 'group', + 'port': port, + 'result': host_grp_relation_list, + 'group_list': host_group_list + } + self.handle_san_info(**kwargs) + result = { + 'storage_host_groups': host_group_list, + 'storage_host_grp_host_rels': host_grp_relation_list + } + return result + except Exception: + LOG.error("Failed to get host_groups from vsp") + raise + + def handle_lun_path(self, **kwargs): + view_list = [] + views = self.rest_handler.get_lun_path( + kwargs.get('port'), kwargs.get('group')) + if not views: + return None + view_entries = views.get('data') + if not view_entries: + return None + for view in view_entries: + group_id = '%s_%s' % (view.get('portId'), + view.get('hostGroupNumber')) + view_result = { + "name": view.get('lunId'), + "native_storage_host_group_id": group_id, + "storage_id": self.storage_id, + "native_volume_id": HitachiVspDriver.to_vsp_lun_id_format( + view.get('ldevId')), + "native_masking_view_id": view.get('lunId').replace(",", "_"), + } + kwargs.get('result').append(view_result) + return view_list + + def list_masking_views(self, context): + try: + view_list = [] + host_groups = self.rest_handler.get_all_host_groups() + if not host_groups: + return view_list + group_data = host_groups.get('data') + for group in group_data: + kwargs = { + 'group': group.get('hostGroupNumber'), + 'port': group.get('portId'), + 'result': view_list + } + self.handle_lun_path(**kwargs) + return view_list + except Exception as e: + LOG.error("Failed to get views from vsp") + raise e + + def handle_san_info(self, **kwargs): + groups = self.rest_handler.get_specific_host_group( + kwargs.get('port')) + group_data = groups.get('data') + for specific_group in group_data: + iscsis = None + wwns = None + if specific_group.get('iscsiName'): + iscsis = self.rest_handler.get_iscsi_name( + specific_group.get('portId'), + specific_group.get('hostGroupNumber')) + else: + wwns = self.rest_handler.get_host_wwn( + specific_group.get('portId'), + specific_group.get('hostGroupNumber')) + if kwargs.get('method') == 'host': + os_type = HitachiVspDriver.OS_TYPE_MAP.get( + specific_group.get('hostMode'), + 
constants.HostOSTypes.UNKNOWN) + if specific_group.get('iscsiName'): + HitachiVspDriver.get_host_info( + iscsis, self.storage_id, kwargs.get('result'), + 'iscsi', os_type) + else: + HitachiVspDriver.get_host_info( + wwns, self.storage_id, + kwargs.get('result'), 'fc', os_type) + elif kwargs.get('method') == 'group': + host_ids = [] + group_id = specific_group.get('hostGroupId').replace(",", "_") + if specific_group.get('iscsiName'): + HitachiVspDriver.get_host_ids( + iscsis, 'hostIscsiId', host_ids, + kwargs.get('result'), self.storage_id, + group_id) + else: + HitachiVspDriver.get_host_ids( + wwns, 'hostWwnId', host_ids, + kwargs.get('result'), self.storage_id, + group_id) + group_result = { + "name": specific_group.get('hostGroupName'), + "storage_id": self.storage_id, + "native_storage_host_group_id": group_id, + "storage_hosts": ','.join(host_ids) + } + kwargs.get('group_list').append(group_result) + else: + if specific_group.get('iscsiName'): + HitachiVspDriver.get_initiator_from_host( + iscsis, self.storage_id, kwargs.get('result'), 'iscsi') + else: + HitachiVspDriver.get_initiator_from_host( + wwns, self.storage_id, kwargs.get('result'), 'fc') diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py index e235501ac..a00e7ecb2 100644 --- a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py +++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py @@ -454,6 +454,123 @@ def __init__(self): 'ipv4_mask': '255.255.0.0', 'ipv6': None }] +GET_ALL_GROUPS = { + "data": [ + { + "hostGroupId": "CL1-A,0", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostGroupName": "1A-G00", + "hostMode": "LINUX/IRIX" + } + ] +} +GET_SINGLE_WWN_GROUP = { + "data": [ + { + "hostGroupId": "CL1-A,0", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostGroupName": "1A-G00", + "hostMode": "LINUX/IRIX" + } + ] +} +GET_SINGLE_ISCSI_GROUP = { + "data": [ + { + "hostGroupId": "CL1-A,0", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostGroupName": "1A-G00", + "hostMode": "LINUX/IRIX", + "iscsiName": "iqn.ewdhehdhdhh" + } + ] +} +GET_HOST_WWN = { + "data": [ + { + "hostWwnId": "CL1-A,0,21000024ff8f5296", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostGroupName": "1A-G00", + "hostWwn": "21000024ff8f5296", + "wwnNickname": "-" + } + ] +} +GET_HOST_ISCSI = { + "data": [ + { + "hostIscsiId": "CL1-A,0,iqn.ewdhehdhdhh", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostGroupName": "3C-G00", + "iscsiName": "iqn.ewdhehdhdhh", + "iscsiNickname": "test_tjy" + } + ] +} +GET_LUN_PATH = { + "data": [ + { + "lunId": "CL1-A,1,1", + "portId": "CL1-A", + "hostGroupNumber": 0, + "hostMode": "LINUX/IRIX", + "lun": 1, + "ldevId": 1 + } + ] +} +initator_result = [ + { + 'name': '21000024ff8f5296', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '21000024ff8f5296', + 'wwn': '21000024ff8f5296', + 'status': 'online', + 'type': 'fc', + 'alias': 'CL1-A', + 'native_storage_host_id': 'CL1-A_0_21000024ff8f5296' + } +] +host_result = [ + { + 'name': 'test_tjy', + 'storage_id': '12345', + 'native_storage_host_id': 'CL1-A_0_iqn.ewdhehdhdhh', + 'os_type': 'Linux', + 'status': 'normal' + } +] +view_result = [ + { + 'name': 'CL1-A,1,1', + 'native_storage_host_group_id': 'CL1-A_0', + 'storage_id': '12345', + 'native_volume_id': '00:00:01', + 'native_masking_view_id': 'CL1-A_1_1' + } +] +groups_result = { + 'storage_host_groups': [ + { + 'name': '1A-G00', + 'storage_id': '12345', + 'native_storage_host_group_id': 'CL1-A_0', + 
'storage_hosts': 'CL1-A_0_iqn.ewdhehdhdhh' + } + ], + 'storage_host_grp_host_rels': [ + { + 'storage_id': '12345', + 'native_storage_host_group_id': 'CL1-A_0', + 'native_storage_host_id': 'CL1-A_0_iqn.ewdhehdhdhh' + } + ] +} def create_driver(): @@ -551,3 +668,47 @@ def test_list_ports(self, mock_detail, mock_all): mock_detail.return_value = GET_DETAIL_PORT port = HitachiVspDriver(**ACCESS_INFO).list_ports(context) self.assertEqual(port, port_result) + + @mock.patch.object(RestHandler, 'get_specific_host_group') + @mock.patch.object(RestHandler, 'get_all_host_groups') + @mock.patch.object(RestHandler, 'get_host_wwn') + def test_host_initiators(self, mock_wwn, mock_groups, mock_group): + RestHandler.login = mock.Mock(return_value=None) + mock_groups.return_value = GET_ALL_GROUPS + mock_group.return_value = GET_SINGLE_WWN_GROUP + mock_wwn.return_value = GET_HOST_WWN + initiators = HitachiVspDriver( + **ACCESS_INFO).list_storage_host_initiators(context) + self.assertEqual(initiators, initator_result) + + @mock.patch.object(RestHandler, 'get_specific_host_group') + @mock.patch.object(RestHandler, 'get_all_host_groups') + @mock.patch.object(RestHandler, 'get_iscsi_name') + def test_hosts(self, mock_iscsi, mock_groups, mock_group): + RestHandler.login = mock.Mock(return_value=None) + mock_groups.return_value = GET_ALL_GROUPS + mock_group.return_value = GET_SINGLE_ISCSI_GROUP + mock_iscsi.return_value = GET_HOST_ISCSI + hosts = HitachiVspDriver(**ACCESS_INFO).list_storage_hosts(context) + self.assertEqual(hosts, host_result) + + @mock.patch.object(RestHandler, 'get_all_host_groups') + @mock.patch.object(RestHandler, 'get_lun_path') + def test_masking_views(self, mock_view, mock_groups): + RestHandler.login = mock.Mock(return_value=None) + mock_groups.return_value = GET_ALL_GROUPS + mock_view.return_value = GET_LUN_PATH + views = HitachiVspDriver(**ACCESS_INFO).list_masking_views(context) + self.assertEqual(views, view_result) + + @mock.patch.object(RestHandler, 'get_specific_host_group') + @mock.patch.object(RestHandler, 'get_all_host_groups') + @mock.patch.object(RestHandler, 'get_iscsi_name') + def test_host_groups(self, mock_iscsi, mock_groups, mock_group): + RestHandler.login = mock.Mock(return_value=None) + mock_groups.return_value = GET_ALL_GROUPS + mock_group.return_value = GET_SINGLE_ISCSI_GROUP + mock_iscsi.return_value = GET_HOST_ISCSI + groups = \ + HitachiVspDriver(**ACCESS_INFO).list_storage_host_groups(context) + self.assertEqual(groups, groups_result) From 09f3b1fd4687c95a65cb40386e2d8c4ad661f1e9 Mon Sep 17 00:00:00 2001 From: muniraj321 <76615492+muniraj321@users.noreply.github.com> Date: Thu, 28 Apr 2022 21:49:39 +0530 Subject: [PATCH 23/24] update in this swagger.yaml update name and description in this file should be disk-> get/v1/disks --- openapi-spec/swagger.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/openapi-spec/swagger.yaml b/openapi-spec/swagger.yaml index 538ac832e..879f493f5 100644 --- a/openapi-spec/swagger.yaml +++ b/openapi-spec/swagger.yaml @@ -1041,16 +1041,16 @@ paths: - abnormal responses: '200': - description: List port query was success + description: List disk query was success content: application/json: schema: type: object required: - - ports + - disks additionalProperties: true properties: - ports: + disks: type: array title: the disk schema items: From e53d3a164e008261ffc2d9c05dd340d1f886a10b Mon Sep 17 00:00:00 2001 From: yuanyu-ghca <79956159+yuanyu-ghca@users.noreply.github.com> Date: Fri, 6 May 2022 15:31:40 +0800 
Subject: [PATCH 24/24] Emc vnx block add host mapping view (#807) --- .../vnx/vnx_block/component_handler.py | 122 ++++++++++++++++++ .../drivers/dell_emc/vnx/vnx_block/consts.py | 11 ++ .../dell_emc/vnx/vnx_block/navi_handler.py | 94 ++++++++++++++ .../dell_emc/vnx/vnx_block/vnx_block.py | 9 ++ .../dell_emc/vnx/vnx_block/test_vnx_block.py | 88 +++++++++++++ 5 files changed, 324 insertions(+) diff --git a/delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py b/delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py index 57310ec4c..914885c22 100644 --- a/delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py +++ b/delfin/drivers/dell_emc/vnx/vnx_block/component_handler.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import copy import re import six @@ -443,3 +444,124 @@ def get_iscsi_ports(self): name = '%s-%s' % (iscsi_port.get('sp'), iscsi_port.get('port_id')) iscsi_port_map[name] = iscsi_port return iscsi_port_map + + def list_masking_views(self, storage_id): + views = self.navi_handler.list_masking_views() + views_list = [] + host_vv_set = set() + if views: + for view in views: + name = view.get('storage_group_name') + host_names = view.get('host_names') + lun_ids = view.get('lun_ids') + if name: + if name == '~physical' or name == '~management': + continue + view_model_template = { + 'native_masking_view_id': view.get( + 'storage_group_uid'), + "name": view.get('storage_group_name'), + "storage_id": storage_id + } + if host_names and lun_ids: + host_names = list(set(host_names)) + for host_name in host_names: + host_id = host_name.replace(' ', '') + for lun_id in lun_ids: + host_vv_key = '%s_%s' % (host_id, lun_id) + if host_vv_key in host_vv_set: + continue + host_vv_set.add(host_vv_key) + view_model = copy.deepcopy(view_model_template) + view_model[ + 'native_storage_host_id'] = host_id + view_model['native_volume_id'] = lun_id + view_model[ + 'native_masking_view_id'] = '%s_%s_%s' % ( + view_model.get('native_masking_view_id'), + host_id, lun_id) + views_list.append(view_model) + return views_list + + def list_storage_host_initiators(self, storage_id): + initiators = self.navi_handler.list_hbas() + initiators_list = [] + initiator_set = set() + port_types = {} + if initiators: + ports = self.list_ports(storage_id) + for port in (ports or []): + if port and port.get('type'): + port_types[port.get('name')] = port.get('type') + for initiator in (initiators or []): + if initiator and initiator.get('hba_uid'): + hba_uid = initiator.get('hba_uid') + type = '' + if port_types: + ports = initiator.get('port_ids') + if ports: + port_id = list(ports)[0] + type = port_types.get(port_id, '') + host_id = initiator.get('server_name', '').replace(' ', '') + if host_id == hba_uid: + host_id = None + if not host_id: + continue + if hba_uid in initiator_set: + continue + initiator_set.add(hba_uid) + + initiator_model = { + "name": hba_uid, + "storage_id": storage_id, + "native_storage_host_initiator_id": hba_uid, + "wwn": hba_uid, + "type": consts.INITIATOR_TYPE_MAP.get( + type.upper(), constants.InitiatorType.UNKNOWN), + "status": constants.InitiatorStatus.ONLINE, + "native_storage_host_id": host_id + } + initiators_list.append(initiator_model) + return initiators_list + + def list_storage_hosts(self, storage_id): + hosts = self.navi_handler.list_hbas() + host_list = [] + host_ids = set() + host_ips = {} + for host in (hosts or []): + if 
host and host.get('server_name'): + os_type = constants.HostOSTypes.UNKNOWN + os_name = host.get('hba_vendor_description') + ip_addr = host.get('server_ip_address') + if ip_addr == 'UNKNOWN': + continue + if os_name and 'VMware ESXi' in os_name: + os_type = constants.HostOSTypes.VMWARE_ESX + id = host.get('server_name').replace(' ', '') + if id in host_ids: + continue + host_ids.add(id) + + if ip_addr in host_ips.keys(): + first_port_ids = host_ips.get(ip_addr) + cur_port_ids = host.get('port_ids') + add_host = False + intersections = list( + set(first_port_ids).intersection(set(cur_port_ids))) + if not intersections: + add_host = True + if not add_host: + continue + host_ips[ip_addr] = host.get('port_ids') + + host_model = { + "name": host.get('server_name'), + "storage_id": storage_id, + "native_storage_host_id": id, + "os_type": os_type, + "status": constants.HostStatus.NORMAL, + "ip_address": ip_addr + } + host_list.append(host_model) + return host_list diff --git a/delfin/drivers/dell_emc/vnx/vnx_block/consts.py b/delfin/drivers/dell_emc/vnx/vnx_block/consts.py index 9348d8c4c..d012a74bb 100644 --- a/delfin/drivers/dell_emc/vnx/vnx_block/consts.py +++ b/delfin/drivers/dell_emc/vnx/vnx_block/consts.py @@ -64,6 +64,8 @@ GET_LOG_API = 'getlog -date %(begin_time)s %(end_time)s' EMCVNX_VENDOR = 'DELL EMC' RAID_GROUP_ID_PREFIX = 'raid_group_' +GET_SG_LIST_HOST_API = 'storagegroup -messner -list -host' +GET_PORT_LIST_HBA_API = 'port -list -hba' STATUS_MAP = { 'Ready': constants.StoragePoolStatus.NORMAL, 'Offline': constants.StoragePoolStatus.OFFLINE, @@ -143,3 +145,12 @@ 'SAS': constants.PortType.SAS, 'UNKNOWN': constants.PortType.OTHER } +INITIATOR_TYPE_MAP = { + 'FC': constants.InitiatorType.FC, + 'FCOE': constants.InitiatorType.FC, + 'ISCSI': constants.InitiatorType.ISCSI, + 'SAS': constants.InitiatorType.SAS, + 'UNKNOWN': constants.InitiatorType.UNKNOWN +} +ALU_PAIRS_PATTERN = '^[0-9]+\\s+[0-9]+$' +HBA_UID_PATTERN = "^\\s*HBA UID\\s+SP Name\\s+SPPort" diff --git a/delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py b/delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py index 767815976..7483ec955 100644 --- a/delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py +++ b/delfin/drivers/dell_emc/vnx/vnx_block/navi_handler.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import re import threading import six @@ -606,3 +607,96 @@ def navi_exe(self, command_str, host_ip=None): raise e finally: self.session_lock.release() + + def list_masking_views(self): + return self.get_resources_info(consts.GET_SG_LIST_HOST_API, + self.cli_sg_to_list) + + def cli_sg_to_list(self, resource_info): + obj_list = [] + obj_model = {} + try: + obj_infos = resource_info.split('\n') + pattern = re.compile(consts.ALU_PAIRS_PATTERN) + for obj_info in obj_infos: + str_line = obj_info.strip() + if str_line: + if ':' not in str_line: + search_obj = pattern.search(str_line) + if search_obj: + str_info = str_line.split() + lun_ids = obj_model.get('lun_ids') + if lun_ids: + lun_ids.add(str_info[1]) + else: + lun_ids = set() + lun_ids.add(str_info[1]) + obj_model['lun_ids'] = lun_ids + else: + str_info = self.split_str_by_colon(str_line) + if 'Host name:' in str_line: + host_names = obj_model.get('host_names') + if host_names: + host_names.add(str_info[1]) + else: + host_names = set() + host_names.add(str_info[1]) + obj_model['host_names'] = host_names + continue + + obj_model = self.str_info_to_model(str_info, obj_model) + + if str_line.startswith('Shareable:'): + obj_list = self.add_model_to_list(obj_model, + obj_list) + obj_model = {} + except Exception as e: + err_msg = "arrange sg info error: %s", six.text_type(e) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + return obj_list + + def list_hbas(self): + return self.get_resources_info(consts.GET_PORT_LIST_HBA_API, + self.cli_hba_to_list) + + def cli_hba_to_list(self, resource_info): + obj_list = [] + obj_model = {} + sp_name = '' + port_ids = set() + try: + obj_infos = resource_info.split('\n') + for obj_info in obj_infos: + str_line = obj_info.strip() + if str_line: + if 'Information about each HBA:' in obj_info: + if obj_model: + obj_model['port_ids'] = port_ids + obj_list = self.add_model_to_list(obj_model, + obj_list) + obj_model = {} + port_ids = set() + sp_name = '' + if ':' in obj_info: + str_info = self.split_str_by_colon(str_line) + obj_model = self.str_info_to_model(str_info, obj_model) + if 'SP Name:' in obj_info: + sp_name = obj_info.replace('SP Name:', '').replace( + 'SP', '').replace('\r', '').replace(' ', '') + if 'SP Port ID:' in obj_info: + port_id = obj_info.replace('SP Port ID:', + '').replace('\r', + '').replace( + ' ', '') + port_id = '%s-%s' % (sp_name, port_id) + port_ids.add(port_id) + + if obj_model: + obj_model['port_ids'] = port_ids + obj_list.append(obj_model) + except Exception as e: + err_msg = "arrange host info error: %s", six.text_type(e) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + return obj_list diff --git a/delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py b/delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py index bd836b262..467094208 100644 --- a/delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py +++ b/delfin/drivers/dell_emc/vnx/vnx_block/vnx_block.py @@ -77,3 +77,12 @@ def clear_alert(self, context, sequence_number): @staticmethod def get_access_url(): return 'https://{ip}' + + def list_storage_host_initiators(self, context): + return self.com_handler.list_storage_host_initiators(self.storage_id) + + def list_storage_hosts(self, context): + return self.com_handler.list_storage_hosts(self.storage_id) + + def list_masking_views(self, context): + return self.com_handler.list_masking_views(self.storage_id) diff --git a/delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/test_vnx_block.py b/delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/test_vnx_block.py index 
00309f476..0f137da28 100644 --- a/delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/test_vnx_block.py +++ b/delfin/tests/unit/drivers/dell_emc/vnx/vnx_block/test_vnx_block.py @@ -354,6 +354,46 @@ I/O Module Type : SAS """ +VIEW_DATAS = """ +Storage Group Name: AIX_PowerHA_node2 +Storage Group UID: 0B:33:4A:6E:81:38:EC:11:90:2B:00:60:16:63 +HBA/SP Pairs: + + HBA UID SP Name SPPort + ------- ------- ------ + 20:00:00:00:C9:76:5E:79:10:00:00:00:C9:76:5E:79 SP A 6 +Host name: AIX_21 + 20:00:00:00:C9:75:80:4C:10:00:00:00:C9:75:80:4C SP B 3 +Host name: AIX_21 + +HLU/ALU Pairs: + + HLU Number ALU Number + ---------- ---------- + 1 335 +Shareable: YES +""" +HBA_DATAS = """ +Information about each HBA: + +HBA UID: 20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79 +Server Name: aix_ma +Server IP Address: 8.44.129.26 +HBA Model Description: +HBA Vendor Description: +HBA Device Driver Name: N/A +Information about each port of this HBA: + + SP Name: SP A + SP Port ID: 6 + HBA Devicename: N/A + Trusted: NO + Logged In: NO + Defined: YES + Initiator Type: 3 + StorageGroup Name: None +""" + AGENT_RESULT = { 'agent_rev': '7.33.1 (0.38)', 'name': 'K10', @@ -520,6 +560,35 @@ 'ipv6': None, 'ipv6_mask': None }] +VIEW_RESULT = [ + { + 'native_masking_view_id': '0B:33:4A:6E:81:38:EC:11:90:2B:00:' + '60:16:63_AIX_21_335', + 'name': 'AIX_PowerHA_node2', + 'storage_id': '12345', + 'native_storage_host_id': 'AIX_21', + 'native_volume_id': '335' + }] +INITIATOR_RESULT = [ + { + 'name': '20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79', + 'storage_id': '12345', + 'native_storage_host_initiator_id': '20:00:00:00:C9:9B:57:79:10:' + '00:00:00:C9:9B:57:79', + 'wwn': '20:00:00:00:C9:9B:57:79:10:00:00:00:C9:9B:57:79', + 'type': 'fc', + 'status': 'online', + 'native_storage_host_id': 'aix_ma' + }] +HOST_RESULT = [ + { + 'name': 'aix_ma', + 'storage_id': '12345', + 'native_storage_host_id': 'aix_ma', + 'os_type': 'Unknown', + 'status': 'normal', + 'ip_address': '8.44.129.26' + }] def create_driver(): @@ -696,3 +765,22 @@ def test_get_ports(self): BUS_PORT_DATAS, BUS_PORT_STATE_DATAS]) ports = self.driver.list_ports(context) self.assertDictEqual(ports[0], PORT_RESULT[0]) + + def test_get_masking_views(self): + NaviClient.exec = mock.Mock(side_effect=[VIEW_DATAS]) + views = self.driver.list_masking_views(context) + self.assertDictEqual(views[0], VIEW_RESULT[0]) + + def test_get_initiators(self): + NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS, + IO_PORT_CONFIG_DATAS, + ISCSI_PORT_DATAS, PORT_DATAS, + BUS_PORT_DATAS, + BUS_PORT_STATE_DATAS]) + initiators = self.driver.list_storage_host_initiators(context) + self.assertDictEqual(initiators[0], INITIATOR_RESULT[0]) + + def test_get_hosts(self): + NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS]) + hosts = self.driver.list_storage_hosts(context) + self.assertDictEqual(hosts[0], HOST_RESULT[0])
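
Note on the VNX block patch above: the masking-view data is scraped from `storagegroup -messner -list -host` CLI output rather than a structured API, so the HLU/ALU handling is easier to follow outside the diff. The sketch below is illustrative only, not part of the patch; the helper name `parse_alu_ids` and the trimmed sample text are invented, while the real logic lives in `cli_sg_to_list` in `navi_handler.py`. It applies the same expression as `consts.ALU_PAIRS_PATTERN` to pull ALU (LUN) ids out of a storage-group listing shaped like the `VIEW_DATAS` test fixture.

```python
import re

# Same expression as consts.ALU_PAIRS_PATTERN in the patch: a line made of an
# HLU number and an ALU number separated by whitespace, e.g. "    1     335".
ALU_PAIRS_PATTERN = r'^[0-9]+\s+[0-9]+$'

# Hypothetical excerpt in the same shape as the VIEW_DATAS test fixture.
SAMPLE_OUTPUT = """
Storage Group Name: AIX_PowerHA_node2
HLU/ALU Pairs:

  HLU Number     ALU Number
  ----------     ----------
    1               335
Shareable:             YES
"""


def parse_alu_ids(cli_text):
    """Return the set of ALU (array LUN) ids found in storage-group output."""
    pattern = re.compile(ALU_PAIRS_PATTERN)
    lun_ids = set()
    for raw_line in cli_text.split('\n'):
        line = raw_line.strip()
        # Key/value lines such as "Host name: AIX_21" contain ':' and are
        # handled separately in the driver; only bare "HLU ALU" rows match.
        if line and ':' not in line and pattern.search(line):
            lun_ids.add(line.split()[1])
    return lun_ids


print(parse_alu_ids(SAMPLE_OUTPUT))  # -> {'335'}
```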
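
The new unit tests in these patches lean on one `unittest.mock` idiom: assigning a list to `side_effect` makes consecutive calls to the mocked method return consecutive fixtures, which is how `test_masking_views` feeds `GET_ALL_HOSTS` and then `GET_HOST_MAPPING` to a single mocked `get_rest_info`, and how `test_get_ports` chains several CLI outputs through `NaviClient.exec`. A minimal standalone illustration, with made-up fixture values:

```python
from unittest import mock

# Each call to the mock returns the next item from the side_effect list.
fake_rest = mock.Mock(side_effect=[{'data': {'hosts': []}},
                                   {'data': {'mappings': []}}])

print(fake_rest())  # first call  -> {'data': {'hosts': []}}
print(fake_rest())  # second call -> {'data': {'mappings': []}}
# A third call would raise StopIteration: the side_effect list is exhausted,
# so the fixture order must match the order of REST/CLI calls in the driver.
```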