diff --git a/.github/workflows/delfin_ci.yml b/.github/workflows/delfin_ci.yml index ac59b04bd..0edf3da7e 100644 --- a/.github/workflows/delfin_ci.yml +++ b/.github/workflows/delfin_ci.yml @@ -7,8 +7,7 @@ jobs: strategy: max-parallel: 6 matrix: - platform: - - ubuntu-18.04 + platform: [ubuntu-18.04, ubuntu-20.04] python-version: [ 3.6, 3.7, 3.8 ] steps: diff --git a/.github/workflows/delfin_e2e_test.yml b/.github/workflows/delfin_e2e_test.yml index 1ae643308..b87c2b2e7 100644 --- a/.github/workflows/delfin_e2e_test.yml +++ b/.github/workflows/delfin_e2e_test.yml @@ -7,8 +7,7 @@ jobs: strategy: max-parallel: 6 matrix: - platform: - - ubuntu-18.04 + platform: [ubuntu-18.04, ubuntu-20.04] python-version: [ 3.6 ] steps: diff --git a/delfin/common/constants.py b/delfin/common/constants.py index f6a352445..a53b399ad 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -492,6 +492,9 @@ class StorageMetric: RESPONSE_TIME = Metrics.RESPONSE_TIME READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME + CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO + READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO + WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class StoragePoolMetric: @@ -503,6 +506,11 @@ class StoragePoolMetric: READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME + READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME + WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME + CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO + READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO + WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class VolumeMetric: @@ -545,6 +553,11 @@ class PortMetric: READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME + READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME + WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME + CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO + READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO + WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class DiskMetric: @@ -556,6 +569,11 @@ class DiskMetric: READ_THROUGHPUT = Metrics.READ_THROUGHPUT WRITE_THROUGHPUT = Metrics.WRITE_THROUGHPUT RESPONSE_TIME = Metrics.RESPONSE_TIME + READ_RESPONSE_TIME = Metrics.READ_RESPONSE_TIME + WRITE_RESPONSE_TIME = Metrics.WRITE_RESPONSE_TIME + CACHE_HIT_RATIO = Metrics.CACHE_HIT_RATIO + READ_CACHE_HIT_RATIO = Metrics.READ_CACHE_HIT_RATIO + WRITE_CACHE_HIT_RATIO = Metrics.WRITE_CACHE_HIT_RATIO class FileSystemMetric: diff --git a/delfin/drivers/macro_san/__init__.py b/delfin/drivers/macro_san/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/macro_san/ms/__init__.py b/delfin/drivers/macro_san/ms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/macro_san/ms/consts.py b/delfin/drivers/macro_san/ms/consts.py new file mode 100644 index 000000000..fbed42965 --- /dev/null +++ b/delfin/drivers/macro_san/ms/consts.py @@ -0,0 +1,764 @@ +# Copyright 2022 The SODA Authors. +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from delfin.common import constants
+
+# Command
+ODSP_SH = '/odsp/scripts/odsp_sh.sh'
+SYSTEM_QUERY = 'system mgt query'
+SYSTEM_VERSION = 'system mgt getversion'
+SYSTEM_CPU = 'system mgt getcpuinfo'
+POOL_LIST = 'pool mgt getlist'
+RAID_LIST = 'raid mgt getlist -p {}'
+LUN_LIST = 'lun mgt getlist -p {}'
+LUN_QUERY = 'lun mgt query -n {}'
+DSU_LIST = 'dsu mgt getlist'
+DISK_LIST = 'disk mgt getlist -d {}'
+DISK_QUERY = 'disk mgt query -d {}'
+HA_STATUS = 'ha mgt getstatus'
+CLIENT_INITIATOR_GETLIST = 'client initiator getlist -t all'
+CLIENT_LIST = 'client mgt getclientlist'
+CLIENT_HOST = 'client host gethostlist'
+HOST_GROUP = 'client hostgroup gethglist'
+HOST_GROUP_N = 'client hostgroup gethostlist -n {}'
+VOLUME_GROUP = 'client lungroup getlglist'
+VOLUME_GROUP_N = 'client lungroup getlunlist -n {}'
+SHARE_LUN_LIST = 'client mgt getsharelunlist -n {}'
+MAPVIEW = 'client mapview getlist'
+TARGET_QUERY_PORT_LIST = 'client target queryportlist'
+SAS_PORT_LIST = 'system sas getportlist -c {}:{}'
+
+# character
+SUCCESSFUL_TAG = 'Command completed successfully.'
+FAILED_TAG = 'Command failed.'
+UNKNOWN_COMMAND_TAG = 'Unknown command.'
+PORT_SUCCESSFUL_TAG = 'Commandcompletedsuccessfully.'
+COLON = ':'
+LEFT_HALF_BRACKET = '['
+AFTER_HALF_BRACKET = 'Version]'
+CPU_INFORMATION_BRACKET = 'CPU Information]'
+SP = 'SP'
+ODSP_MSC_VERSION_KEY = 'ODSP_MSCVersion'
+ODSP_DRIVER_VERSION_KEY = 'ODSP_DriverVersion'
+PROCESSOR_VENDOR_KEY = 'Processor0Vendor_id'
+PROCESSOR_FREQUENCY_KEY = 'Processor0CPUFrequency'
+STORAGE_VENDOR = 'MacroSAN'
+FIELDS_NAME = 'Name:'
+FIELDS_ENABLE = 'enable'
+FIELDS_INITIATOR_ALIAS = 'InitiatorAlias:'
+FIELDS_INITIATOR_HOST = 'N/A'
+FIELDS_HOST_NAME = 'Host Name:'
+FIELDS_HOST_NAME_TWO = 'HostName:'
+FIELDS_HOST_GROUP_NAME = 'Host Group Name:'
+FIELDS_VOLUME_GROUP_NAME = 'LUN Group Name:'
+FIELDS_LUN_NAME = 'LUNName:'
+FIELDS_MAPVIEW_NAME = 'Mapview Name:'
+FIELDS_LINK_STATUS = 'Link Status'
+DSU = 'DSU-'
+DISK = 'Disk-'
+HA_RUNNING_STATUS = 'HARunningStatus'
+PORT = 'port'
+GBPS = 'Gbps'
+MBPS = 'Mbps'
+KBPS = 'Kbps'
+TIME_PATTERN = '%Y-%m-%d %H:%M:%S'
+
+# regular expression
+SYSTEM_CPU_SP_REGULAR = '^\\[SP\\d.* CPU.*]'
+SYSTEM_VERSION_SP_REGULAR = '\\[SP\\d.* Version\\]'
+TARGET_PORT_REGULAR = 'port\\-\\d\\:\\d\\:\\d$'
+
+# The time limit
+TIME_LIMIT = 8
+
+# model
+MODEL_PATH = '{}/delfin/drivers/macro_san/ms/file/{}{}'
+STORAGE_INFO_REGULAR = '^storage_info.*\\.xls$'
+STORAGE_INFO_MODEL_REGULAR = '^MS'
+FTP_PATH_TMP = '/tmp'
+FTP_PATH_FILE = '/tmp/{}'
+
+# alert
+MACRO_SAN_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+OS_PATH = '{}/delfin/drivers/macro_san/ms/file/alert{}'
+ALERT_FILE_NAME = 'alarm_history_query.csv.sp'
+FTP_ALERT_PATH = '/odsp/log/remote'
+YES_FIELDS = '是'
+SEVERITY_MAP = {
+    'fatal': constants.Severity.FATAL,
+    '紧急': constants.Severity.FATAL,
+    'critical': constants.Severity.CRITICAL,
+    '重要': constants.Severity.MAJOR,
+    'major': constants.Severity.MAJOR,
+    'minor': constants.Severity.MINOR,
+    'warning': constants.Severity.WARNING,
+    '警告': constants.Severity.WARNING,
+    'informational': constants.Severity.INFORMATIONAL,
+    'NotSpecified': constants.Severity.NOT_SPECIFIED
+}
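SEVERITY_MAP deliberately mixes lower-case English keys with Chinese keys, because the array's alarm CSV export can report severities in either language. A minimal standalone sketch of the intended lookup, using an invented sample value (the real parsing lives in the alert-handling code of this driver):

```python
# Minimal sketch with an invented sample value: map a raw severity string
# from the alarm CSV export to a delfin Severity constant, falling back to
# NOT_SPECIFIED for anything the table does not know. (The real caller
# lower-cases English values before the lookup.)
raw_severity = '警告'
severity = SEVERITY_MAP.get(raw_severity.strip(),
                            constants.Severity.NOT_SPECIFIED)
assert severity == constants.Severity.WARNING
```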
+
+
+class digital_constant(object):
+    ZERO_INT = 0
+    ONE_INT = 1
+    MINUS_ONE_INT = -1
+    TWO_INT = 2
+    THREE_INT = 3
+    FOUR_INT = 4
+    FIVE_INT = 5
+    SIX_INT = 6
+    SEVEN_INT = 7
+    TWELVE_INT = 12
+    SIXTEEN_INT = 16
+    THIRTY_SIX = 36
+    SIXTY = 60
+
+
+STORAGE_STATUS_MAP = {
+    'normal': constants.StorageStatus.NORMAL,
+    'offline': constants.StorageStatus.OFFLINE,
+    'abnormal': constants.StorageStatus.ABNORMAL,
+    'takeover': constants.StorageStatus.NORMAL,
+    'degraded': constants.StorageStatus.DEGRADED,
+    'unknown': constants.StorageStatus.UNKNOWN,
+}
+
+LIST_VOLUMES_STATUS_MAP = {
+    'normal': constants.StorageStatus.NORMAL,
+    'offline': constants.StorageStatus.OFFLINE,
+    'abnormal': constants.StorageStatus.ABNORMAL,
+    'error': constants.StorageStatus.ABNORMAL,
+    'fault': constants.StorageStatus.ABNORMAL,
+    'faulty': constants.StorageStatus.ABNORMAL,
+    'degraded': constants.StorageStatus.DEGRADED,
+    'unknown': constants.StorageStatus.UNKNOWN
+}
+VOLUME_TYPE_MAP = {
+    'disable': constants.VolumeType.THICK,
+    'enable': constants.VolumeType.THIN
+}
+
+
+class POOL_STATUS_ABNORMAL(object):
+    FAULTY = 'faulty'
+    FAULT = 'fault'
+    ERROR = 'error'
+    ABNORMAL = 'abnormal'
+    ALL = (FAULTY, FAULT, ERROR, ABNORMAL)
+
+
+class POOL_STATUS_NORMAL(object):
+    OFFLINE = 'offline'
+    NORMAL = 'normal'
+    ALL = (OFFLINE, NORMAL)
+
+
+POOLS_STATUS_MAP = {
+    'normal': constants.StoragePoolStatus.NORMAL,
+    'offline': constants.StoragePoolStatus.OFFLINE,
+    'abnormal': constants.StoragePoolStatus.ABNORMAL,
+    'error': constants.StoragePoolStatus.ABNORMAL,
+    'fault': constants.StoragePoolStatus.ABNORMAL,
+    'faulty': constants.StoragePoolStatus.ABNORMAL,
+    'unknown': constants.StoragePoolStatus.UNKNOWN,
+    'degraded': constants.StoragePoolStatus.DEGRADED
+}
+
+DISK_PHYSICAL_TYPE_MAP = {
+    'ssd': constants.DiskPhysicalType.SSD,
+    'sata': constants.DiskPhysicalType.SATA,
+    'sas': constants.DiskPhysicalType.SAS,
+    'nl-ssd': constants.DiskPhysicalType.NL_SSD,
+    'fc': constants.DiskPhysicalType.FC,
+    'lun': constants.DiskPhysicalType.LUN,
+    'ata': constants.DiskPhysicalType.ATA,
+    'flash': constants.DiskPhysicalType.FLASH,
+    'vmdisk': constants.DiskPhysicalType.VMDISK,
+    'nl-sas': constants.DiskPhysicalType.NL_SAS,
+    'ssd-card': constants.DiskPhysicalType.SSD_CARD,
+    'sas-flash-vp': constants.DiskPhysicalType.SAS_FLASH_VP,
+    'hdd': constants.DiskPhysicalType.HDD,
+    'unknown': constants.DiskPhysicalType.UNKNOWN
+}
+
+DISK_LOGICAL_TYPE_MAP = {
+    'free': constants.DiskLogicalType.FREE,
+    'member': constants.DiskLogicalType.MEMBER,
+    'hotspare': constants.DiskLogicalType.HOTSPARE,
+    'cache': constants.DiskLogicalType.CACHE,
+    'aggregate': constants.DiskLogicalType.AGGREGATE,
+    'broken': constants.DiskLogicalType.BROKEN,
+    'foreign': constants.DiskLogicalType.FOREIGN,
+    'labelmaint': constants.DiskLogicalType.LABELMAINT,
+    'maintenance': constants.DiskLogicalType.MAINTENANCE,
+    'shared': constants.DiskLogicalType.SHARED,
+    'spare': constants.DiskLogicalType.SPARE,
+    'unassigned': constants.DiskLogicalType.UNASSIGNED,
+    'unsupported': constants.DiskLogicalType.UNSUPPORTED,
+    'remote': constants.DiskLogicalType.REMOTE,
+    'mediator': constants.DiskLogicalType.MEDIATOR,
+    'data': constants.DiskLogicalType.DATA,
+    'datadisk': constants.DiskLogicalType.DATA,
+    'unknown': constants.DiskLogicalType.UNKNOWN
+}
+
+DISK_STATUS_MAP = {
+    'normal': constants.DiskStatus.NORMAL,
+    'abnormal': constants.DiskStatus.ABNORMAL,
+    'fault': constants.DiskStatus.ABNORMAL,
+    'faulty': constants.DiskStatus.ABNORMAL,
+
'degraded': constants.DiskStatus.DEGRADED, + 'offline': constants.DiskStatus.OFFLINE +} + +CONTROLLERS_STATUS_MAP = { + 'normal': constants.ControllerStatus.NORMAL, + 'dual--single': constants.ControllerStatus.NORMAL, + 'single-single': constants.ControllerStatus.NORMAL, + 'single': constants.ControllerStatus.NORMAL, + 'offline': constants.ControllerStatus.OFFLINE, + 'absent--poweroff': constants.ControllerStatus.OFFLINE, + 'poweroff': constants.ControllerStatus.OFFLINE, + 'fault': constants.ControllerStatus.FAULT, + 'error': constants.ControllerStatus.FAULT, + 'abnormal': constants.ControllerStatus.FAULT, + 'degraded': constants.ControllerStatus.DEGRADED, + 'double-idle': constants.ControllerStatus.NORMAL, + 'double': constants.ControllerStatus.NORMAL, + 'triple': constants.ControllerStatus.NORMAL, + 'quadruple': constants.ControllerStatus.NORMAL, + 'unknown': constants.ControllerStatus.UNKNOWN +} + +PORT_CONNECTION_STATUS_MAP = { + '1': constants.PortConnectionStatus.CONNECTED, + '2': constants.PortConnectionStatus.DISCONNECTED, + 'Full-Linkup': constants.PortConnectionStatus.CONNECTED, + 'Linkdown': constants.PortConnectionStatus.DISCONNECTED +} + +INITIATOR_TYPE_MAP = { + 'fc': constants.InitiatorType.FC, + 'iscsi': constants.InitiatorType.ISCSI, + 'roce': constants.InitiatorType.NVME_OVER_ROCE, + 'sas': constants.InitiatorType.SAS, + 'nvme-of': constants.InitiatorType.NVME_OVER_FABRIC, + 'unknown': constants.InitiatorType.UNKNOWN +} + +INITIATOR_STATUS_MAP = { + 'offline': constants.InitiatorStatus.OFFLINE, + 'online': constants.InitiatorStatus.ONLINE, + 'normal': constants.InitiatorStatus.ONLINE, + 'n/a': constants.InitiatorStatus.UNKNOWN +} + +HOST_OS_TYPES_MAP = { + 'linux': constants.HostOSTypes.LINUX, + 'windows': constants.HostOSTypes.WINDOWS, + 'windows2008': constants.HostOSTypes.WINDOWS, + 'solaris': constants.HostOSTypes.SOLARIS, + 'hp-ux': constants.HostOSTypes.HP_UX, + 'hp_unix': constants.HostOSTypes.HP_UX, + 'aix': constants.HostOSTypes.AIX, + 'xenserver': constants.HostOSTypes.XEN_SERVER, + 'vmware esx': constants.HostOSTypes.VMWARE_ESX, + 'esxi': constants.HostOSTypes.VMWARE_ESX, + 'linux_vis': constants.HostOSTypes.LINUX_VIS, + 'windows server 2012': constants.HostOSTypes.WINDOWS_SERVER_2012, + 'windows2012': constants.HostOSTypes.WINDOWS_SERVER_2012, + 'oracle vm': constants.HostOSTypes.ORACLE_VM, + 'open vms': constants.HostOSTypes.OPEN_VMS, + 'mac os': constants.HostOSTypes.MAC_OS, + 'svc': constants.HostOSTypes.UNKNOWN, + 'other': constants.HostOSTypes.UNKNOWN, + 'suse': constants.HostOSTypes.UNKNOWN, + 'unknown': constants.HostOSTypes.UNKNOWN +} + +PARSE_ALERT_ALERT_ID = '1.3.6.1.2.1.1.3.0' +PARSE_ALERT_TIME = '1.3.6.1.2.1.25.1.2' +PARSE_ALERT_STORAGE = '1.3.6.1.4.1.35904.1.2.1.1' +PARSE_ALERT_NAME = '1.3.6.1.4.1.35904.1.2.1.4.1' +PARSE_ALERT_LOCATION = '1.3.6.1.4.1.35904.1.2.1.4.2' +PARSE_ALERT_DESCRIPTION = '1.3.6.1.4.1.35904.1.2.1.4.3' +PARSE_ALERT_SEVERITY = '1.3.6.1.4.1.35904.1.2.1.4.4' + +ALERT_NAME_CONFIG = { + 'power_supply_failed': '设备供电异常', + 'power_supply_failed_reissue': '设备供电异常重发', + 'power_supply_normal': '设备供电恢复正常', + 'power_supply_abnormal': '设备供电异常', + 'power_supply_abnormal_reissue': '设备供电异常重发', + 'power_supply_absent': '电源模块不在位', + 'power_supply_absent_reissue': '电源模块不在位重发', + 'fan_normal': '风扇模块恢复正常', + 'fan_failed': '风扇模块故障', + 'fan_failed_reissue': '风扇模块故障重发', + 'fan_absent': '风扇模块不在位', + 'fan_absent_reissue': '风扇模块不在位重发', + 'battery_normal': '电池模块恢复正常', + 'battery_failed': '电池模块故障', + 'battery_failed_reissue': '电池模块故障重发', + 
'battery_absent': '电池模块不在位', + 'battery_absent_reissue': '电池模块不在位重发', + 'battery_charging': '电池模块正在充电', + 'battery_will_expire': '电池模块即将超期', + 'battery_expired': '电池模块超期', + 'battery_expired_reissue': '电池模块超期重发', + 'battery_model_inconsistent': '电池模块型号不一致', + 'temperature_normal': '温度恢复正常', + 'temperature_warning': '温度一般告警', + 'temperature_warning_reissue': '温度一般告警重发', + 'temperature_critical': '温度严重告警', + 'temperature_critical_reissue': '温度严重告警重发', + 'Voltage_normal': '电压恢复正常', + 'Voltage_warning': '电压一般告警', + 'Voltage_warning_reissue': '电压一般告警重发', + 'Voltage_critical': '电压严重告警', + 'Voltage_critical_reissue': '电压严重告警重发', + 'sp_power_on': 'SP开机', + 'sp_power_off': 'SP关机', + 'sp_absent': 'SP不在位', + 'sp_memory_shrink': 'SP内存变小', + 'sp_reboot_for_memory_insufficient': 'SP内存不足自动重启', + 'sp_hardware_abnormally': 'SP硬件异常', + 'sp_boot_disk_warning': 'SP系统盘告警', + 'ha_auto_recover_disabled': 'HA自动恢复选项被禁用', + 'ha_heartbeat_lost': 'HA心跳丢失', + 'ha_self_detect_failure': 'HA自检发现故障', + 'ha_takeover': 'SP被接管', + 'ha_takeover_abnormally': 'SP接管异常', + 'ha_recover_successfully': 'SP恢复成功', + 'ha_recover_abnormally': 'SP恢复异常', + 'ha_peer_sp_abnormally': '对端SP异常', + 'cpu_utilization_normal': 'CPU利用率恢复正常', + 'cpu_utilization_warning': 'CPU利用率一般告警', + 'cpu_utilization_serious': 'CPU利用率重要告警', + 'cpu_utilization_critical': 'CPU利用率严重告警', + 'memory_utilization_normal': '内存利用率恢复正常', + 'memory_utilization_warning': '内存利用率告警', + 'sp_average_responsetime_normal': 'SP平均延时恢复正常', + 'sp_average_responsetime_warning': 'SP平均延时告警', + 'host_average_responsetime_normal': '主机平均延时恢复正常', + 'host_average_responsetime_warning': '主机平均延时告警', + 'iscsi_port_average_responsetime_normal': 'iSCSI端口平均延时恢复正常', + 'iscsi_port_average_responsetime_warning': 'iSCSI端口平均延时告警', + 'fc_port_average_responsetime_normal': 'FC端口平均延时恢复正常', + 'fc_port_average_responsetime_warning': 'FC端口平均延时告警', + 'nvmf_port_average_responsetime_normal': 'NVMf端口平均延时恢复正常', + 'nvmf_port_average_responsetime_warning': 'NVMf端口平均延时告警', + 'lun_average_responsetime_normal': 'LUN平均延时恢复正常', + 'lun_average_responsetime_warning': 'LUN平均延时告警', + 'device_busy': '设备管理通道忙', + 'sys_lun_cache_capacity_insufficient': 'SYS-LUN-Cache空间不足', + 'sys_lun_log_capacity_insufficient': 'SYS-LUN-Log空间不足', + 'global_write_cache_disabled_manually': '全局写缓存被手动禁用', + 'global_write_cache_disabled_automatically': '全局写缓存被自动禁用', + 'cache_vault_has_data': 'Cache Vault中有脏数据', + 'software_version_inconsistent': '软件版本不一致', + 'license_expired': 'License超期', + 'system_failure_reboot': '系统异常重启', + 'io_card_safe_remove': 'IO卡安全下电', + 'io_card_pullout_forcibly': 'IO卡暴力拔出', + 'io_card_abnormal': 'IO卡异常', + 'port_linkup': '端口已连接', + 'port_linkdown': '端口断开连接', + 'port_link_recovery': '端口链路恢复', + 'port_link_unstable': '端口链路不稳定', + 'port_abnormal': '端口异常', + 'port_closed': '端口被关闭', + 'port_speed_nonoptimal': '端口非最大速率运行', + 'port_optical_transceiver_mismatch': '端口光模块不匹配', + 'sas_phy_disabled': 'SAS PHY被禁用', + 'sas_phy_inconsistent': 'SAS_PHY速率不一致', + 'sas_port_inconsistent': 'SAS端口连接状态不一致', + 'i_t_connection_recovery': 'I_T连接恢复', + 'i_t_connection_unstable': 'I_T连接不稳定', + 'i_t_connected': 'I_T建立连接', + 'i_t_unconnected': 'I_T未建立连接', + 'i_t_l_insufficient': 'I_T_L低于预期', + 'initiator_has_unestablished_connection': 'Initiator存在未建立的连接', + 'nvmf_subsystem_full_connected': 'NVMf Subsystem完全连接', + 'nvmf_subsystem_partial_connected': 'NVMf Subsystem部分连接', + 'nvmf_subsystem_unconnected': 'NVMf Subsystem未连接', + 'ep_online': 'EP上线', + 'ep_offline': 'EP离线', + 'ep_install_unproperly': 'EP未安装到位', + 'ep_disordered_link': 'EP拓扑乱序', + 
'dsu_inconsistent_link': 'DSU拓扑不一致', + 'disk_online': '磁盘上线', + 'disk_offline': '磁盘异常离线', + 'disk_safe_remove': '磁盘安全下电', + 'disk_pullout_forcibly': '磁盘暴力拔出', + 'disk_warning': '磁盘告警', + 'disk_failed': '磁盘故障', + 'disk_path_missing': '磁盘路径丢失', + 'disk_poweron_time_warning': '磁盘上电时间告警', + 'disk_poweron_time_warning_reissue': '磁盘上电时间告警重发', + 'ssd_life_remaining_warning': 'SSD预计剩余寿命预警', + 'ssd_life_remaining_critical': 'SSD预计剩余寿命严重告警', + 'ssd_time_remaining_warning': 'SSD预计可用时间预警', + 'ssd_time_remaining_critical': 'SSD预计可用时间严重告警', + 'ssd_interface_unknown': 'SSD接口类型未知', + 'raid_normal': 'RAID恢复正常', + 'raid_degraded': 'RAID降级', + 'raid_faulty': 'RAID错误', + 'raid_failed': 'RAID故障', + 'raid_rebuild_start': 'RAID开始重建', + 'raid_rebuild_successfully': 'RAID完成重建', + 'raid_cannot_rebuild': 'RAID重建等待热备盘', + 'raid_rebuild_paused_abnormally': 'RAID重建失败', + 'raid_spare_capacity_warning': 'RAID热备空间告警', + 'raid_sync_successfully': 'RAID完成同步', + 'raid_sync_failed': 'RAID同步失败', + 'raid_disk_type_inconsistent': 'RAID成员磁盘类型不一致', + 'lun_normal': 'LUN恢复正常', + 'lun_faulty': 'LUN错误', + 'lun_write_zero_failed': 'LUN自动置零功能失效', + 'lun_write_cache_frozen': 'LUN写缓存被冻结', + 'thinlun_expand_failed': 'Thin-LUN自动扩容失败', + 'thinlun_physical_capacity_will_useup': 'Thin-LUN物理空间即将用光', + 'thinlun_physical_capacity_has_usedup': 'Thin-LUN物理空间已经用光', + 'thinlun_metadata_abnormal': 'Thin-LUN元数据异常', + 'pool_capacity_normal': '存储池空间使用率恢复正常', + 'pool_capacity_warning': '存储池空间使用率一般告警', + 'pool_capacity_serious': '存储池空间使用率重要告警', + 'pool_capacity_critical': '存储池空间使用率严重告警', + 'pool_capacity_has_usedup': '存储池空间已经用光', + 'pool_capacity_over_quota': '存储池已分配容量超出配额', + 'pool_user_capacity_over_quota': '存储池用户容量超出配额', + 'pool_data_protection_capacity_over_quota': '存储池数据保护容量超出配额', + 'volume_online': '卷上线', + 'volume_offline': '卷离线', + 'volume_path_recovery': '卷路径恢复', + 'volume_path_missing': '卷路径丢失', + 'volume_attached': '卷联机', + 'volume_detached': '卷脱机', + 'volume_io_error': '卷IO错误', + 'volume_average_responsetime_normal': '卷平均延时恢复正常', + 'volume_average_responsetime_warning': '卷平均延时告警', + 'snapshot_resource_full': '快照资源空间即将用光', + 'snapshot_resource_invalid': '快照资源数据无效', + 'snapshot_resource_expand_successfully': '快照资源自动扩容成功', + 'snapshot_resource_expand_failed': '快照资源自动扩容失败', + 'snapshot_point_delete_automatically': '自动删除快照时间点', + 'snapshot_point_create_failed': '自动创建快照时间点失败', + 'snapshot_rollback_successfully': '快照回滚成功', + 'snapshot_rollback_failed': '快照回滚失败', + 'replication_start': '开始复制', + 'replication_successfully': '复制成功', + 'replication_failed': '复制失败', + 'replication_scan_failed': '扫描失败', + 'replication_replica_faulty': '副本资源复制状态异常', + 'xan_link_unreachable': 'XAN链路不可达', + 'xan_link_reachable': 'XAN链路恢复', + 'sdas_link_unreachable': 'SDAS链路不可达', + 'sdas_link_reachable': 'SDAS链路恢复', + 'arbiter_unreachable': '节点不能访问仲裁者', + 'arbiter_reachable': '节点可以访问仲裁者', + 'mirror_auto_swap_successfully': '镜像对自动反转成功', + 'mirror_auto_swap_failed': '镜像对自动反转失败', + 'mirror_unsynchronized': '镜像对未同步', + 'mirror_synchronized': '镜像对恢复已同步', + 'mirror_negotiating': '镜像对是协商状态', + 'clone_sync_start': '开始克隆同步', + 'clone_sync_successfully': '克隆同步成功', + 'clone_sync_failed': '克隆同步失败', + 'migrate_start': '开始迁移', + 'migrate_successfully': '迁移成功', + 'migrate_failed': '迁移失败', + 'migrate_negotiating': '迁移对是协商状态', + 'migrate_auto_disable_failed': '迁移自动禁用失败', + 'migrate_itl_remaining': '迁移残留ITL', + 'dedup_data_exceed_spec': '重删数据量超过规格', + 'dedup_discard_some_fingerprints': '重删丢弃部分指纹', + 'sp_temperature_normal': 'SP温度恢复正常', + 'sp_temperature_warning': 'SP温度一般告警', + 
'sp_temperature_warning_reissue': 'SP温度一般告警重发', + 'sp_temperature_critical': 'SP温度严重告警', + 'sp_temperature_critical_reissue': 'SP温度严重告警重发', + 'sp_voltage_normal': 'SP电压恢复正常', + 'sp_voltage_warning': 'SP电压一般告警', + 'sp_voltage_warning_reissue': 'SP电压一般告警重发', + 'sp_voltage_critical': 'SP电压严重告警', + 'sp_voltage_critical_reissue': 'SP电压严重告警重发', + 'ep_temperature_normal': 'EP温度恢复正常', + 'ep_temperature_warning': 'EP温度一般告警', + 'ep_temperature_warning_reissue': 'EP温度一般告警重发', + 'ep_temperature_critical': 'EP温度严重告警', + 'ep_temperature_critical_reissue': 'EP温度严重告警重发', + 'spu_bat_normal': 'SPU电池模块恢复正常', + 'spu_bat_failed': 'SPU电池模块变为故障', + 'spu_bat_failed_reissue': 'SPU电池模块故障重发', + 'spu_bat_absent': 'SPU电池模块不在位', + 'spu_bat_absent_reissue': 'SPU电池模块不在位重发', + 'spu_bat_will_expire': 'SPU电池模块即将超期', + 'spu_bat_expired': 'SPU电池模块超期', + 'spu_bat_expired_reissue': 'SPU电池模块超期重发', + 'cmos_bat_normal': 'CMOS电池恢复正常', + 'cmos_bat_failed': 'CMOS电池电力不足', + 'cmos_bat_failed_reissue': 'CMOS电池电力不足重发', + 'fc_link_error': 'FC链路错误', + 'sp_unexpected_power_down': 'SP异常掉电', + 'ha_takeover_successfully': 'HA接管成功', + 'ha_takeover_failed': 'HA接管失败', + 'write_cache_frozen': '写缓存被冻结', + 'write_cache_disabled': '写缓存被自动禁用', + 'sas_phy_speed_warning': 'SAS_PHY速率告警', + 'disk_pullout_electrified': '磁盘带电拔出', + 'sys_raid_warning': 'SYS_RAID告警', + 'thinlun_physical_capacity_usedup': 'Thin-LUN物理空间已经用光', + 'pool_capacity_will_useup': '存储池空间即将用光', + 'sdas_link_recovery': 'SDAS链路恢复', + 'sdas_auto_swap_successfully': 'SDAS自动反转成功', + 'sdas_auto_swap_failed': 'SDAS自动反转失败', +} + +PARSE_ALERT_SEVERITY_MAP = { + '0': constants.Severity.NOT_SPECIFIED, + '1': constants.Severity.FATAL, + '2': constants.Severity.MAJOR, + '3': constants.Severity.WARNING, + '4': constants.Severity.INFORMATIONAL, +} + + +STORAGE_CAP = { + constants.StorageMetric.IOPS.name: { + "unit": constants.StorageMetric.IOPS.unit, + "description": constants.StorageMetric.IOPS.description + }, + constants.StorageMetric.READ_IOPS.name: { + "unit": constants.StorageMetric.READ_IOPS.unit, + "description": constants.StorageMetric.READ_IOPS.description + }, + constants.StorageMetric.WRITE_IOPS.name: { + "unit": constants.StorageMetric.WRITE_IOPS.unit, + "description": constants.StorageMetric.WRITE_IOPS.description + }, + constants.StorageMetric.THROUGHPUT.name: { + "unit": constants.StorageMetric.THROUGHPUT.unit, + "description": constants.StorageMetric.THROUGHPUT.description + }, + constants.StorageMetric.READ_THROUGHPUT.name: { + "unit": constants.StorageMetric.READ_THROUGHPUT.unit, + "description": constants.StorageMetric.READ_THROUGHPUT.description + }, + constants.StorageMetric.WRITE_THROUGHPUT.name: { + "unit": constants.StorageMetric.WRITE_THROUGHPUT.unit, + "description": constants.StorageMetric.WRITE_THROUGHPUT.description + }, + constants.StorageMetric.RESPONSE_TIME.name: { + "unit": constants.StorageMetric.RESPONSE_TIME.unit, + "description": constants.StorageMetric.RESPONSE_TIME.description + }, + constants.StorageMetric.READ_RESPONSE_TIME.name: { + "unit": constants.StorageMetric.READ_RESPONSE_TIME.unit, + "description": constants.StorageMetric.READ_RESPONSE_TIME.description + }, + constants.StorageMetric.WRITE_RESPONSE_TIME.name: { + "unit": constants.StorageMetric.WRITE_RESPONSE_TIME.unit, + "description": constants.StorageMetric.WRITE_RESPONSE_TIME.description + }, + constants.StorageMetric.CACHE_HIT_RATIO.name: { + "unit": constants.StorageMetric.CACHE_HIT_RATIO.unit, + "description": constants.StorageMetric.CACHE_HIT_RATIO.description + }, + 
constants.StorageMetric.READ_CACHE_HIT_RATIO.name: { + "unit": constants.StorageMetric.READ_CACHE_HIT_RATIO.unit, + "description": constants.StorageMetric.READ_CACHE_HIT_RATIO.description + }, + constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name: { + "unit": constants.StorageMetric.WRITE_CACHE_HIT_RATIO.unit, + "description": + constants.StorageMetric.WRITE_CACHE_HIT_RATIO.description + } +} + +VOLUME_CAP = { + constants.VolumeMetric.IOPS.name: { + "unit": constants.VolumeMetric.IOPS.unit, + "description": constants.VolumeMetric.IOPS.description + }, + constants.VolumeMetric.READ_IOPS.name: { + "unit": constants.VolumeMetric.READ_IOPS.unit, + "description": constants.VolumeMetric.READ_IOPS.description + }, + constants.VolumeMetric.WRITE_IOPS.name: { + "unit": constants.VolumeMetric.WRITE_IOPS.unit, + "description": constants.VolumeMetric.WRITE_IOPS.description + }, + constants.VolumeMetric.THROUGHPUT.name: { + "unit": constants.VolumeMetric.THROUGHPUT.unit, + "description": constants.VolumeMetric.THROUGHPUT.description + }, + constants.VolumeMetric.READ_THROUGHPUT.name: { + "unit": constants.VolumeMetric.READ_THROUGHPUT.unit, + "description": constants.VolumeMetric.READ_THROUGHPUT.description + }, + constants.VolumeMetric.WRITE_THROUGHPUT.name: { + "unit": constants.VolumeMetric.WRITE_THROUGHPUT.unit, + "description": constants.VolumeMetric.WRITE_THROUGHPUT.description + }, + constants.VolumeMetric.RESPONSE_TIME.name: { + "unit": constants.VolumeMetric.RESPONSE_TIME.unit, + "description": constants.VolumeMetric.RESPONSE_TIME.description + }, + constants.VolumeMetric.READ_RESPONSE_TIME.name: { + "unit": constants.VolumeMetric.READ_RESPONSE_TIME.unit, + "description": constants.VolumeMetric.READ_RESPONSE_TIME.description + }, + constants.VolumeMetric.WRITE_RESPONSE_TIME.name: { + "unit": constants.VolumeMetric.WRITE_RESPONSE_TIME.unit, + "description": constants.VolumeMetric.WRITE_RESPONSE_TIME.description + }, + constants.VolumeMetric.CACHE_HIT_RATIO.name: { + "unit": constants.VolumeMetric.CACHE_HIT_RATIO.unit, + "description": constants.VolumeMetric.CACHE_HIT_RATIO.description + }, + constants.VolumeMetric.READ_CACHE_HIT_RATIO.name: { + "unit": constants.VolumeMetric.READ_CACHE_HIT_RATIO.unit, + "description": constants.VolumeMetric.READ_CACHE_HIT_RATIO.description + }, + constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.name: { + "unit": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.unit, + "description": constants.VolumeMetric.WRITE_CACHE_HIT_RATIO.description + } +} + +DISK_CAP = { + constants.DiskMetric.IOPS.name: { + "unit": constants.DiskMetric.IOPS.unit, + "description": constants.DiskMetric.IOPS.description + }, + constants.DiskMetric.READ_IOPS.name: { + "unit": constants.DiskMetric.READ_IOPS.unit, + "description": constants.DiskMetric.READ_IOPS.description + }, + constants.DiskMetric.WRITE_IOPS.name: { + "unit": constants.DiskMetric.WRITE_IOPS.unit, + "description": constants.DiskMetric.WRITE_IOPS.description + }, + constants.DiskMetric.THROUGHPUT.name: { + "unit": constants.DiskMetric.THROUGHPUT.unit, + "description": constants.DiskMetric.THROUGHPUT.description + }, + constants.DiskMetric.READ_THROUGHPUT.name: { + "unit": constants.DiskMetric.READ_THROUGHPUT.unit, + "description": constants.DiskMetric.READ_THROUGHPUT.description + }, + constants.DiskMetric.WRITE_THROUGHPUT.name: { + "unit": constants.DiskMetric.WRITE_THROUGHPUT.unit, + "description": constants.DiskMetric.WRITE_THROUGHPUT.description + }, + constants.DiskMetric.RESPONSE_TIME.name: { + "unit": 
constants.DiskMetric.RESPONSE_TIME.unit,
+        "description": constants.DiskMetric.RESPONSE_TIME.description
+    },
+    constants.DiskMetric.READ_RESPONSE_TIME.name: {
+        "unit": constants.DiskMetric.READ_RESPONSE_TIME.unit,
+        "description": constants.DiskMetric.READ_RESPONSE_TIME.description
+    },
+    constants.DiskMetric.WRITE_RESPONSE_TIME.name: {
+        "unit": constants.DiskMetric.WRITE_RESPONSE_TIME.unit,
+        "description": constants.DiskMetric.WRITE_RESPONSE_TIME.description
+    },
+    constants.DiskMetric.CACHE_HIT_RATIO.name: {
+        "unit": constants.DiskMetric.CACHE_HIT_RATIO.unit,
+        "description": constants.DiskMetric.CACHE_HIT_RATIO.description
+    },
+    constants.DiskMetric.READ_CACHE_HIT_RATIO.name: {
+        "unit": constants.DiskMetric.READ_CACHE_HIT_RATIO.unit,
+        "description": constants.DiskMetric.READ_CACHE_HIT_RATIO.description
+    },
+    constants.DiskMetric.WRITE_CACHE_HIT_RATIO.name: {
+        "unit": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.unit,
+        "description": constants.DiskMetric.WRITE_CACHE_HIT_RATIO.description
+    }
+}
+
+PORT_CAP = {
+    constants.PortMetric.IOPS.name: {
+        "unit": constants.PortMetric.IOPS.unit,
+        "description": constants.PortMetric.IOPS.description
+    },
+    constants.PortMetric.READ_IOPS.name: {
+        "unit": constants.PortMetric.READ_IOPS.unit,
+        "description": constants.PortMetric.READ_IOPS.description
+    },
+    constants.PortMetric.WRITE_IOPS.name: {
+        "unit": constants.PortMetric.WRITE_IOPS.unit,
+        "description": constants.PortMetric.WRITE_IOPS.description
+    },
+    constants.PortMetric.THROUGHPUT.name: {
+        "unit": constants.PortMetric.THROUGHPUT.unit,
+        "description": constants.PortMetric.THROUGHPUT.description
+    },
+    constants.PortMetric.READ_THROUGHPUT.name: {
+        "unit": constants.PortMetric.READ_THROUGHPUT.unit,
+        "description": constants.PortMetric.READ_THROUGHPUT.description
+    },
+    constants.PortMetric.WRITE_THROUGHPUT.name: {
+        "unit": constants.PortMetric.WRITE_THROUGHPUT.unit,
+        "description": constants.PortMetric.WRITE_THROUGHPUT.description
+    },
+    constants.PortMetric.RESPONSE_TIME.name: {
+        "unit": constants.PortMetric.RESPONSE_TIME.unit,
+        "description": constants.PortMetric.RESPONSE_TIME.description
+    },
+    constants.PortMetric.READ_RESPONSE_TIME.name: {
+        "unit": constants.PortMetric.READ_RESPONSE_TIME.unit,
+        "description": constants.PortMetric.READ_RESPONSE_TIME.description
+    },
+    constants.PortMetric.WRITE_RESPONSE_TIME.name: {
+        "unit": constants.PortMetric.WRITE_RESPONSE_TIME.unit,
+        "description": constants.PortMetric.WRITE_RESPONSE_TIME.description
+    },
+    constants.PortMetric.CACHE_HIT_RATIO.name: {
+        "unit": constants.PortMetric.CACHE_HIT_RATIO.unit,
+        "description": constants.PortMetric.CACHE_HIT_RATIO.description
+    },
+    constants.PortMetric.READ_CACHE_HIT_RATIO.name: {
+        "unit": constants.PortMetric.READ_CACHE_HIT_RATIO.unit,
+        "description": constants.PortMetric.READ_CACHE_HIT_RATIO.description
+    },
+    constants.PortMetric.WRITE_CACHE_HIT_RATIO.name: {
+        "unit": constants.PortMetric.WRITE_CACHE_HIT_RATIO.unit,
+        "description": constants.PortMetric.WRITE_CACHE_HIT_RATIO.description
+    }
+}
+FTP_PERF_PATH = '/odsp/log/local/perf'
+STRAGE_REGULAR = '^perf_device'
+LUN_REGULAR = '^perf_lun'
+SASPORT_REGULAR = '^perf_sasport'
+ISCSIPORT_REGULAR = '^perf_iscsiport'
+FCPORT_REGULAR = '^perf_fcport'
+DISK_REGULAR = '^perf_disk'
+SYSTEM_PERFORMANCE_FILE = 'system performance getfilelist'
+VERSION_SHOW = 'versionshow'
+CSV = '.csv'
+SIXTY = 60
+ADD_FOLDER = '{}/delfin/drivers/utils/performance_file/macro_san/{}{}{}'
+PERF_LUN = 'perf_lun_'
+PERF_SP = '_SP'
+PERF_SAS_PORT = 'perf_sasport_'
+PERF_ISCSI_PORT = 'perf_iscsiport_'
+GET_DATE = 'date +%s'
+SPECIAL_VERSION = 'Version:'
+SAS_PORT = 'sasport'
+ISCSI_PORT = 'iscsiport'
+FC_PORT = 'fcport'
diff --git a/delfin/drivers/macro_san/ms/file/__init__.py b/delfin/drivers/macro_san/ms/file/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/drivers/macro_san/ms/macro_ssh_client.py b/delfin/drivers/macro_san/ms/macro_ssh_client.py
new file mode 100644
index 000000000..715fdf0e7
--- /dev/null
+++ b/delfin/drivers/macro_san/ms/macro_ssh_client.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The SODA Authors.
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import time
+
+import paramiko
+import six
+from oslo_log import log as logging
+
+from delfin import cryptor
+from delfin import exception, utils
+from delfin.drivers.utils.ssh_client import SSHPool
+
+LOG = logging.getLogger(__name__)
+
+
+class MacroSanSSHPool(SSHPool):
+    def create(self):
+        ssh = paramiko.SSHClient()
+        try:
+            if self.ssh_pub_key is None:
+                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            else:
+                host_key = '%s %s %s' % \
+                           (self.ssh_host, self.ssh_pub_key_type,
+                            self.ssh_pub_key)
+                self.set_host_key(host_key, ssh)
+
+            ssh.connect(hostname=self.ssh_host, port=self.ssh_port,
+                        username=self.ssh_username,
+                        password=cryptor.decode(self.ssh_password),
+                        timeout=self.ssh_conn_timeout,
+                        banner_timeout=self.ssh_conn_timeout)
+            transport = ssh.get_transport()
+            transport.set_keepalive(self.ssh_conn_timeout)
+            return ssh
+        except Exception as e:
+            err = six.text_type(e)
+            LOG.error(err)
+            if 'timed out' in err:
+                raise exception.InvalidIpOrPort()
+            elif 'No authentication methods available' in err \
+                    or 'Authentication failed' in err:
+                raise exception.InvalidUsernameOrPassword()
+            elif 'not a valid RSA private key file' in err:
+                raise exception.InvalidPrivateKey()
+            elif 'not found in known_hosts' in err:
+                raise exception.SSHNotFoundKnownHosts(self.ssh_host)
+            else:
+                raise exception.SSHException(err)
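For orientation, a rough sketch of how this pool is meant to be driven. The nested `ssh` kwargs mirror what the handler below passes through, but the host, credentials, and exact kwargs shape are invented for illustration; in real use they come from delfin's access info for the registered storage:

```python
# Hypothetical usage sketch; all values are invented.
kwargs = {
    'ssh': {
        'host': '192.0.2.10',
        'port': 22,
        'username': 'admin',
        'password': 'encoded-password',  # cryptor-encoded in real use
    }
}
pool = MacroSanSSHPool(**kwargs)
# do_exec_shell (defined below) runs each command in one interactive
# shell session and returns the combined output.
output = pool.do_exec_shell(['system mgt query'])
```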
+
+    def do_exec_shell(self, command_list, sleep_time=0.5):
+        result = ''
+        try:
+            with self.item() as ssh:
+                if command_list and ssh:
+                    channel = ssh.invoke_shell()
+                    for command in command_list:
+                        utils.check_ssh_injection(command)
+                        channel.send(command + '\n')
+                        time.sleep(sleep_time)
+                    channel.send("exit" + "\n")
+                    channel.close()
+                    while True:
+                        resp = channel.recv(9999).decode('utf8')
+                        if not resp:
+                            break
+                        result += resp
+            if 'is not a recognized command' in result:
+                raise exception.InvalidIpOrPort()
+        except paramiko.AuthenticationException as ae:
+            LOG.error('doexec Authentication error:{}'.format(ae))
+            raise exception.InvalidUsernameOrPassword()
+        except Exception as e:
+            err = six.text_type(e)
+            LOG.error(err)
+            if 'timed out' in err \
+                    or 'SSH connect timeout' in err:
+                raise exception.SSHConnectTimeout()
+            elif 'No authentication methods available' in err \
+                    or 'Authentication failed' in err \
+                    or 'Invalid username or password' in err:
+                raise exception.InvalidUsernameOrPassword()
+            elif 'not a valid RSA private key file' in err \
+                    or 'not a valid RSA private key' in err:
+                raise exception.InvalidPrivateKey()
+            elif 'Unable to connect to port' in err \
+                    or 'Invalid ip or port' in err:
+                raise exception.InvalidIpOrPort()
+            else:
+                raise exception.SSHException(err)
+        return result
diff --git a/delfin/drivers/macro_san/ms/ms_handler.py b/delfin/drivers/macro_san/ms/ms_handler.py
new file mode 100644
index 000000000..d552b5cce
--- /dev/null
+++ b/delfin/drivers/macro_san/ms/ms_handler.py
@@ -0,0 +1,1290 @@
+# Copyright 2022 The SODA Authors.
+# Copyright (c) 2022 Huawei Technologies Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import codecs
+import csv
+import datetime
+import hashlib
+import os
+import re
+import shutil
+import tarfile
+import time
+
+import six
+import xlrd
+from oslo_log import log
+from oslo_utils import units
+
+from delfin import exception
+from delfin.common import constants
+from delfin.drivers.macro_san.ms import consts
+from delfin.drivers.macro_san.ms.consts import digital_constant
+from delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool
+from delfin.drivers.utils.tools import Tools
+
+LOG = log.getLogger(__name__)
+
+
+class MsHandler(object):
+
+    def __init__(self, **kwargs):
+        self.ssh_pool = MacroSanSSHPool(**kwargs)
+        ssh_access = kwargs.get('ssh')
+        self.ssh_host = ssh_access.get('host')
+        self.down_lock = True
+
+    def login(self):
+        res = ''
+        try:
+            res = self.ssh_pool.do_exec_shell([consts.ODSP_SH])
+        except Exception as e:
+            LOG.error('Failed to ssh login macro_san %s' % (
+                six.text_type(e)))
+        if consts.UNKNOWN_COMMAND_TAG in res:
+            try:
+                self.ssh_pool.do_exec_shell([consts.SYSTEM_QUERY])
+                self.down_lock = False
+            except Exception as e:
+                LOG.error('Failed to cli login macro_san %s' % (
+                    six.text_type(e)))
+                raise e
+
+    def get_storage(self, storage_id):
+        storage_data_map = self.get_data_query(consts.SYSTEM_QUERY)
+        if not storage_data_map:
+            raise exception.SSHException('The command returns empty data')
+        device_uuid = storage_data_map.get('DeviceUUID')
+        serial_number = '{}/{}'.format(self.ssh_host, device_uuid)
+        storage_name = storage_data_map.get('DeviceName')
+        firmware_version = self.get_firmware_version()
+        pools = self.list_storage_pools(storage_id)
+        total_capacity = digital_constant.ZERO_INT
+        used_capacity = digital_constant.ZERO_INT
+        for pool in pools:
+            total_capacity += pool.get('total_capacity')
+            used_capacity += pool.get('used_capacity')
+        disks = self.list_disks(storage_id)
+        raw_capacity = digital_constant.ZERO_INT
+        for disk in disks:
+            raw_capacity += disk.get('capacity')
+        storage_status = self.get_storage_status(storage_id)
+        model = self.get_storage_model(storage_id)
+        storage = {
+            'name': storage_name if storage_name else device_uuid,
+            'vendor': consts.STORAGE_VENDOR,
+            'status': storage_status,
+            'model': model,
+            'serial_number': serial_number,
+            'firmware_version': firmware_version,
+            'raw_capacity': raw_capacity,
+            'total_capacity': total_capacity,
+            'used_capacity': used_capacity,
+            'free_capacity': total_capacity - used_capacity
+        }
+        return storage
+
+    def get_storage_model(self, storage_id):
+        storage_model = ''
+        if not self.down_lock:
+            return storage_model
+        local_path = self.download_model_file(storage_id)
+        if local_path:
+            try:
+                storage_model = self.analysis_model_file(local_path,
+                                                         storage_model)
+            finally:
+                shutil.rmtree(local_path)
+        return storage_model
+
+    @staticmethod
+    def analysis_model_file(local_path, storage_model):
+        list_dir = os.listdir(local_path)
+        for dir_name in list_dir:
+            excel = xlrd.open_workbook('{}/{}'.format(local_path, dir_name))
+            sheet = excel.sheet_by_index(consts.digital_constant.ZERO_INT)
+            rows_data_list = sheet.row_values(consts.digital_constant.ONE_INT)
+            for rows_data in rows_data_list:
+                title_pattern = re.compile(consts.STORAGE_INFO_MODEL_REGULAR)
+                title_search_obj = title_pattern.search(rows_data)
+                if title_search_obj:
+                    storage_model = rows_data
+                    break
+        return storage_model
+
+    def download_model_file(self, storage_id):
+        sftp = None
+        local_path = ''
+        try:
+            ssh = self.ssh_pool.create()
+            sftp = ssh.open_sftp()
+            file_name_list = sftp.listdir(consts.FTP_PATH_TMP)
+            for file_name in file_name_list:
+                title_pattern = re.compile(consts.STORAGE_INFO_REGULAR)
+                title_search_obj = title_pattern.search(file_name)
+                if title_search_obj:
+                    os_path = os.getcwd()
+                    localtime = int(time.mktime(time.localtime())) * units.k
+                    local_path = consts.MODEL_PATH.format(
+                        os_path, storage_id, localtime)
+                    os.mkdir(local_path)
+                    local_path_file = '{}/{}'.format(local_path, file_name)
+                    sftp.get(consts.FTP_PATH_FILE.format(file_name),
+                             local_path_file)
+                    break
+        except Exception as e:
+            LOG.error('Failed to down storage model file macro_san %s' %
+                      (six.text_type(e)))
+        if sftp:
+            sftp.close()
+        return local_path
+
+    def get_firmware_version(self):
+        firmware_version = None
+        version_map = self.get_storage_version()
+        for sp_num in range(
+                consts.digital_constant.ONE_INT,
+                len(version_map) + consts.digital_constant.ONE_INT):
+            sp_key = '{}{}'.format(consts.SP, sp_num)
+            firmware_version = \
+                version_map.get(sp_key, {}).get('{}{}'.format(
+                    sp_key, consts.ODSP_MSC_VERSION_KEY))
+            if consts.FIELDS_INITIATOR_HOST != firmware_version:
+                break
+        return firmware_version
+
+    def get_storage_status(self, storage_id):
+        storage_status = constants.StorageStatus.NORMAL
+        ha_status_map = self.get_data_query(consts.HA_STATUS)
+        ha_status = ha_status_map.get('SystemHAStatus')
+        if ha_status:
+            storage_status = consts.STORAGE_STATUS_MAP.get(
+                ha_status.lower(), constants.StorageStatus.UNKNOWN)
+        else:
+            controllers_list = self.list_controllers(storage_id)
+            for controllers in controllers_list:
+                controllers_status = controllers.get('status')
+                if controllers_status in constants.ControllerStatus.FAULT:
+                    storage_status = constants.StorageStatus.ABNORMAL
+        return storage_status
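get_storage_status, like most queries in this handler, goes through get_data_query (defined later in this file), which flattens the CLI's colon-separated output into a dict. A standalone sketch of that parsing logic against an invented 'ha mgt getstatus' reply:

```python
# Standalone sketch of the key:value parsing that get_data_query applies;
# the sample reply text is invented for illustration.
sample = """SystemHAStatus: normal
SP1HARunningStatus: dual--single
Command completed successfully."""
data_map = {}
for row in sample.strip().replace('\r', '').split('\n'):
    if ':' not in row:
        continue  # status lines without a colon are ignored
    key, value = row.replace(' ', '').split(':', 1)
    data_map[key] = value
assert data_map['SystemHAStatus'] == 'normal'
```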
+
+    def list_storage_pools(self, storage_id):
+        pool_list = []
+        pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME)
+        for pool in pools:
+            pool_name = pool.get('Name')
+            health_status = self.get_pool_status(pool_name)
+            total_capacity = Tools.get_capacity_size(pool.get('AllCapacity'))
+            used_capacity = Tools.get_capacity_size(pool.get('UsedCapacity'))
+            pool_model = {
+                'name': pool_name,
+                'storage_id': storage_id,
+                'native_storage_pool_id': pool_name,
+                'status': health_status,
+                'storage_type': constants.StorageType.BLOCK,
+                'total_capacity': total_capacity,
+                'used_capacity': used_capacity,
+                'free_capacity': total_capacity - used_capacity
+            }
+            pool_list.append(pool_model)
+        return pool_list
+
+    def get_pool_status(self, pool_name):
+        raids = self.get_data_list(consts.RAID_LIST.format(pool_name),
+                                   consts.FIELDS_NAME)
+        pool_status = constants.StoragePoolStatus.UNKNOWN
+        if raids:
+            pool_status = constants.StoragePoolStatus.NORMAL
+        for raid in raids:
+            health_status = raid.get('HealthStatus').lower() \
+                if raid.get('HealthStatus') else None
+            if health_status in consts.POOL_STATUS_ABNORMAL.ALL:
+                pool_status = constants.StoragePoolStatus.ABNORMAL
+                break
+            if health_status == constants.StoragePoolStatus.DEGRADED:
+                pool_status = constants.StoragePoolStatus.DEGRADED
+                break
+            if health_status not in consts.POOL_STATUS_NORMAL.ALL:
+                pool_status = constants.StoragePoolStatus.UNKNOWN
+        return pool_status
+
+    def list_volumes(self, storage_id):
+        volume_list = []
+        pool_volumes = self.get_volumes(storage_id)
+        for volume in pool_volumes:
+            status = volume.get('HealthStatus').lower() \
+                if volume.get('HealthStatus') else None
+            total_capacity = self.get_total_capacity(volume)
+            thin_provisioning = volume.get('Thin-Provisioning').lower() \
+                if volume.get('Thin-Provisioning') else None
+            used_capacity = self.get_used_capacity(thin_provisioning,
+                                                   total_capacity, volume)
+            volume_model = {
+                'name': volume.get('Name'),
+                'storage_id': storage_id,
+                'status': consts.LIST_VOLUMES_STATUS_MAP.get(
+                    status, constants.StorageStatus.UNKNOWN),
+                'native_volume_id': volume.get('Name'),
+                'native_storage_pool_id': volume.get('Owner(Pool)'),
+                'type': consts.VOLUME_TYPE_MAP.get(
+                    thin_provisioning, constants.VolumeType.THICK),
+                'wwn': volume.get('DeviceID') if
+                volume.get('DeviceID') else volume.get('WWN'),
+                'total_capacity': total_capacity,
+                'used_capacity': used_capacity,
+                'free_capacity': total_capacity - used_capacity
+            }
+            volume_list.append(volume_model)
+        return volume_list
+
+    @staticmethod
+    def get_used_capacity(thin_provisioning, total_capacity, volume):
+        if consts.FIELDS_ENABLE == thin_provisioning:
+            used_capacity_str = volume.get('Thin-LUNUsedCapacity')
+            number_b = used_capacity_str.index('B')
+            used_capacity = \
+                used_capacity_str[:number_b + consts.digital_constant.ONE_INT]
+            used_capacity = Tools.get_capacity_size(used_capacity)
+        else:
+            used_capacity = total_capacity
+        return used_capacity
+
+    @staticmethod
+    def get_total_capacity(volume):
+        total_size = volume.get('TotalSize')
+        if not total_size:
+            physical_size = volume.get('TotalPhysicalSize')
+            number_b = physical_size.index('B')
+            total_size = \
+                physical_size[:number_b + consts.digital_constant.ONE_INT]
+        total_capacity = Tools.get_capacity_size(total_size)
+        return total_capacity
+
+    def list_controllers(self, storage_id):
+        controllers_list = []
+        sp_map = self.get_storage_version()
+        cpu_map = self.get_cup_information()
+        ha_status_map = self.get_data_query(consts.HA_STATUS)
+        for sp_name in sp_map.keys():
+            status_key = '{}{}'.format(sp_name, consts.HA_RUNNING_STATUS)
+            status = ha_status_map.get(status_key).lower() \
+                if ha_status_map.get(status_key) else None
+            soft_version = sp_map.get(sp_name, {}).get(
+                '{}{}'.format(sp_name, consts.ODSP_MSC_VERSION_KEY))
+            cpu_vendor_id = cpu_map.get(sp_name, {}).get(
+                '{}{}'.format(sp_name, consts.PROCESSOR_VENDOR_KEY))
+            cpu_frequency = cpu_map.get(sp_name, {}).get(
+                '{}{}'.format(sp_name, consts.PROCESSOR_FREQUENCY_KEY))
+            cpu_info = ''
+            if cpu_vendor_id and cpu_frequency:
+                cpu_info = '{}@{}'.format(cpu_vendor_id, cpu_frequency)
+
controller_model = { + 'name': sp_name, + 'storage_id': storage_id, + 'native_controller_id': sp_name, + 'status': consts.CONTROLLERS_STATUS_MAP.get( + status, constants.ControllerStatus.UNKNOWN), + 'location': sp_name, + 'soft_version': soft_version, + 'cpu_info': cpu_info + } + if cpu_info: + controller_model['cpu_count'] = consts.digital_constant.ONE_INT + controllers_list.append(controller_model) + return controllers_list + + def get_cup_information(self): + cpu_res = self.do_exec(consts.SYSTEM_CPU) + sp_map = {} + if cpu_res: + cpu_res_list = cpu_res.strip(). \ + replace('\r', '').split('\n') + sp_cpu_map = {} + sp = None + bag = True + for row_cpu in (cpu_res_list or []): + row_pattern = re.compile(consts.SYSTEM_CPU_SP_REGULAR) + row_search = row_pattern.search(row_cpu) + if row_search: + bag = False + sp = row_cpu.replace( + consts.LEFT_HALF_BRACKET, '').replace( + consts.CPU_INFORMATION_BRACKET, '').replace(' ', '') + if bag: + continue + if consts.COLON in row_cpu: + row_version_list = row_cpu.replace(' ', '').split( + consts.COLON, digital_constant.ONE_INT) + key = row_version_list[digital_constant.ZERO_INT] + sp_cpu_map[key] = row_version_list[ + digital_constant.ONE_INT] + if not row_cpu: + sp_map[sp] = sp_cpu_map + sp_cpu_map = {} + sp = None + return sp_map + + def list_disks(self, storage_id): + disk_list = [] + disks = self.get_disks() + for disk in disks: + disk_name = disk.get('Name') + physical = disk.get('Type').lower() if disk.get('Type') else None + logical = disk.get('Role').lower() if disk.get('Role') else None + status = disk.get('HealthStatus').lower() if \ + disk.get('HealthStatus') else None + disk_model = { + 'name': disk_name, + 'storage_id': storage_id, + 'native_disk_id': disk_name, + 'serial_number': disk.get('SerialNumber'), + 'manufacturer': disk.get('Vendor'), + 'model': disk.get('Model'), + 'firmware': disk.get('FWVersion'), + 'location': disk_name, + 'speed': int(disk.get('RPMs')) if disk.get('RPMs') else '', + 'capacity': Tools.get_capacity_size(disk.get('Capacity')), + 'status': consts.DISK_STATUS_MAP.get( + status, constants.DiskStatus.NORMAL), + 'physical_type': consts.DISK_PHYSICAL_TYPE_MAP.get( + physical, constants.DiskPhysicalType.UNKNOWN), + 'logical_type': consts.DISK_LOGICAL_TYPE_MAP.get( + logical, constants.DiskLogicalType.UNKNOWN) + } + disk_list.append(disk_model) + return disk_list + + def list_ports(self, storage_id): + ports = self.get_fc_port_encapsulation(storage_id) + ports.extend(self.get_sas_port_data(storage_id)) + return ports + + def get_fc_port_encapsulation(self, storage_id): + ports = [] + fc_port_map = self.get_fc_port() + for fc_port_id in fc_port_map.keys(): + fc_port_id_upper = fc_port_id.upper() + port_type = self.get_port_type(fc_port_id.lower()) + fc_ports = fc_port_map.get(fc_port_id) + status_int = fc_ports.get('onlinestate') + native_parent_id = '{}{}'.format( + consts.SP, self.numbers_character(fc_port_id)) + fc_port_m = { + 'native_port_id': fc_port_id_upper, + 'name': fc_port_id_upper, + 'type': port_type, + 'logical_type': constants.PortLogicalType.PHYSICAL, + 'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get( + status_int, constants.PortConnectionStatus.UNKNOWN), + 'health_status': constants.PortHealthStatus.UNKNOWN, + 'location': fc_port_id_upper, + 'storage_id': storage_id, + 'native_parent_id': native_parent_id, + 'speed': Tools.get_capacity_size(fc_ports.get('actualspeed')), + 'wwn': fc_ports.get('wwn') + } + ports.append(fc_port_m) + return ports + + @staticmethod + def parse_alert(alert): + 
try: + if consts.PARSE_ALERT_DESCRIPTION in alert.keys(): + alert_name = alert.get(consts.PARSE_ALERT_NAME) + alert_name_e = alert_name.lower() + alert_name_c = consts.ALERT_NAME_CONFIG.get( + alert_name_e, alert_name) + alert_model = dict() + description = alert.get(consts.PARSE_ALERT_DESCRIPTION)\ + .encode('iso-8859-1').decode('gbk') + alert_model['alert_id'] = alert.get( + consts.PARSE_ALERT_ALERT_ID) + alert_model['severity'] = consts.PARSE_ALERT_SEVERITY_MAP.get( + alert.get(consts.PARSE_ALERT_SEVERITY), + constants.Severity.NOT_SPECIFIED) + alert_model['category'] = constants.Category.FAULT + alert_model['occur_time'] = Tools().time_str_to_timestamp( + alert.get(consts.PARSE_ALERT_TIME), consts.TIME_PATTERN) + alert_model['description'] = description + alert_model['location'] = '{}:{}'.format(alert.get( + consts.PARSE_ALERT_STORAGE), + alert.get(consts.PARSE_ALERT_LOCATION)) + alert_model['type'] = constants.EventType.EQUIPMENT_ALARM + alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE + alert_model['alert_name'] = alert_name_c + match_key = '{}{}'.format(alert_name_c, description) + alert_model['match_key'] = hashlib.md5( + match_key.encode()).hexdigest() + return alert_model + except Exception as e: + err_msg = "Failed to parse alert from " \ + "macro_san ms: %s" % (six.text_type(e)) + LOG.error(err_msg) + raise exception.InvalidResults(err_msg) + + def list_storage_host_initiators(self, storage_id): + initiators_list = [] + initiators = self.get_data_list( + consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS) + for initiator in initiators: + host_name = initiator.get('MappedClient') \ + if initiator.get('MappedClient') else initiator.get( + 'MappedHost') + wwn = initiator.get('InitiatorWWN') + online_status = initiator.get('OnlineStatus').lower() \ + if initiator.get('OnlineStatus') else None + initiator_type = initiator.get('Type').lower() \ + if initiator.get('Type') else None + initiator_d = { + 'native_storage_host_initiator_id': wwn, + 'name': wwn, + 'alias': initiator.get('InitiatorAlias'), + 'type': consts.INITIATOR_TYPE_MAP.get( + initiator_type, constants.InitiatorType.UNKNOWN), + 'status': consts.INITIATOR_STATUS_MAP.get( + online_status, constants.InitiatorStatus.UNKNOWN), + 'wwn': wwn, + 'storage_id': storage_id + } + if consts.FIELDS_INITIATOR_HOST != host_name: + initiator_d['native_storage_host_id'] = host_name + initiators_list.append(initiator_d) + return initiators_list + + def list_storage_hosts_old(self, storage_id): + host_list = [] + initiators_host_relation = self.get_initiators_host_relation() + hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME, '') + for host in hosts: + host_name = host.get('Name') + initiators = initiators_host_relation.get(host_name) + os_type = constants.HostOSTypes.UNKNOWN + if initiators: + os_str = initiators.get('OS').lower() \ + if initiators.get('OS') else None + os_type = consts.HOST_OS_TYPES_MAP.get( + os_str, constants.HostOSTypes.UNKNOWN) + host_d = { + 'name': host_name, + 'storage_id': storage_id, + 'native_storage_host_id': host_name, + 'os_type': os_type, + 'status': constants.HostStatus.NORMAL, + 'description': host.get('Description') + } + host_list.append(host_d) + return host_list + + def list_storage_hosts_new(self, storage_id): + hosts_new = self.get_data_list(consts.CLIENT_HOST, + consts.FIELDS_HOST_NAME, '') + host_list = [] + for host in hosts_new: + host_name = host.get('Host Name') + os = host.get('OS').lower() if host.get('OS') else None + host_d = { + 'name': 
host_name, + 'storage_id': storage_id, + 'native_storage_host_id': host_name, + 'os_type': consts.HOST_OS_TYPES_MAP.get( + os, constants.HostOSTypes.UNKNOWN), + 'status': constants.HostStatus.NORMAL, + 'description': host.get('Description') + } + if consts.FIELDS_INITIATOR_HOST != host.get('IP Address'): + host_d['ip_address'] = host.get('IP Address') + host_list.append(host_d) + return host_list + + def list_storage_host_groups(self, storage_id): + host_groups = self.get_data_list(consts.HOST_GROUP, + consts.FIELDS_HOST_GROUP_NAME, '') + storage_host_groups = [] + host_grp_relation_list = [] + for host_group in host_groups: + host_group_name = host_group.get('Host Group Name') + host_g = { + 'name': host_group_name, + 'storage_id': storage_id, + 'native_storage_host_group_id': host_group_name, + 'description': host_group.get('Description') + } + storage_host_groups.append(host_g) + hosts = self.get_data_list( + consts.HOST_GROUP_N.format(host_group_name), + consts.FIELDS_HOST_NAME_TWO) + for host in hosts: + host_name = host.get('HostName') + host_group_relation = { + 'storage_id': storage_id, + 'native_storage_host_group_id': host_group_name, + 'native_storage_host_id': host_name + } + host_grp_relation_list.append(host_group_relation) + result = { + 'storage_host_groups': storage_host_groups, + 'storage_host_grp_host_rels': host_grp_relation_list + } + return result + + def list_volume_groups(self, storage_id): + volume_groups = self.get_data_list(consts.VOLUME_GROUP, + consts.FIELDS_VOLUME_GROUP_NAME, '') + volume_group_list = [] + volume_grp_relation_list = [] + for volume_group in volume_groups: + volume_group_name = volume_group.get('LUN Group Name') + volume_g = { + 'name': volume_group_name, + 'storage_id': storage_id, + 'native_volume_group_id': volume_group_name, + 'description': volume_group.get('Description') + } + volume_group_list.append(volume_g) + volumes = self.get_data_list( + consts.VOLUME_GROUP_N.format(volume_group_name), + consts.FIELDS_LUN_NAME) + for volume in volumes: + volume_name = volume.get('LUNName') + volume_group_relation = { + 'storage_id': storage_id, + 'native_volume_group_id': volume_group_name, + 'native_volume_id': volume_name + } + volume_grp_relation_list.append(volume_group_relation) + result = { + 'volume_groups': volume_group_list, + 'vol_grp_vol_rels': volume_grp_relation_list + } + return result + + def list_masking_views_old(self, storage_id): + views = [] + hosts = self.get_data_list(consts.CLIENT_LIST, consts.FIELDS_NAME) + for host in hosts: + host_name = host.get('Name') + masking_list = self.get_data_list( + consts.SHARE_LUN_LIST.format(host_name), + consts.FIELDS_LUN_NAME) + for masking_object in masking_list: + volume_id = masking_object.get('LUNID') + native_masking_view_id = '{}{}'.format(host_name, volume_id) + view = { + 'native_masking_view_id': native_masking_view_id, + 'name': native_masking_view_id, + 'native_storage_host_id': host_name, + 'native_volume_id': volume_id, + 'storage_id': storage_id + } + views.append(view) + return views + + def list_masking_views_new(self, storage_id): + views = self.get_data_list(consts.MAPVIEW, consts.FIELDS_MAPVIEW_NAME, + '') + views_list = [] + for view in views: + mapview_name = view.get('Mapview Name') + view_d = { + 'native_masking_view_id': mapview_name, + 'name': mapview_name, + 'native_storage_host_group_id': view.get('Host Group Name'), + 'native_volume_group_id': view.get('LUN Group Name'), + 'description': view.get('Description'), + 'storage_id': storage_id + } + 
views_list.append(view_d) + return views_list + + def do_exec(self, command_str, sleep_time=0.5, mix_time=consts.TIME_LIMIT): + if self.down_lock: + try: + res = self.ssh_pool.do_exec_shell( + [consts.ODSP_SH, command_str], sleep_time) + except Exception as e: + LOG.error('ssh Command(%s) execution info: %s' % ( + command_str, six.text_type(e))) + raise e + else: + try: + res = self.ssh_pool.do_exec_shell([command_str], sleep_time) + except Exception as e: + LOG.error('cli Command(%s) execution info: %s' % ( + command_str, six.text_type(e))) + raise e + if consts.FAILED_TAG in res or consts.UNKNOWN_COMMAND_TAG in res: + return None + if consts.SUCCESSFUL_TAG not in res: + LOG.info('Command(%s) sleep(%s) return info: %s' % + (command_str, sleep_time, res)) + if sleep_time > mix_time: + return None + res = self.do_exec(command_str, sleep_time + 2, mix_time) + return res + + def get_data_query(self, command): + data_map = {} + res = self.do_exec(command) + if res is not None: + row_res_list = res.strip().replace('\r', '').split('\n') + for row_res in (row_res_list or []): + if consts.COLON not in row_res: + continue + row_data_list = row_res.replace(' ', '').split( + consts.COLON, digital_constant.ONE_INT) + key = row_data_list[digital_constant.ZERO_INT] + data_map[key] = row_data_list[digital_constant.ONE_INT] + return data_map + + def get_storage_version(self): + version_res = self.do_exec(consts.SYSTEM_VERSION) + sp_map = {} + if version_res: + version_res_list = version_res.strip(). \ + replace('\r', '').split('\n') + sp_version_map = {} + sp = None + bag = True + for row_version in (version_res_list or []): + row_pattern = re.compile(consts.SYSTEM_VERSION_SP_REGULAR) + row_search = row_pattern.search(row_version) + if row_search: + bag = False + sp = row_version.replace( + consts.LEFT_HALF_BRACKET, '').replace( + consts.AFTER_HALF_BRACKET, '').replace(' ', '') + if bag: + continue + if consts.COLON in row_version: + row_version_list = row_version.replace(' ', '').split( + consts.COLON, digital_constant.ONE_INT) + key = row_version_list[digital_constant.ZERO_INT] + sp_version_map[key] = row_version_list[ + digital_constant.ONE_INT] + if consts.ODSP_DRIVER_VERSION_KEY in key: + sp_map[sp] = sp_version_map + sp_version_map = {} + return sp_map + + def get_data_list(self, command, contains_fields, space=' ', + sleep_time=0.5, mix_time=consts.TIME_LIMIT): + data_list = [] + res = self.do_exec(command, sleep_time, mix_time) + if res: + res_list = res.strip().replace('\r', '').split('\n\n') + for object_str in (res_list or []): + object_str = object_str.replace(space, '') + if contains_fields not in object_str: + continue + object_list = object_str.split('\n') + data_map = {} + for row_str in (object_list or []): + if consts.COLON not in row_str: + continue + row_list = row_str.split( + consts.COLON, digital_constant.ONE_INT) + key = row_list[digital_constant.ZERO_INT].strip() + data_map[key] = row_list[digital_constant.ONE_INT].strip() + data_list.append(data_map) + return data_list + + def get_volumes(self, storage_id): + pools = self.list_storage_pools(storage_id) + volumes = [] + for pool in pools: + pool_name = pool.get('name') + lun_list = self.get_data_list( + consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME) + for lun in lun_list: + lun_name = lun.get('Name') + lun_query = self.get_data_query( + consts.LUN_QUERY.format(lun_name)) + if lun_query: + volumes.append(lun_query) + return volumes + + def get_disks(self): + disk_list = [] + dsu_list = self.get_data_list(consts.DSU_LIST, 
consts.FIELDS_NAME)
+        for dsu in dsu_list:
+            dsu_name = dsu.get('Name')
+            if not dsu_name:
+                continue
+            dsu_id = dsu_name.replace(consts.DSU, '')
+            disks = self.get_data_list(
+                consts.DISK_LIST.format(dsu_id), consts.FIELDS_NAME)
+            for disk in disks:
+                disk_name = disk.get('Name')
+                if not disk_name:
+                    continue
+                disk_id = disk_name.replace(consts.DISK, '')
+                disk_map = self.get_data_query(
+                    consts.DISK_QUERY.format(disk_id))
+                if disk_map:
+                    disk_list.append(disk_map)
+        return disk_list
+
+    def get_fc_port(self):
+        target_port_res = self.do_exec(consts.TARGET_QUERY_PORT_LIST)
+        fc_port = {}
+        if target_port_res:
+            bag = True
+            port_id = None
+            port_map = {}
+            target_port_list = target_port_res.replace('\r', '').split('\n')
+            for port_row_str in target_port_list:
+                port_row_str = port_row_str.replace(' ', '')
+                row_pattern = re.compile(consts.TARGET_PORT_REGULAR)
+                row_search = row_pattern.search(port_row_str)
+                if row_search:
+                    # A new 'port-x:y:z' header: flush the previous port's
+                    # attributes before starting to collect the next one.
+                    if port_map:
+                        fc_port[port_id] = port_map
+                        port_map = {}
+                    port_id = port_row_str.replace(consts.PORT, '')
+                    bag = False
+                    continue
+                if bag:
+                    continue
+                if consts.COLON in port_row_str:
+                    port_row_list = port_row_str.split(
+                        consts.COLON, digital_constant.ONE_INT)
+                    port_key = port_row_list[digital_constant.ZERO_INT]
+                    port_map[port_key] = port_row_list[
+                        digital_constant.ONE_INT]
+                if consts.PORT_SUCCESSFUL_TAG in port_row_str:
+                    fc_port[port_id] = port_map
+        return fc_port
+
+    def get_sas_port_data(self, storage_id):
+        sas_list = []
+        try:
+            ha_status_map = self.get_data_query(consts.HA_STATUS)
+            for ha_status_key in ha_status_map.keys():
+                if consts.SP not in ha_status_key:
+                    continue
+                sp_num = ha_status_key.replace(
+                    consts.HA_RUNNING_STATUS, '').replace(consts.SP, '')
+                dsu_list = self.get_data_list(consts.DSU_LIST,
+                                              consts.FIELDS_NAME)
+                for dsu in dsu_list:
+                    dsu_num = self.numbers_character(dsu.get('Name'))
+                    sas_data_map = self.get_sas_data_list(
+                        consts.SAS_PORT_LIST.format(sp_num, dsu_num),
+                        consts.FIELDS_LINK_STATUS)
+                    self.get_sas_encapsulation_data(sas_data_map, sas_list,
+                                                    storage_id)
+        except Exception as e:
+            # Best-effort collection: log the failure and return whatever
+            # SAS ports were gathered instead of silently swallowing the
+            # exception in a bare try/finally.
+            LOG.error('Failed to get sas port data: %s' % six.text_type(e))
+        return sas_list
+
+    def get_sas_encapsulation_data(self, sas_data_map, sas_list, storage_id):
+        for sas_port_id in sas_data_map.keys():
+            sas_object_map = sas_data_map.get(sas_port_id)
+            status = sas_object_map.get(
+                '{} Link Status'.format(sas_port_id))
+            max_speed = sas_object_map.get(
+                '{} PHY Max Speed'.format(sas_port_id))
+            speed = sas_object_map.get(
+                '{} PHY1 Speed'.format(sas_port_id))
+            native_parent_id = '{}{}'.format(
+                consts.SP, self.numbers_character(sas_port_id))
+            sas_port_m = {
+                'native_port_id': sas_port_id,
+                'name': sas_port_id,
+                'type': constants.PortType.SAS,
+                'logical_type': constants.PortLogicalType.PHYSICAL,
+                'connection_status': consts.PORT_CONNECTION_STATUS_MAP.get(
+                    status, constants.PortConnectionStatus.UNKNOWN),
+                'health_status': constants.PortHealthStatus.UNKNOWN,
+                'location': sas_port_id,
+                'storage_id': storage_id,
+                'native_parent_id': native_parent_id,
+                'max_speed': self.capacity_conversion(max_speed),
+                'speed': self.capacity_conversion(speed)
+            }
+            sas_list.append(sas_port_m)
+
+    @staticmethod
+    def capacity_conversion(capacity_str):
+        # Convert a speed string such as '12Gbps' to an integer in bit/s.
+        # Each branch must strip its own unit suffix before int().
+        capacity_int = consts.digital_constant.ZERO_INT
+        if consts.GBPS in capacity_str:
+            capacity_int = int(capacity_str.replace(consts.GBPS, '')) * units.G
+        elif consts.MBPS in capacity_str:
+            capacity_int = int(capacity_str.replace(consts.MBPS, '')) * units.M
+        elif consts.KBPS in capacity_str:
+            capacity_int = int(capacity_str.replace(consts.KBPS, '')) * units.k
+
return capacity_int + + def get_sas_data_list(self, command, contains_fields): + sas_data = {} + res = self.do_exec(command) + if res: + res_list = res.strip().replace('\r', '').split('\n\n') + for object_str in (res_list or []): + if contains_fields not in object_str: + continue + object_list = object_str.split('\n') + sas_object = {} + sas_data_key = None + for row_str in (object_list or []): + if consts.COLON not in row_str: + continue + object_num = row_str.rindex(consts.COLON) + object_key = row_str[:object_num].strip() + object_num_one = object_num + consts.digital_constant. \ + ONE_INT + sas_object[object_key] = row_str[object_num_one:].strip() + if consts.FIELDS_LINK_STATUS in row_str: + sas_data_num = row_str.index(' ') + sas_data_key = row_str[:sas_data_num] + sas_data[sas_data_key] = sas_object + return sas_data + + @staticmethod + def get_port_type(fc_port_id_lower): + if constants.PortType.FC in fc_port_id_lower: + port_type = constants.PortType.FC + elif constants.PortType.ISCSI in fc_port_id_lower: + port_type = constants.PortType.ISCSI + elif constants.PortType.SAS in fc_port_id_lower: + port_type = constants.PortType.SAS + elif constants.PortType.ETH in fc_port_id_lower: + port_type = constants.PortType.ETH + else: + port_type = constants.PortType.OTHER + return port_type + + @staticmethod + def numbers_character(character_string): + for character in list(character_string): + if character.isdigit(): + return character + + def get_initiators_host_relation(self): + initiators_host = {} + initiators = self.get_data_list( + consts.CLIENT_INITIATOR_GETLIST, consts.FIELDS_INITIATOR_ALIAS) + for initiator in initiators: + host_id = initiator.get('MappedClient') + initiators_host[host_id] = initiator + return initiators_host + + def collect_perf_metrics(self, storage_id, resource_metrics, start_time, + end_time): + metrics = [] + if not self.down_lock: + return metrics + LOG.info('The system(storage_id: %s) starts to collect macro_san' + ' performance, start_time: %s, end_time: %s', + storage_id, start_time, end_time) + resource_storage = resource_metrics.get(constants.ResourceType.STORAGE) + if resource_storage: + storage_metrics = self.get_storage_metrics( + end_time, resource_storage, start_time, storage_id) + metrics.extend(storage_metrics) + LOG.info('The system(storage_id: %s) stop to collect storage' + ' performance, The length is: %s', + storage_id, len(storage_metrics)) + resource_volume = resource_metrics.get(constants.ResourceType.VOLUME) + if resource_volume: + volume_metrics = self.get_volume_metrics( + end_time, resource_volume, start_time, storage_id) + metrics.extend(volume_metrics) + LOG.info('The system(storage_id: %s) stop to collect volume' + ' performance, The length is: %s', + storage_id, len(volume_metrics)) + file_name_map = self.get_identification() + resource_port = resource_metrics.get(constants.ResourceType.PORT) + if resource_port: + sas_port_metrics = self.get_port_metrics( + end_time, resource_port, start_time, storage_id, + consts.SAS_PORT, consts.SASPORT_REGULAR) + metrics.extend(sas_port_metrics) + LOG.info('The system(storage_id: %s) stop to collect sas port' + ' performance, The length is: %s', + storage_id, len(sas_port_metrics)) + if file_name_map: + fc_port_metrics = self.get_fc_port_metrics( + end_time, resource_port, start_time, storage_id, + file_name_map) + metrics.extend(fc_port_metrics) + LOG.info('The system(storage_id: %s) stop to collect fc port' + ' performance, The length is: %s', storage_id, + len(fc_port_metrics)) + resource_disk 
= resource_metrics.get(constants.ResourceType.DISK) + if resource_disk and file_name_map: + disk_metrics = self.get_disk_metrics( + end_time, resource_disk, start_time, storage_id, file_name_map) + metrics.extend(disk_metrics) + LOG.info('The system(storage_id: %s) stop to collect disk' + ' performance, The length is: %s', + storage_id, len(disk_metrics)) + return metrics + + def get_fc_port_metrics(self, end_time, resource_disk, start_time, + storage_id, file_name_map): + local_path = self.down_perf_file(consts.FC_PORT, storage_id, + consts.FCPORT_REGULAR) + disk_metrics = [] + if local_path: + metrics_data = None + try: + metrics_data = self.analysis_per_file( + local_path, start_time, end_time, + consts.FC_PORT, file_name_map) + except Exception as e: + LOG.error('Failed to fc port analysis per file %s' % ( + six.text_type(e))) + finally: + shutil.rmtree(local_path) + if metrics_data: + disk_metrics = self.packaging_metrics( + storage_id, metrics_data, resource_disk, + constants.ResourceType.PORT) + return disk_metrics + + def get_disk_metrics(self, end_time, resource_disk, start_time, + storage_id, file_name_map): + local_path = self.down_perf_file( + constants.ResourceType.DISK, storage_id, + consts.DISK_REGULAR) + disk_metrics = [] + if local_path: + metrics_data = None + try: + metrics_data = self.analysis_per_file( + local_path, start_time, end_time, + constants.ResourceType.DISK, file_name_map) + except Exception as e: + LOG.error('Failed to disk analysis per file %s' % ( + six.text_type(e))) + finally: + shutil.rmtree(local_path) + if metrics_data: + disk_metrics = self.packaging_metrics( + storage_id, metrics_data, resource_disk, + constants.ResourceType.DISK) + return disk_metrics + + def get_port_metrics(self, end_time, resource_port, start_time, + storage_id, folder, pattern): + local_path = self.down_perf_file(folder, storage_id, pattern) + sas_port_metrics = [] + if local_path: + metrics_data = None + try: + metrics_data = self.analysis_per_file( + local_path, start_time, end_time, folder) + except Exception as e: + LOG.error('Failed to sas port analysis per file %s' % ( + six.text_type(e))) + finally: + shutil.rmtree(local_path) + if metrics_data: + sas_port_metrics = self.packaging_metrics( + storage_id, metrics_data, resource_port, + constants.ResourceType.PORT) + return sas_port_metrics + + def get_volume_metrics(self, end_time, resource_volume, start_time, + storage_id): + local_path = self.down_perf_file( + constants.ResourceType.VOLUME, storage_id, consts.LUN_REGULAR) + volume_metrics = [] + if local_path: + metrics_data = None + try: + uuid_map = self.get_volume_uuid() + metrics_data = self.analysis_per_file( + local_path, start_time, end_time, + constants.ResourceType.VOLUME, uuid_map) + except Exception as e: + LOG.error('Failed to volume analysis per file %s' % ( + six.text_type(e))) + finally: + shutil.rmtree(local_path) + if metrics_data: + volume_metrics = self.packaging_metrics( + storage_id, metrics_data, resource_volume, + constants.ResourceType.VOLUME) + return volume_metrics + + def get_storage_metrics(self, end_time, resource_storage, start_time, + storage_id): + local_path = self.down_perf_file(constants.ResourceType.STORAGE, + storage_id, consts.STRAGE_REGULAR) + storage_metrics = [] + if local_path: + metrics_data = None + try: + metrics_data = self.analysis_per_file( + local_path, start_time, end_time, + constants.ResourceType.STORAGE) + except Exception as e: + LOG.error('Failed to storage analysis per file %s' % ( + six.text_type(e))) + finally: + 
shutil.rmtree(local_path) + if metrics_data: + resource_id, resource_name = self.get_storages() + storage_metrics = self.storage_packaging_data( + storage_id, metrics_data, resource_storage, + resource_id, resource_name) + return storage_metrics + + def get_storages(self): + storage_data_map = self.get_data_query(consts.SYSTEM_QUERY) + device_uuid = storage_data_map.get('DeviceUUID') + storage_name = storage_data_map.get('DeviceName') + resource_name = storage_name if storage_name else device_uuid + resource_id = '{}/{}'.format(self.ssh_host, device_uuid) + return resource_id, resource_name + + def down_perf_file(self, folder, storage_id, pattern): + sftp = None + tar = None + local_path = '' + try: + ssh = self.ssh_pool.create() + sftp = ssh.open_sftp() + file_name_list = sftp.listdir(consts.FTP_PERF_PATH) + ms_path = os.getcwd() + localtime = int(round(time.time() * 1000)) + local_path = consts.ADD_FOLDER.format( + ms_path, folder, storage_id, localtime) + os.mkdir(local_path) + for file_name in file_name_list: + title_pattern = re.compile(pattern) + title_search_obj = title_pattern.search(file_name) + if title_search_obj: + local_path_file = '{}/{}'.format(local_path, file_name) + ftp_path = '{}/{}'.format(consts.FTP_PERF_PATH, file_name) + sftp.get(ftp_path, local_path_file) + if consts.CSV in file_name: + continue + tar = tarfile.open(local_path_file) + tar.extractall(local_path) + except Exception as e: + LOG.error('Failed to down perf file %s macro_san %s' % + (folder, six.text_type(e))) + if sftp: + sftp.close() + if tar: + tar.close() + return local_path + + def get_identification(self): + identification = {} + controller = self.get_controller() + if not controller: + return identification + files = self.get_data_list( + consts.SYSTEM_PERFORMANCE_FILE, consts.FIELDS_NAME, + sleep_time=consts.digital_constant.TWELVE_INT, + mix_time=consts.digital_constant.SIXTY) + for file in files: + sp = file.get('SPName') + file_name = file.get('FileName') + if controller != sp or not file_name: + continue + identification[file_name] = file.get('ObjectName') + return identification + + def get_controller(self): + res = self.ssh_pool.do_exec_shell([consts.VERSION_SHOW], + consts.digital_constant.ONE_INT) + if res: + res_list = res.strip().replace('\r', '').split('\n') + for res in res_list: + if consts.SPECIAL_VERSION in res: + controller = res.replace(' ', '').replace( + consts.SPECIAL_VERSION, '') + return controller + + def get_volume_uuid(self): + uuid_map = {} + pools = self.get_data_list(consts.POOL_LIST, consts.FIELDS_NAME) + for pool in pools: + pool_name = pool.get('Name') + lun_list = self.get_data_list( + consts.LUN_LIST.format(pool_name), consts.FIELDS_NAME) + for lun in lun_list: + lun_name = lun.get('Name') + lun_query = self.get_data_query( + consts.LUN_QUERY.format(lun_name)) + uuid = lun_query.get('LUNUUID') + uuid_map[uuid] = lun_name + return uuid_map + + def analysis_per_file(self, local_path, start_time, end_time, + resource_type, uuid_map=None): + resource_key_data = {} + resource_key = None + if constants.ResourceType.STORAGE == resource_type: + resource_key = resource_type + list_dir = os.listdir(local_path) + data = {} + for dir_name in list_dir: + dir_name = dir_name.replace(' ', '') + if consts.CSV not in dir_name: + continue + resource_key = self.get_resource_key(dir_name, resource_key, + resource_type, uuid_map) + resource_data = resource_key_data.get(resource_key) + if resource_data: + data = resource_data + with codecs.open('{}/{}'.format(local_path, dir_name), + 
encoding='utf-8-sig') as f: + for row in csv.DictReader( + line.replace('\0', '') for line in f): + time_str = row.get('') + timestamp_s = self.get_timestamp_s(time_str) + timestamp_ms = timestamp_s * units.k + if timestamp_ms < start_time or timestamp_ms >= end_time: + continue + row_data, timestamp = self.get_perf_data(row, timestamp_s) + data[timestamp] = row_data + resource_key_data[resource_key] = data + return resource_key_data + + @staticmethod + def get_resource_key(dir_name, resource_key, resource_type, uuid_map): + if constants.ResourceType.VOLUME == resource_type: + uuid_list = dir_name.replace(consts.PERF_LUN, '').split( + consts.PERF_SP) + uuid = uuid_list[consts.digital_constant.ZERO_INT] + resource_key = uuid_map.get(uuid) + if consts.SAS_PORT == resource_type: + uuid_list = dir_name.replace(consts.PERF_SAS_PORT, '').split( + consts.PERF_SP) + resource_key = uuid_list[consts.digital_constant.ZERO_INT] \ + .replace('_', ':') + if constants.ResourceType.DISK == resource_type or \ + consts.FC_PORT == resource_type: + resource_key = uuid_map.get(dir_name) if \ + uuid_map.get(dir_name) else \ + uuid_map.get(dir_name.replace('.csv', '.tgz')) + return resource_key + + @staticmethod + def get_perf_data(row, timestamp_s): + timestamp = int(timestamp_s / consts.SIXTY) * consts.SIXTY * units.k + throughput = round( + (int(row.get('r&w/throughput(B)')) / units.Mi), 3) + r_throughput = round( + (int(row.get('r/throughput(B)')) / units.Mi), 3) + w_throughput = round( + (int(row.get('w/throughput(B)')) / units.Mi), 3) + response = round( + int(row.get('r&w/avg_rsp_time(us)')) / units.k, 3) + r_response = round( + int(row.get('r/avg_rsp_time(us)')) / units.k, 3) + w_response = round( + int(row.get('w/avg_rsp_time(us)')) / units.k, 3) + cache_hit_ratio = round( + int(row.get('r&w/cacherate(%*100)')), 3) + r_cache_hit_ratio = round( + int(row.get('r/cacherate(%*100)')), 3) + w_cache_hit_ratio = round( + int(row.get('w/cacherate(%*100)')), 3) + row_data = { + constants.StorageMetric.IOPS.name: round( + int(row.get('r&w/iops')), 3), + constants.StorageMetric.READ_IOPS.name: round( + int(row.get('r/iops')), 3), + constants.StorageMetric.WRITE_IOPS.name: round( + int(row.get('w/iops')), 3), + constants.StorageMetric.THROUGHPUT.name: throughput, + constants.StorageMetric.READ_THROUGHPUT.name: r_throughput, + constants.StorageMetric.WRITE_THROUGHPUT.name: w_throughput, + constants.StorageMetric.RESPONSE_TIME.name: response, + constants.StorageMetric.READ_RESPONSE_TIME.name: r_response, + constants.StorageMetric.WRITE_RESPONSE_TIME.name: w_response, + constants.StorageMetric.CACHE_HIT_RATIO.name: cache_hit_ratio, + constants.StorageMetric.READ_CACHE_HIT_RATIO.name: + r_cache_hit_ratio, + constants.StorageMetric.WRITE_CACHE_HIT_RATIO.name: + w_cache_hit_ratio + } + return row_data, timestamp + + @staticmethod + def storage_packaging_data(storage_id, metrics_data, resource_metrics, + resource_id, resource_name): + metrics = [] + for resource_key in resource_metrics.keys(): + labels = { + 'storage_id': storage_id, + 'resource_type': constants.ResourceType.STORAGE, + 'resource_id': resource_id, + 'resource_name': resource_name, + 'type': 'RAW', + 'unit': resource_metrics[resource_key]['unit'] + } + resource_value = {} + time_key_data = metrics_data.get(constants.ResourceType.STORAGE) + for time_key in time_key_data.keys(): + resource_key_data = time_key_data.get(time_key) + resource_data = resource_key_data.get(resource_key) + resource_value[time_key] = resource_data + metrics_res = 
constants.metric_struct( + name=resource_key, labels=labels, values=resource_value) + metrics.append(metrics_res) + return metrics + + @staticmethod + def packaging_metrics(storage_id, metrics_data, resource_metrics, + resource_type): + metrics = [] + for resource_id in metrics_data.keys(): + for resource_key in resource_metrics.keys(): + labels = { + 'storage_id': storage_id, + 'resource_type': resource_type, + 'resource_id': resource_id, + 'resource_name': resource_id, + 'type': 'RAW', + 'unit': resource_metrics[resource_key]['unit'] + } + resource_value = {} + resource_data = metrics_data.get(resource_id) + for time_key in resource_data.keys(): + resource_value[time_key] = \ + resource_data.get(time_key, {}).get(resource_key) + if resource_value: + metrics_res = constants.metric_struct( + name=resource_key, labels=labels, + values=resource_value) + metrics.append(metrics_res) + return metrics + + @staticmethod + def get_timestamp_s(time_str): + timestamp_s = \ + int(datetime.datetime.strptime( + time_str, consts.MACRO_SAN_TIME_FORMAT).timestamp()) + return timestamp_s + + def get_latest_perf_timestamp(self): + timestamp = None + if not self.down_lock: + return timestamp + res = self.ssh_pool.do_exec_shell([consts.GET_DATE]) + if res: + res_list = res.strip().replace('\r', '').split('\n') + for row in res_list: + if row.isdigit(): + timestamp = int( + int(row) / consts.SIXTY) * consts.SIXTY * units.k + return timestamp diff --git a/delfin/drivers/macro_san/ms/ms_stor.py b/delfin/drivers/macro_san/ms/ms_stor.py new file mode 100644 index 000000000..1fceb7638 --- /dev/null +++ b/delfin/drivers/macro_san/ms/ms_stor.py @@ -0,0 +1,109 @@ +# Copyright 2022 The SODA Authors. +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
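+
+# NOTE: thin driver shim -- every StorageDriver interface method below
+# delegates to MsHandler, which issues ODSP CLI commands to the array
+# over SSH.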
+from oslo_log import log +from delfin.common import constants +from delfin.drivers import driver +from delfin.drivers.macro_san.ms import ms_handler, consts +from delfin.drivers.macro_san.ms.ms_handler import MsHandler + +LOG = log.getLogger(__name__) + + +class MacroSanDriver(driver.StorageDriver): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.ms_handler = ms_handler.MsHandler(**kwargs) + self.login = self.ms_handler.login() + + def get_storage(self, context): + return self.ms_handler.get_storage(self.storage_id) + + def list_storage_pools(self, context): + return self.ms_handler.list_storage_pools(self.storage_id) + + def list_volumes(self, context): + return self.ms_handler.list_volumes(self.storage_id) + + def list_controllers(self, context): + return self.ms_handler.list_controllers(self.storage_id) + + def list_disks(self, context): + return self.ms_handler.list_disks(self.storage_id) + + def list_ports(self, context): + return self.ms_handler.list_ports(self.storage_id) + + def list_alerts(self, context, query_para=None): + raise NotImplementedError( + "Macro_SAN Driver SSH list_alerts() is not Implemented") + + @staticmethod + def parse_alert(context, alert): + return MsHandler.parse_alert(alert) + + def clear_alert(self, context, alert): + pass + + def remove_trap_config(self, context, trap_config): + pass + + def add_trap_config(self, context, trap_config): + pass + + def reset_connection(self, context, **kwargs): + pass + + def collect_perf_metrics(self, context, storage_id, + resource_metrics, start_time, end_time): + return self.ms_handler.collect_perf_metrics( + self.storage_id, resource_metrics, start_time, end_time) + + @staticmethod + def get_capabilities(context, filters=None): + return { + 'is_historic': True, + 'resource_metrics': { + constants.ResourceType.STORAGE: consts.STORAGE_CAP, + constants.ResourceType.VOLUME: consts.VOLUME_CAP, + constants.ResourceType.PORT: consts.PORT_CAP, + constants.ResourceType.DISK: consts.DISK_CAP + } + } + + def get_latest_perf_timestamp(self, context): + return self.ms_handler.get_latest_perf_timestamp() + + def list_storage_host_initiators(self, context): + return self.ms_handler.list_storage_host_initiators(self.storage_id) + + def list_storage_hosts(self, context): + host_list = self.ms_handler.list_storage_hosts_new(self.storage_id) + if not host_list: + host_list = self.ms_handler.list_storage_hosts_old(self.storage_id) + return host_list + + def list_storage_host_groups(self, context): + return self.ms_handler.list_storage_host_groups(self.storage_id) + + def list_volume_groups(self, context): + return self.ms_handler.list_volume_groups(self.storage_id) + + def list_masking_views(self, context): + views = self.ms_handler.list_masking_views_new(self.storage_id) + if not views: + views = self.ms_handler.list_masking_views_old(self.storage_id) + return views diff --git a/delfin/drivers/utils/performance_file/macro_san/__init__.py b/delfin/drivers/utils/performance_file/macro_san/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index 4a8e22873..f40f27bed 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -133,7 +133,7 @@ def do_exec(self, command_str): class SSHPool(pools.Pool): - SOCKET_TIMEOUT = 10 + CONN_TIMEOUT = 60 def __init__(self, **kwargs): ssh_access = kwargs.get('ssh') @@ -146,9 +146,8 @@ def __init__(self, **kwargs): self.ssh_pub_key_type = 
ssh_access.get('pub_key_type') self.ssh_pub_key = ssh_access.get('pub_key') self.ssh_conn_timeout = ssh_access.get('conn_timeout') - self.conn_timeout = self.SOCKET_TIMEOUT if self.ssh_conn_timeout is None: - self.ssh_conn_timeout = SSHPool.SOCKET_TIMEOUT + self.ssh_conn_timeout = SSHPool.CONN_TIMEOUT super(SSHPool, self).__init__(min_size=0, max_size=3) def set_host_key(self, host_key, ssh): @@ -187,9 +186,8 @@ def create(self): username=self.ssh_username, password=cryptor.decode(self.ssh_password), timeout=self.ssh_conn_timeout) - if self.conn_timeout: - transport = ssh.get_transport() - transport.set_keepalive(self.SOCKET_TIMEOUT) + transport = ssh.get_transport() + transport.set_keepalive(self.ssh_conn_timeout) return ssh except Exception as e: err = six.text_type(e) @@ -281,7 +279,7 @@ def do_exec(self, command_str): raise exception.StorageBackendException(result) return result - def do_exec_shell(self, command_list): + def do_exec_shell(self, command_list, sleep_time=0.5): result = '' try: with self.item() as ssh: @@ -290,7 +288,7 @@ def do_exec_shell(self, command_list): for command in command_list: utils.check_ssh_injection(command) channel.send(command + '\n') - time.sleep(0.5) + time.sleep(sleep_time) channel.send("exit" + "\n") channel.close() while True: diff --git a/delfin/tests/unit/drivers/macro_san/__init__.py b/delfin/tests/unit/drivers/macro_san/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/macro_san/ms/__init__.py b/delfin/tests/unit/drivers/macro_san/ms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/macro_san/ms/test_ms_stor.py b/delfin/tests/unit/drivers/macro_san/ms/test_ms_stor.py new file mode 100644 index 000000000..ee169d5be --- /dev/null +++ b/delfin/tests/unit/drivers/macro_san/ms/test_ms_stor.py @@ -0,0 +1,1126 @@ +# Copyright 2022 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
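+
+# The string constants below are canned ODSP CLI responses. Each test mocks
+# MacroSanSSHPool.do_exec_shell with a side_effect sequence so the driver
+# replays these captures instead of opening a real SSH session.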
+import sys +from unittest import TestCase, mock + +import paramiko +import six +from paramiko import SSHClient + +sys.modules['delfin.cryptor'] = mock.Mock() +import time +from oslo_utils import units +from delfin.common import constants +from delfin.drivers.macro_san.ms import consts +from delfin.drivers.macro_san.ms.macro_ssh_client import MacroSanSSHPool + +from oslo_log import log + +from delfin import context +from delfin.drivers.macro_san.ms.ms_handler import MsHandler +from delfin.drivers.macro_san.ms.ms_stor import MacroSanDriver + +LOG = log.getLogger(__name__) +ACCESS_INFO = { + "storage_id": "12345", + "vendor": "macro_san", + "model": "macro_san", + "ssh": { + "host": "110.143.133.200", + "port": 22, + "username": "admin", + "password": "admin" + } +} +POOLS_INFO = """Last login: Wed Jul 13 15:05:45 2022 from 192.168.3.235\r +(null)@(null) ODSP CLI> pool mgt getlist\r +Storage Pools Sum: 4\r +\r +Name: SYS-Pool\r +Type: Traditional\r +Is Foreign: No\r +Is Reserved: Yes\r +Cell Size: 1GB\r +All Capacity: 7144GB\r +Used Capacity: 961GB\r +Used Capacity Rate: 13.5%\r +Free Capacity(RAID): 6183GB\r +Free Capacity(HDD RAID): 0GB\r +Free Capacity(SSD RAID): 6183GB\r +\r +Name: pool-1\r +Type: Traditional\r +Is Foreign: No\r +Is Reserved: No\r +Cell Size: 1GB\r +All Capacity: 0GB\r +Used Capacity: 0GB\r +Used Capacity Rate: 0.0%\r +Free Capacity(RAID): 0GB\r +Free Capacity(HDD RAID): 0GB\r +Free Capacity(SSD RAID): 0GB\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +RAID_SYS_POOL = """(null)@(null) ODSP CLI> raid mgt getlist -p SYS-Pool\r +RAIDs Sum: 1\r +\r +Name: SYS-RAID\r +RAID Level: RAID5\r +Health Status: Normal\r +Total Capacity: 7144GB\r +Free Capacity: 6183GB\r +Disk Type: SSD\r +Data Disks Sum: 8\r +Dedicated Spare Disks Sum: 1\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" + +RAID_POOL_1 = """(null)@(null) ODSP CLI> raid mgt getlist -p pool-1\r +RAIDs Sum: 0\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ + +POOLS_DATA = [{'name': 'SYS-Pool', 'storage_id': '12345', + 'native_storage_pool_id': 'SYS-Pool', 'status': 'normal', + 'storage_type': 'block', 'total_capacity': 7670811590656.0, + 'used_capacity': 1031865892864.0, + 'free_capacity': 6638945697792.0}, + {'name': 'pool-1', 'storage_id': '12345', + 'native_storage_pool_id': 'pool-1', 'status': 'unknown', + 'storage_type': 'block', 'total_capacity': 0.0, + 'used_capacity': 0.0, + 'free_capacity': 0.0}] +VOLUME_INFO = """(null)@(null) ODSP CLI> lun mgt getlist -p SYS-Pool\r +SYS-Pool: 18 LUNs (18 Normal 0 Faulty)\r +\r +Name : SYS-LUN-Config\r +LUN id : 0\r +Total Size : 4GB\r +Current Owner(SP) : SP1\r +Health Status : Normal\r +Cache Status : Disable\r +Mapped to Client : No\r +\r +\r +Name : SYS-LUN-Log\r +LUN id : 1\r +Total Size : 4GB\r +Current Owner(SP) : SP1\r +Health Status : Normal\r +Cache Status : Disable\r +Mapped to Client : No\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_QUERY_ONE = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\r +Name : SYS-LUN-Config\r +Device ID: 600B342F1B0F9ABD7BABD272BD0000DA\r +Total Size : 4GB\r +Current Owner(SP) : SP1\r +Owner(Pool) : SYS-Pool\r +Health Status : Normal\r +Is Reserved : Yes\r +Is Foreign : No\r +Created Time: 2021/12/23 11:26:40\r +Cache Set Status: Disable\r +Cache Status: Disable\r +LUN Distr Mode : concatenated\r +Mapped to Client : No\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_QUERY_TWO = """(null)@(null) ODSP CLI> lun 
mgt query -n SYS-LUN-Log\r +Name : SYS-LUN-Log\r +Device ID: 600B342EF209582D8D07D1EE4D0000DA\r +Total Size : 4GB\r +Current Owner(SP) : SP1\r +Owner(Pool) : SYS-Pool\r +Health Status : Normal\r +Is Reserved : Yes\r +Is Foreign : No\r +Created Time: 2021/12/23 11:26:44\r +Cache Set Status: Disable\r +Cache Status: Disable\r +LUN Distr Mode : concatenated\r +Mapped to Client : No\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +VOLUME_ONE_NEW = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Config\r +Name: SYS-LUN-Config\r +WWN: 600B342F1B0F9ABD7BABD272BD0000DA\r +Type: Standard-LUN\r +Is RDV LUN: No\r +Total Logical Size: 4GB (209715200sector)\r +Total Physical Size: 4GB (209715200sector)\r +Thin-Provisioning: Disable\r +Default Owner(SP): SP1\r +Current Owner(SP): SP1\r +Owner(Group): N/A\r +Owner(Pool): SYS-Pool\r +Health Status: Normal\r +Ua_type: ALUA\r +Is Reserved: No\r +Is Foreign: No\r +Write Zero Status: Disable\r +Created Time: 2020/03/02 17:49:15\r +Read Cache: Enable\r +Read Cache Status: Enable\r +Write Cache: Enable\r +Write Cache Status: Enable\r +Mapped to Client: No\r +LUN UUID: 0x50b34200-154800ee-a8746477-234b74a7\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_TWO_NEW = """(null)@(null) ODSP CLI> lun mgt query -n SYS-LUN-Log\r +Name: SYS-LUN-Log\r +WWN: 600B3423899AC1EDB125DCAE6D4E00D0\r +NGUID: 040F09004EE6CA2500B342B11EAC9938\r +Type: Standard-LUN\r +Is RDV LUN: No\r +Total Logical Size: 1GB (2097152sector)\r +Total Physical Size: 1GB (2097152sector)\r +Thin-Provisioning: Enable\r +Thin-LUN Extent Size: 16KB\r +Thin-LUN Private-area Allocate Mode: SSD RAID First\r +Thin-LUN Data-area Allocate Mode: HDD RAID First\r +Thin-LUN Expand Threshold: 30GB\r +Thin-LUN Expand Step Size: 50GB\r +Thin-LUN Allocated Physical Capacity: 1GB\r +Thin-LUN Allocated Physical Capacity Percentage: 100.0%\r +Thin-LUN Used Capacity: 3956KB\r +Thin-LUN Used Capacity Percentage: 0.0%\r +Thin-LUN Unused Capacity: 1,048,576KB\r +Thin-LUN Unused Capacity Percentage: 100.0%\r +Thin-LUN Distribute Mode: Single\r +Thin-LUN Dedup Switch: Disable\r +Thin-LUN Compress Switch: Disable\r +Default Owner(SP): SP1\r +Current Owner(SP): SP1\r +Owner(Group): N/A\r +Owner(Pool): Pool-1\r +Health Status: Normal\r +Ua_type: ALUA\r +Is Reserved: No\r +Is Foreign: No\r +Created Time: 2022/08/29 17:36:37\r +Read Cache: Enable\r +Read Cache Status: Enable\r +Write Cache: Enable\r +Write Cache Status: Enable\r +Mapped to Client: No\r +LUN UUID: 0x00b34204-0f09004e-e6ca25b1-1eac9938\r +Thin-LUN private UUID: 0x00b34204-0f09006f-6c27276c-a6d3f14b\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_TWO_INFO = """(null)@(null) ODSP CLI> lun mgt getlist -p pool-1\r +pool-1: 0 LUNs (0 Normal 0 Faulty)\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUMES_DATA = [ + {'name': 'SYS-LUN-Config', 'storage_id': '12345', 'status': 'normal', + 'native_volume_id': 'SYS-LUN-Config', + 'native_storage_pool_id': 'SYS-Pool', 'type': 'thick', + 'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0, + 'used_capacity': 4294967296.0, 'free_capacity': 0.0}, + {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal', + 'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'Pool-1', + 'type': 'thin', 'wwn': '600B3423899AC1EDB125DCAE6D4E00D0', + 'total_capacity': 1073741824.0, 'used_capacity': 4050944.0, + 'free_capacity': 1069690880.0}] +THICK_VOLUMES_DATA = [ + {'name': 'SYS-LUN-Config', 'storage_id': 
'12345', 'status': 'normal', + 'native_volume_id': 'SYS-LUN-Config', + 'native_storage_pool_id': 'SYS-Pool', 'type': 'thick', + 'wwn': '600B342F1B0F9ABD7BABD272BD0000DA', 'total_capacity': 4294967296.0, + 'used_capacity': 4294967296.0, 'free_capacity': 0.0}, + {'name': 'SYS-LUN-Log', 'storage_id': '12345', 'status': 'normal', + 'native_volume_id': 'SYS-LUN-Log', 'native_storage_pool_id': 'SYS-Pool', + 'type': 'thick', 'wwn': '600B342EF209582D8D07D1EE4D0000DA', + 'total_capacity': 4294967296.0, 'used_capacity': 4294967296.0, + 'free_capacity': 0.0}] +VERSION_INFO = """(null)@(null) ODSP CLI> system mgt getversion\r +[SP1 Version]\r +SP1 ODSP_MSC Version: V2.0.14T04\r +SP1 ODSP_Driver Version: V607\r +\r +[SP2 Version]\r +SP2 ODSP_MSC Version: V2.0.14T04\r +SP2 ODSP_Driver Version: V607\r +\r +[SP3 Version]\r +SP3 ODSP_MSC Version: N/A\r +SP3 ODSP_Driver Version: N/A\r +\r +[SP4 Version]\r +SP4 ODSP_MSC Version: N/A\r +SP4 ODSP_Driver Version: N/A\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +CPU_INFO = """(null)@(null) ODSP CLI> system mgt getcpuinfo\r +[SP1 CPU Information]\r +SP1 Processor0 ID: 0\r +SP1 Processor0 Vendor_id: GenuineIntel\r +SP1 Processor0 CPU Frequency: 2200.000 MHz\r +SP1 Processor1 ID: 1\r +SP1 Processor1 Vendor_id: GenuineIntel\r +SP1 Processor1 CPU Frequency: 2200.000 MHz\r +SP1 Processor2 ID: 2\r +SP1 Processor2 Vendor_id: GenuineIntel\r +SP1 Processor2 CPU Frequency: 2200.000 MHz\r +SP1 Processor3 ID: 3\r +SP1 Processor3 Vendor_id: GenuineIntel\r +SP1 Processor3 CPU Frequency: 2200.000 MHz\r +\r +[SP2 CPU Information]\r +SP2 Processor0 ID: 0\r +SP2 Processor0 Vendor_id: GenuineIntel\r +SP2 Processor0 CPU Frequency: 2200.000 MHz\r +SP2 Processor1 ID: 1\r +SP2 Processor1 Vendor_id: GenuineIntel\r +SP2 Processor1 CPU Frequency: 2200.000 MHz\r +SP2 Processor2 ID: 2\r +SP2 Processor2 Vendor_id: GenuineIntel\r +SP2 Processor2 CPU Frequency: 2200.000 MHz\r +SP2 Processor3 ID: 3\r +SP2 Processor3 Vendor_id: GenuineIntel\r +SP2 Processor3 CPU Frequency: 2200.000 MHz\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +HA_STATUS = """(null)@(null) ODSP CLI> ha mgt getstatus\r +SP1 HA Running Status : dual--single\r +SP2 HA Running Status : dual--single\r +SP3 HA Running Status : absent--poweroff\r +SP4 HA Running Status : absent--poweroff\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +HA_STATUS_NEW = """(null)@(null) ODSP CLI> ha mgt getstatus\r +System HA Status : normal\r +SP1 HA Running Status : single\r +SP2 HA Running Status : single\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +CONTROLLERS_DATA = [ + {'name': 'SP1', 'storage_id': '12345', 'native_controller_id': 'SP1', + 'status': 'normal', 'location': 'SP1', 'soft_version': 'V2.0.14T04', + 'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1}, + {'name': 'SP2', 'storage_id': '12345', 'native_controller_id': 'SP2', + 'status': 'normal', 'location': 'SP2', 'soft_version': 'V2.0.14T04', + 'cpu_info': 'GenuineIntel@2200.000MHz', 'cpu_count': 1}, + {'name': 'SP3', 'storage_id': '12345', 'native_controller_id': 'SP3', + 'status': 'offline', 'location': 'SP3', 'soft_version': 'N/A', + 'cpu_info': ''}, + {'name': 'SP4', 'storage_id': '12345', 'native_controller_id': 'SP4', + 'status': 'offline', 'location': 'SP4', 'soft_version': 'N/A', + 'cpu_info': ''}] +DSU_INFO = """(null)@(null) ODSP CLI> dsu mgt getlist\r +DSUs Sum:1\r +\r +Name: DSU-7:1:1\r +Disks: 2\r +DSU EP1 SAS address: 500b342000dd26ff\r +DSU EP2 SAS address: 
500b342000dd273f\r +\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +DISKS_INFO = """(null)@(null) ODSP CLI> disk mgt getlist -d 7:1:1\r +Disks Sum: 2\r +\r +Name: Disk-7:1:1:1\r +Type: SSD\r +Capacity: 893GB\r +Vendor: ATA\r +RPMs: 0\r +Health Status: Normal\r +Disk Role: Data disk\r +Owner(Pool): SYS-Pool\r +Owner(RAID): SYS-RAID\r +\r +Name: Disk-7:1:1:2\r +Type: SSD\r +Capacity: 893GB\r +Vendor: ATA\r +RPMs: 0\r +Health Status: Normal\r +Disk Role: Data disk\r +Owner(Pool): SYS-Pool\r +Owner(RAID): SYS-RAID\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +DISK_ONE = """(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:1\r +Name: Disk-7:1:1:1\r +Type: HDD\r +Capacity: 893GB\r +Vendor: ATA\r +Model: Micron_5200_MTFDDAK960TDD\r +FW Version: U004\r +Serial Number: 18311E8D2787\r +Size: 2.5inch\r +RPMs: 0\r +Read Cache Setting: Enable\r +Write Cache Setting: Enable\r +Health Status: Normal\r +Role: Data disk\r +Owner(Pool): SYS-Pool\r +Owner(RAID): SYS-RAID\r +Locating Status: NO\r +SP1 Disk Online Status: Online\r +SP2 Disk Online Status: Online\r +SP3 Disk Online Status: Online\r +SP4 Disk Online Status: Online\r +SSD Estimated Life Remaining: N/A\r +SSD Estimated Time Remaining: N/A\r +SSD Applicable Scene: N/A\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +DISKS_TWO = """(null)@(null) ODSP CLI> disk mgt query -d 7:1:1:2\r +Name: Disk-7:1:1:2\r +Type: SSD\r +Capacity: 893GB\r +Vendor: ATA\r +Model: Micron_5200_MTFDDAK960TDD\r +FW Version: U004\r +Serial Number: 18311E8D2C03\r +Size: 2.5inch\r +RPMs: 0\r +Read Cache Setting: Enable\r +Write Cache Setting: Enable\r +Health Status: Normal\r +Role: Data disk\r +Owner(Pool): SYS-Pool\r +Owner(RAID): SYS-RAID\r +Locating Status: NO\r +SP1 Disk Online Status: Online\r +SP2 Disk Online Status: Online\r +SP3 Disk Online Status: Online\r +SP4 Disk Online Status: Online\r +SSD Estimated Life Remaining: N/A\r +SSD Estimated Time Remaining: N/A\r +SSD Applicable Scene: N/A\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +DISKS_DATA = [{'name': 'Disk-7:1:1:1', 'storage_id': '12345', + 'native_disk_id': 'Disk-7:1:1:1', + 'serial_number': '18311E8D2787', 'manufacturer': 'ATA', + 'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004', + 'location': 'Disk-7:1:1:1', 'speed': 0, + 'capacity': 958851448832.0, 'status': 'normal', + 'physical_type': 'hdd', 'logical_type': 'data'}, + {'name': 'Disk-7:1:1:2', 'storage_id': '12345', + 'native_disk_id': 'Disk-7:1:1:2', + 'serial_number': '18311E8D2C03', 'manufacturer': 'ATA', + 'model': 'Micron_5200_MTFDDAK960TDD', 'firmware': 'U004', + 'location': 'Disk-7:1:1:2', 'speed': 0, + 'capacity': 958851448832.0, 'status': 'normal', + 'physical_type': 'ssd', 'logical_type': 'data'}] +FC_INFO = """(null)@(null) ODSP CLI> client target queryportlist\r +fc port-1:4:1\r +wwn : 50:0b:34:20:02:fe:b5:0d\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-1:4:2\r +wwn : 50:0b:34:20:02:fe:b5:0e\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-1:4:3\r +wwn : 50:0b:34:20:02:fe:b5:0f\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-1:4:4\r +wwn : 50:0b:34:20:02:fe:b5:10\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-2:4:1\r +wwn : 50:0b:34:20:02:fe:b3:0d\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-2:4:2\r +wwn : 
50:0b:34:20:02:fe:b3:0e\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-2:4:3\r +wwn : 50:0b:34:20:02:fe:b3:0f\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +fc port-2:4:4\r +wwn : 50:0b:34:20:02:fe:b3:10\r +online state : 2\r +actual speed : 0\r +port topology : 0\r +initiator num : 0\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +SAS_INFO = """(null)@(null) ODSP CLI>system sas getportlist -c 1:1\r +SAS Controller 1:1 Ports Sum:2\r +\r +SAS-1:1:1 Link Status: Full-Linkup\r +SAS-1:1:1 PHY Max Speed: 12Gbps\r +SAS-1:1:1 PHY1 Speed: 12Gbps\r +SAS-1:1:1 PHY2 Speed: 12Gbps\r +SAS-1:1:1 PHY3 Speed: 12Gbps\r +SAS-1:1:1 PHY4 Speed: 12Gbps\r +\r +SAS-1:1:2 Link Status: Full-Linkup\r +SAS-1:1:2 PHY Max Speed: 12Gbps\r +SAS-1:1:2 PHY1 Speed: 6Gbps\r +SAS-1:1:2 PHY2 Speed: 6Gbps\r +SAS-1:1:2 PHY3 Speed: 6Gbps\r +SAS-1:1:2 PHY4 Speed: 6Gbps\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +PORT_DATA = [{'native_port_id': 'FC-1:4:1', 'name': 'FC-1:4:1', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-1:4:1', + 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b5:0d'}, + {'native_port_id': 'FC-1:4:2', 'name': 'FC-1:4:2', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-1:4:2', + 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b5:0e'}, + {'native_port_id': 'FC-1:4:3', 'name': 'FC-1:4:3', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-1:4:3', + 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b5:0f'}, + {'native_port_id': 'FC-1:4:4', 'name': 'FC-1:4:4', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-1:4:4', + 'storage_id': '12345', 'native_parent_id': 'SP1', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b5:10'}, + {'native_port_id': 'FC-2:4:1', 'name': 'FC-2:4:1', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-2:4:1', + 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b3:0d'}, + {'native_port_id': 'FC-2:4:2', 'name': 'FC-2:4:2', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-2:4:2', + 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b3:0e'}, + {'native_port_id': 'FC-2:4:3', 'name': 'FC-2:4:3', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-2:4:3', + 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b3:0f'}, + {'native_port_id': 'FC-2:4:4', 'name': 'FC-2:4:4', 'type': 'fc', + 'logical_type': 'physical', 'connection_status': 'disconnected', + 'health_status': 'unknown', 'location': 'FC-2:4:4', + 'storage_id': '12345', 'native_parent_id': 'SP2', 'speed': 0.0, + 'wwn': '50:0b:34:20:02:fe:b3:10'}, + {'native_port_id': 'SAS-1:1:1', 'name': 'SAS-1:1:1', + 'type': 'sas', 'logical_type': 'physical', + 'connection_status': 'connected', 'health_status': 'unknown', + 'location': 'SAS-1:1:1', 
'storage_id': '12345', + 'native_parent_id': 'SP1', 'max_speed': 12000000000, + 'speed': 12000000000}, + {'native_port_id': 'SAS-1:1:2', 'name': 'SAS-1:1:2', + 'type': 'sas', 'logical_type': 'physical', + 'connection_status': 'connected', 'health_status': 'unknown', + 'location': 'SAS-1:1:2', 'storage_id': '12345', + 'native_parent_id': 'SP1', 'max_speed': 12000000000, + 'speed': 6000000000}] +PARSE_ALERT_INFO = { + '1.3.6.1.2.1.1.3.0': '2995472', + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.35904.1.3.3', + '1.3.6.1.2.1.25.1.2': '2022-07-12 17:43:40', + '1.3.6.1.4.1.35904.1.2.1.1': 'Storage-1', + '1.3.6.1.4.1.35904.1.2.1.4.1': 'Battery_expired', + '1.3.6.1.4.1.35904.1.2.1.4.2': 'SP1', + '1.3.6.1.4.1.35904.1.2.1.4.3': "SSU-7:1:1's battery '2' becomes expired," + " please prepare a new module and replace" + " it as soon as possible.", + '1.3.6.1.4.1.35904.1.2.1.4.4': '2', + 'transport_address': '192.168.3.235', + 'storage_id': '05e007e4-62ef-4e24-a14e-57a8ee8e5bf3'} +PARSE_ALERT_DATA = { + 'alert_id': '2995472', 'severity': 'Major', + 'category': 'Fault', 'occur_time': 1657619020000, + 'description': "SSU-7:1:1's battery '2' becomes expired, please prepare" + " a new module and replace it as soon as possible.", + 'location': 'Storage-1:SP1', 'type': 'EquipmentAlarm', + 'resource_type': 'Storage', + 'alert_name': '电池模块超期', + 'match_key': 'ec62c3cdd862da9b0f8da6d03d97d76e'} +INITIATOR_INFO = """(null)@(null) ODSP CLI> client initiator getlist -t all\r +Initiators Sum: 3\r + +Initiator Alias: VMWare\r +Initiator WWN: 20:18:f8:2e:3f:f9:85:54\r +Type: FC\r +OS: AIX\r +IP Address Used in Last iSCSI Login Session: N/A\r +Mapped Client: Client-1\r +Mapped Targets Sum: 2\r +Mapped LUNs Sum: 6\r +\r +Initiator Alias: ds\r +Initiator WWN: 20:ab:30:48:56:01:fc:31\r +Type: FC\r +OS: Other\r +IP Address Used in Last iSCSI Login Session: N/A\r +Mapped Client: Client-2\r +Mapped Targets Sum: 1\r +Mapped LUNs Sum: 1\r +\r +Initiator Alias: dc\r +Initiator WWN: 42:25:dc:35:ab:69:12:cb\r +Type: FC\r +OS: HP_UNIX\r +IP Address Used in Last iSCSI Login Session: N/A\r +Mapped Client: Client-2\r +Mapped Targets Sum: 1\r +Mapped LUNs Sum: 2\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +INITIATOR_DATA = [ + {'native_storage_host_initiator_id': '20:18:f8:2e:3f:f9:85:54', + 'native_storage_host_id': 'Client-1', 'name': '20:18:f8:2e:3f:f9:85:54', + 'alias': 'VMWare', 'type': 'fc', 'status': 'unknown', + 'wwn': '20:18:f8:2e:3f:f9:85:54', 'storage_id': '12345'}, + {'native_storage_host_initiator_id': '20:ab:30:48:56:01:fc:31', + 'native_storage_host_id': 'Client-2', 'name': '20:ab:30:48:56:01:fc:31', + 'alias': 'ds', 'type': 'fc', 'status': 'unknown', + 'wwn': '20:ab:30:48:56:01:fc:31', 'storage_id': '12345'}, + {'native_storage_host_initiator_id': '42:25:dc:35:ab:69:12:cb', + 'native_storage_host_id': 'Client-2', 'name': '42:25:dc:35:ab:69:12:cb', + 'alias': 'dc', 'type': 'fc', 'status': 'unknown', + 'wwn': '42:25:dc:35:ab:69:12:cb', 'storage_id': '12345'}] +UNKNOWN_COMMAND = """(null)@(null) ODSP CLI> client host gethostlist +% Unknown command. 
+(null)@(null) ODSP CLI> """ +HOSTS_INFO = """(null)@(null) ODSP CLI> client mgt getclientlist\r +Clients Sum: 7\r +\r +Name: Client-1\r +Description: ds mss\r +Mapped Initiators Num: 1\r +\r +Name: Client-2\r +Description: \r +Mapped Initiators Num: 2\r +\r +Name: Client-3\r +Description: sss\r +Mapped Initiators Num: 0\r +\r +Name: Client-4\r +Description: dsd\r +Mapped Initiators Num: 0\r +\r +Name: Client-5\r +Description: ds\r +Mapped Initiators Num: 0\r +\r +Name: Client-6\r +Description: \r +Mapped Initiators Num: 0\r +\r +Name: 5\r +Description: \r +Mapped Initiators Num: 0\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +HOST_INFO_NEW = """(null)@(null) ODSP CLI> client host gethostlist\r +Host Sum: 1\r +\r +Host Name: Host-1\r +OS: Windows2008\r +IP Address: 192.168.1.20\r +Description: Server 1\r +Location: Room-201\r +Initiators Sum: 4\r +iSCSI Initiators Sum: 2\r +FC Initiators Sum: 2\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +HOST_DATA = [{'name': 'Client-1', 'storage_id': '12345', + 'native_storage_host_id': 'Client-1', 'os_type': 'AIX', + 'status': 'normal', 'description': 'ds mss'}, + {'name': 'Client-2', 'storage_id': '12345', + 'native_storage_host_id': 'Client-2', 'os_type': 'HP-UX', + 'status': 'normal', 'description': ''}, + {'name': 'Client-3', 'storage_id': '12345', + 'native_storage_host_id': 'Client-3', 'os_type': 'Unknown', + 'status': 'normal', 'description': 'sss'}, + {'name': 'Client-4', 'storage_id': '12345', + 'native_storage_host_id': 'Client-4', 'os_type': 'Unknown', + 'status': 'normal', 'description': 'dsd'}, + {'name': 'Client-5', 'storage_id': '12345', + 'native_storage_host_id': 'Client-5', 'os_type': 'Unknown', + 'status': 'normal', 'description': 'ds'}, + {'name': 'Client-6', 'storage_id': '12345', + 'native_storage_host_id': 'Client-6', 'os_type': 'Unknown', + 'status': 'normal', 'description': ''}, + {'name': '5', 'storage_id': '12345', + 'native_storage_host_id': '5', 'os_type': 'Unknown', + 'status': 'normal', 'description': ''}] +HOST_DATA_NEW = [{'name': 'Host-1', 'storage_id': '12345', + 'native_storage_host_id': 'Host-1', 'os_type': 'Windows', + 'status': 'normal', 'description': 'Server 1', + 'ip_address': '192.168.1.20'}] +HOST_GROUPS_INFO = """(null)@(null) ODSP CLI> client hostgroup gethglist\r +Host Groups Sum: 1\r +\r +Host Group Name: Host-Group-1\r +Description: Host Group\r +Hosts Sum: 1\r +Initiators Sum: 4\r +iSCSI Initiators Sum: 2\r +FC Initiators Sum: 2\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +HOST_GROUPS_H_INFO = """(null)@(null) ODSP CLI> client hostgroup gethostlist\ + -n Host-Group-1\r +Hosts Sum: 1\r +\r +HostName: Host-1\r +OS: Windows2008\r +IP Address: 192.168.1.20\r +Description: Server1\r +Location: Room-201\r +Initiators Sum: 4\r +iSCSI Initiators Sum: 2\r +FC Initiators Sum: 2\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +HOST_GROUPS_DATA = { + 'storage_host_groups': [ + {'name': 'Host-Group-1', 'storage_id': '12345', + 'native_storage_host_group_id': 'Host-Group-1', + 'description': 'Host Group'} + ], + 'storage_host_grp_host_rels': [ + {'storage_id': '12345', 'native_storage_host_group_id': 'Host-Group-1', + 'native_storage_host_id': 'Host-1'} + ] +} +VOLUME_GROUPS_INFO = """(null)@(null) ODSP CLI> client lungroup getlglist\r +LUN Group Sum: 1\r +\r +LUN Group Name: LUN-Group-1\r +Description: LUN Group description\r +LUNs Sum: 4\r +Local LUNs Sum: 4\r +Remote LUNs Sum: 0\r +\r +Command completed 
successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_GROUPS_N_INFO = """(null)@(null) ODSP CLI> client lungroup getlunlist\ + -n LUN-Group-1\r +LUNs Sum: 1\r +\r +LUN Name: LUN-0001/N/A\r +Location: Local/Remote\r +LUN Capacity: 10GB (20971520sector)/N/A\r +LUN WWN: 600B34249837CEBDC611DCB12DD500D6/N/A\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VOLUME_GROUP_DATA = {'volume_groups': [ + {'name': 'LUN-Group-1', 'storage_id': '12345', + 'native_volume_group_id': 'LUN-Group-1', + 'description': 'LUN Group description'}], 'vol_grp_vol_rels': [ + {'storage_id': '12345', 'native_volume_group_id': 'LUN-Group-1', + 'native_volume_id': 'LUN-0001/N/A'}]} +VIEWS_ONE = """(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-1\r +LUNs Sum: 6\r +\r +LUN Name: Test_Lun-1\r +LUN Capacity: 10GB\r +LUN WWN: 600B3427C77BBDFD2FF0DBA82D0000DB\r +LUN ID: 0\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +LUN Name: Test_Lun-2\r +LUN Capacity: 10GB\r +LUN WWN: 600B342A316B328D7035DD724D0000DB\r +LUN ID: 1\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +LUN Name: Test_Lun-3\r +LUN Capacity: 10GB\r +LUN WWN: 600B342AB2FE2ACDBC63D8B0DD0000DB\r +LUN ID: 2\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +LUN Name: Test_Lun-4\r +LUN Capacity: 10GB\r +LUN WWN: 600B342B328A722D55F7DEF5DD0000DB\r +LUN ID: 3\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +LUN Name: Test_Lun-5\r +LUN Capacity: 10GB\r +LUN WWN: 600B34221067D72D65DFD18C8D0000DB\r +LUN ID: 4\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +LUN Name: LUN-1\r +LUN Capacity: 2GB\r +LUN WWN: 600B342A816A4F2D9098DB015D0000DB\r +LUN ID: 5\r +Access Mode: Read-Write\r +Thin-Provisioning: Disable\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VIEW_TWO = """(null)@(null) ODSP CLI> client mgt getsharelunlist -n Client-2\r +LUNs Sum: 0\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VIEWS_DATA = [{'native_masking_view_id': 'Client-10', 'name': 'Client-10', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '0', + 'storage_id': '12345'}, + {'native_masking_view_id': 'Client-11', 'name': 'Client-11', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '1', + 'storage_id': '12345'}, + {'native_masking_view_id': 'Client-12', 'name': 'Client-12', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '2', + 'storage_id': '12345'}, + {'native_masking_view_id': 'Client-13', 'name': 'Client-13', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '3', + 'storage_id': '12345'}, + {'native_masking_view_id': 'Client-14', 'name': 'Client-14', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '4', + 'storage_id': '12345'}, + {'native_masking_view_id': 'Client-15', 'name': 'Client-15', + 'native_storage_host_id': 'Client-1', 'native_volume_id': '5', + 'storage_id': '12345'}] +VIEW_NEW_INFO = """client mapview getlist\r +Mapviews Sum: 1\r +\r +Mapview Name: Mapview-1\r +Description: Map view\r +Host Group Name: Host-Group-1\r +Target Group Name: Target-Group-1\r +LUN Group Name: LUN-Group-1\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +VIEWS_NEW_DATA = [{'native_masking_view_id': 'Mapview-1', 'name': 'Mapview-1', + 'native_storage_host_group_id': 'Host-Group-1', + 'native_volume_group_id': 'LUN-Group-1', + 'description': 'Map view', 'storage_id': '12345'}] +SYSTEM_QUERY = """(null)@(null) ODSP CLI> system mgt query\r +system mgt query\r +Device 
UUID:0x00b34202-fea90000-fa41e0d6-ded905a8\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +SYSTEM_QUERY_TWO = """(null)@(null) ODSP CLI> system mgt query\r +Device UUID:0x50b34200-0b750056-42ab74ff-6265d80e\r +Device Name:Storage-1\r +Command completed successfully.\r +(null)@(null) ODSP CLI> """ +STORAGE_DATA = { + 'name': '0x00b34202-fea90000-fa41e0d6-ded905a8', + 'vendor': 'MacroSAN', 'status': 'normal', + 'serial_number': '110.143.133.200/0x00b34202-fea90000-fa41e0d6-ded905a8', + 'firmware_version': 'V2.0.14T04', + 'raw_capacity': 1917702897664.0, + 'total_capacity': 7670811590656.0, + 'used_capacity': 1031865892864.0, + 'free_capacity': 6638945697792.0, + 'model': '' +} +STORAGE_TWO_DATA = { + 'name': 'Storage-1', 'vendor': 'MacroSAN', + 'status': 'normal', + 'serial_number': '110.143.133.200/0x50b34200-0b750056-42ab74ff-6265d80e', + 'firmware_version': 'V2.0.14T04', + 'raw_capacity': 1917702897664.0, + 'total_capacity': 7670811590656.0, + 'used_capacity': 1031865892864.0, + 'free_capacity': 6638945697792.0, + 'model': '' +} +TIMESTAMP = """[root@00-b3-42-04-0f-09 ~]# date +%s\r +1662345266\r +[root@00-b3-42-04-0f-09 ~]#""" +VERSION_SHOW = """[root@00-b3-42-04-0f-09 ~]# versionshow\r +\r +SP2 Version:\r + ODSP_MSC: V1.5.12T03\r + ODSP_DRIVER: V230T03\r + BIOS : V166\r + BMC : V272P001\r + MCPLD : V104\r + MPCB : VER.B\r + BCB1 : V214\r + BCB2 : V214\r + BAT1HW : BAT1111A\r + BAT2HW : FAN2021A\r + IOC1PCB :\r + IOC2PCB :\r +DSU : 1:1:1\r + ODSP_JMC : V221\r + ODSP_JMCB: N/A\r + EPCB : N/A\r + ECPLD : V101\r + BAT0_BCB : N/A\r + BAT1_BCB : N/A\r +\r +[root@00-b3-42-04-0f-09 ~]#""" +GET_FILE_LIST = """(null)@(null) ODSP CLI> system performance getfilelist\r +Performance Statistics Files Sum:2\r + +SP Name: SP2\r +Object Type: DEVICE\r +Object Name: Device\r +Object Identification: N/A\r +File Name: perf_device_SP2_20220920181959.csv\r +File Create Time: 2022-09-20 18:19:59\r +File Size: 58 KB\r +\r +SP Name: SP2\r +Object Type: SAS PORT\r +Object Name: SAS-2:1:1\r +Object Identification: N/A\r +File Name: perf_sasport_SAS-2_1_1_SP2_20220920181959.csv\r +File Create Time: 2022-09-20 18:19:59\r +File Size: 56 KB\r +\r +Command completed successfully.\r +(null)@(null) ODSP CLI>""" +resource_metrics = { + constants.ResourceType.STORAGE: consts.STORAGE_CAP, + constants.ResourceType.VOLUME: consts.VOLUME_CAP, + constants.ResourceType.PORT: consts.PORT_CAP +} + + +def create_driver(): + MsHandler.login = mock.Mock( + return_value={None}) + return MacroSanDriver(**ACCESS_INFO) + + +class test_macro_san_driver(TestCase): + driver = create_driver() + + def test_init(self): + MsHandler.login = mock.Mock( + return_value={""}) + MacroSanDriver(**ACCESS_INFO) + + def test_get_storage(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[SYSTEM_QUERY, VERSION_INFO, + POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, + DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO, + HA_STATUS, VERSION_INFO, CPU_INFO, HA_STATUS, + VERSION_SHOW]) + MacroSanSSHPool.create = mock.Mock(__class__) + SSHClient.open_sftp = mock.Mock(__class__) + storage_object = self.driver.get_storage(context) + self.assertDictEqual(storage_object, STORAGE_DATA) + + def test_get_storage_new(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[SYSTEM_QUERY_TWO, VERSION_INFO, + POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, + DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO, + 
HA_STATUS_NEW, VERSION_INFO, CPU_INFO, HA_STATUS_NEW, + VERSION_SHOW]) + MacroSanSSHPool.create = mock.Mock(__class__) + SSHClient.open_sftp = mock.Mock(__class__) + storage_object = self.driver.get_storage(context) + self.assertDictEqual(storage_object, STORAGE_TWO_DATA) + + def test_list_storage_pools(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1]) + pools = self.driver.list_storage_pools(context) + self.assertListEqual(pools, POOLS_DATA) + + def test_list_volumes(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, + VOLUME_INFO, VOLUME_QUERY_ONE, VOLUME_QUERY_TWO, + VOLUME_TWO_INFO]) + volumes = self.driver.list_volumes(context) + self.assertListEqual(volumes, THICK_VOLUMES_DATA) + + def test_list_volumes_new(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[POOLS_INFO, RAID_SYS_POOL, RAID_POOL_1, + VOLUME_INFO, VOLUME_ONE_NEW, VOLUME_TWO_NEW, + VOLUME_TWO_INFO]) + volumes = self.driver.list_volumes(context) + self.assertListEqual(volumes, VOLUMES_DATA) + + def test_list_controllers(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[VERSION_INFO, CPU_INFO, HA_STATUS]) + controllers = self.driver.list_controllers(context) + self.assertListEqual(controllers, CONTROLLERS_DATA) + + def test_list_disks(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[DSU_INFO, DISKS_INFO, DISK_ONE, DISKS_TWO]) + disks = self.driver.list_disks(context) + self.assertListEqual(disks, DISKS_DATA) + + def test_list_ports(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[FC_INFO, HA_STATUS, DSU_INFO, SAS_INFO, None, None, + None]) + ports = self.driver.list_ports(context) + self.assertListEqual(ports, PORT_DATA) + + def test_parse_alert(self): + parse_alert = self.driver.parse_alert(context, PARSE_ALERT_INFO) + PARSE_ALERT_DATA['occur_time'] = parse_alert.get('occur_time') + self.assertDictEqual(parse_alert, PARSE_ALERT_DATA) + + def test_list_storage_host_initiators(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[INITIATOR_INFO]) + initiators = self.driver.list_storage_host_initiators(context) + self.assertListEqual(initiators, INITIATOR_DATA) + + def test_list_storage_hosts_old(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[consts.UNKNOWN_COMMAND_TAG, + INITIATOR_INFO, HOSTS_INFO]) + hosts = self.driver.list_storage_hosts(context) + self.assertListEqual(hosts, HOST_DATA) + + def test_list_storage_hosts_new(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[HOST_INFO_NEW]) + hosts = self.driver.list_storage_hosts(context) + self.assertListEqual(hosts, HOST_DATA_NEW) + + def test_list_storage_hosts_group(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + 
side_effect=[HOST_GROUPS_INFO, HOST_GROUPS_H_INFO]) + host_groups = self.driver.list_storage_host_groups(context) + self.assertDictEqual(host_groups, HOST_GROUPS_DATA) + + def test_list_volume_groups(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[VOLUME_GROUPS_INFO, VOLUME_GROUPS_N_INFO]) + volume_groups = self.driver.list_volume_groups(context) + self.assertDictEqual(volume_groups, VOLUME_GROUP_DATA) + + def test_list_masking_views_old(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[consts.UNKNOWN_COMMAND_TAG, + HOSTS_INFO, VIEWS_ONE, VIEW_TWO, VIEW_TWO, VIEW_TWO, + VIEW_TWO, VIEW_TWO, VIEW_TWO]) + views = self.driver.list_masking_views(context) + self.assertListEqual(views, VIEWS_DATA) + + def test_list_masking_views_new(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[VIEW_NEW_INFO]) + views = self.driver.list_masking_views(context) + self.assertListEqual(views, VIEWS_NEW_DATA) + + def test_list_alert(self): + block = False + try: + self.driver.list_alerts(context) + except Exception as e: + LOG.error(six.text_type(e)) + block = True + self.assertEqual(block, True) + + def test_get_latest_perf_timestamp(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[TIMESTAMP]) + timestamp = self.driver.get_latest_perf_timestamp(context) + times = 1662345240000 + self.assertEqual(timestamp, times) + + def test_get_capabilities(self): + capabilities = self.driver.get_capabilities(context) + metrics = { + 'is_historic': True, + 'resource_metrics': { + constants.ResourceType.STORAGE: consts.STORAGE_CAP, + constants.ResourceType.VOLUME: consts.VOLUME_CAP, + constants.ResourceType.PORT: consts.PORT_CAP, + constants.ResourceType.DISK: consts.DISK_CAP, + } + } + self.assertDictEqual(capabilities, metrics) + + def test_collect_perf_metrics(self): + MacroSanSSHPool.get = mock.Mock(return_value={paramiko.SSHClient()}) + MacroSanSSHPool.do_exec_shell = mock.Mock( + side_effect=[VERSION_SHOW, GET_FILE_LIST]) + MsHandler.down_perf_file = mock.Mock(return_value='') + localtime = time.mktime(time.localtime()) * units.k + storage_id = 12345 + start_time = localtime - 1000 * 60 * 5 + end_time = localtime + metrics = self.driver.collect_perf_metrics( + context, storage_id, resource_metrics, start_time, end_time) + self.assertListEqual(metrics, []) diff --git a/setup.py b/setup.py index 067b0902e..73a340275 100644 --- a/setup.py +++ b/setup.py @@ -56,6 +56,7 @@ 'hitachi hnas = delfin.drivers.hitachi.hnas.hds_nas:HitachiHNasDriver', 'pure flasharray = delfin.drivers.pure.flasharray.pure_flasharray:PureFlashArrayDriver', 'h3c unistor_cf = delfin.drivers.h3c.unistor_cf.unistor_cf:H3cUniStorCfDriver', + 'macrosan macrosan = delfin.drivers.macro_san.ms.ms_stor:MacroSanDriver', # AS5500/AS5300/AS2600/AS2200 use the same driver 'inspur as5500 = delfin.drivers.inspur.as5500.as5500:As5500Driver' ]
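
A note on the test_get_latest_perf_timestamp expectation above: the TIMESTAMP fixture returns epoch seconds 1662345266 from date +%s, while the asserted value is 1662345240000, i.e. the seconds floored to the start of the minute and scaled to milliseconds. A minimal sketch of that conversion, inferred from the fixture values rather than taken from the driver's code:

    from oslo_utils import units  # units.k == 1000, as the test itself uses

    def latest_perf_timestamp(epoch_seconds):
        # Floor to the minute boundary, then convert seconds -> milliseconds.
        return epoch_seconds // 60 * 60 * units.k

    assert latest_perf_timestamp(1662345266) == 1662345240000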
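The GET_FILE_LIST fixture likewise shows the shape collect_perf_metrics has to handle: system performance getfilelist emits one blank-line-separated block of "Field: value" pairs per performance CSV. A hedged sketch of splitting that output into per-file dicts (the function name is illustrative, not the driver's):

    def parse_perf_file_list(cli_output):
        # Split CLI output into blocks of 'Field: value' pairs; partition on
        # the first colon so values like 'SAS-2:1:1' survive intact.
        files, current = [], {}
        for raw in cli_output.replace('\r', '').split('\n'):
            line = raw.strip()
            if ':' in line:
                key, _, value = line.partition(':')
                current[key.strip()] = value.strip()
            elif not line and current:
                files.append(current)
                current = {}
        if current:
            files.append(current)
        # Keep only the blocks that actually describe a file.
        return [f for f in files if 'File Name' in f]

Applied to GET_FILE_LIST, this yields the two blocks whose File Name values (perf_device_SP2_...csv and perf_sasport_...csv) the handler would presumably fetch next, which is the step the test stubs out via MsHandler.down_perf_file.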
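Finally, the setup.py hunk wires the driver into discovery: the key 'macrosan macrosan' follows the '<vendor> <model>' convention of the neighbouring entries. Assuming delfin resolves drivers through setuptools entry points (the group name below is an assumption for illustration), the registration makes the class loadable like so:

    import pkg_resources

    # 'delfin.storage.drivers' is an assumed group name; the lookup key is
    # the '<vendor> <model>' string registered in setup.py.
    driver_cls = None
    for ep in pkg_resources.iter_entry_points('delfin.storage.drivers'):
        if ep.name == 'macrosan macrosan':
            # Resolves to delfin.drivers.macro_san.ms.ms_stor:MacroSanDriver
            driver_cls = ep.load()
            break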