diff --git a/delfin/alert_manager/snmp_validator.py b/delfin/alert_manager/snmp_validator.py
index b3b565e96..1c51f419e 100644
--- a/delfin/alert_manager/snmp_validator.py
+++ b/delfin/alert_manager/snmp_validator.py
@@ -37,7 +37,6 @@ def __init__(self):
         self.snmp_error_flag = {}
 
     def validate(self, ctxt, alert_source):
-        alert_source = dict(alert_source)
        engine_id = alert_source.get('engine_id')
        try:
            alert_source = self.validate_connectivity(alert_source)
diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index b67153f93..563e48897 100644
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -217,4 +217,5 @@ def _set_synced_if_ok(context, storage_id, resource_count):
             storage['sync_status'] > 0:
         raise exception.StorageIsSyncing(storage['id'])
     storage['sync_status'] = resource_count * constants.ResourceSync.START
+    storage['updated_at'] = current_time
     db.storage_update(context, storage['id'], storage)
diff --git a/delfin/drivers/api.py b/delfin/drivers/api.py
index 5d80b642c..b5073449b 100644
--- a/delfin/drivers/api.py
+++ b/delfin/drivers/api.py
@@ -45,7 +45,6 @@ def discover_storage(self, context, access_info):
         access_info = db.access_info_create(context, access_info)
         storage['id'] = access_info['storage_id']
         storage = db.storage_create(context, storage)
-        self.driver_manager.update_driver(storage['id'], driver)
         LOG.info("Storage found successfully.")
         return storage
 
@@ -63,7 +62,6 @@ def update_access_info(self, context, access_info):
         helper.check_storage_consistency(context, storage_id, storage_new)
         access_info = db.access_info_update(context, storage_id, access_info)
         db.storage_update(context, storage_id, storage_new)
-        self.driver_manager.update_driver(storage_id, driver)
         LOG.info("Access information updated successfully.")
         return access_info
 
@@ -114,7 +112,10 @@ def remove_trap_config(self, context, storage_id, trap_config):
 
     def parse_alert(self, context, storage_id, alert):
         """Parse alert data got from snmp trap server."""
-        driver = self.driver_manager.get_driver(context, storage_id=storage_id)
+        access_info = db.access_info_get(context, storage_id)
+        driver = self.driver_manager.get_driver(context,
+                                                invoke_on_load=False,
+                                                **access_info)
         return driver.parse_alert(context, alert)
 
     def clear_alert(self, context, storage_id, sequence_number):
diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py b/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py
index 55a94627b..9db7e33f9 100644
--- a/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py
+++ b/delfin/drivers/dell_emc/vmax/alert_handler/oid_mapper.py
@@ -32,14 +32,15 @@ class OidMapper(object):
     def __init__(self):
         pass
 
-    def map_oids(self, alert):
+    @staticmethod
+    def map_oids(alert):
         """Translate oids using static map."""
         alert_model = dict()
         for attr in alert:
             # Remove the instance number at the end of oid before mapping
             oid_str = attr.rsplit('.', 1)[0]
-            key = self.OID_MAP.get(oid_str, None)
+            key = OidMapper.OID_MAP.get(oid_str, None)
             alert_model[key] = alert[attr]
 
         return alert_model
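Note: with map_oids now a @staticmethod, callers no longer instantiate the
mapper. A minimal usage sketch (the trap OID and value are illustrative;
unmapped OIDs fall back to a None key):

    from delfin.drivers.dell_emc.vmax.alert_handler.oid_mapper import OidMapper

    trap = {'1.3.6.1.3.94.1.11.1.3.1': 'array-01'}  # varbind from a trap
    parsed = OidMapper.map_oids(trap)  # no OidMapper() instance required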
diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py b/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py
index d344b2f6b..00a36b0e6 100644
--- a/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py
+++ b/delfin/drivers/dell_emc/vmax/alert_handler/snmp_alerts.py
@@ -27,9 +27,6 @@
 class AlertHandler(object):
     """Alert handling functions for vmax snmp traps"""
 
-    def __init__(self):
-        self.oid_mapper = oid_mapper.OidMapper()
-
     # Translation of trap severity to alert model severity
     # Values are:
     # unknown 1, emergency 2, alert 3, critical 4, error 5,
@@ -53,12 +50,13 @@ def __init__(self):
         'emcAsyncEventComponentName',
         'emcAsyncEventSource')
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         """Parse alert data got from alert manager and fill the alert model."""
-        alert = self.oid_mapper.map_oids(alert)
+        alert = oid_mapper.OidMapper.map_oids(alert)
         # Check for mandatory alert attributes
-        for attr in self._mandatory_alert_attributes:
+        for attr in AlertHandler._mandatory_alert_attributes:
             if not alert.get(attr):
                 msg = "Mandatory information %s missing in alert message. " \
                       % attr
@@ -71,7 +69,7 @@ def parse_alert(self, context, alert):
         alert_model['alert_name'] = alert_mapper.alarm_id_name_mapping.get(
             alert_model['alert_id'], alert_model['alert_id'])
 
-        alert_model['severity'] = self.SEVERITY_MAP.get(
+        alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(
             alert['connUnitEventSeverity'],
             constants.Severity.INFORMATIONAL)
 
diff --git a/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py b/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py
index b3379710d..c1beaf532 100644
--- a/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py
+++ b/delfin/drivers/dell_emc/vmax/alert_handler/unisphere_alerts.py
@@ -51,7 +51,7 @@ def parse_queried_alerts(self, alert_list):
                 alert['severity'], constants.Severity.NOT_SPECIFIED)
 
             # category and type are not part of queried alerts
-            alert_model['category'] = constants.Category.EVENT
+            alert_model['category'] = constants.Category.FAULT
             alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
             alert_model['sequence_number'] = alert['alertId']
diff --git a/delfin/drivers/dell_emc/vmax/client.py b/delfin/drivers/dell_emc/vmax/client.py
index 3feecf297..62974b70f 100644
--- a/delfin/drivers/dell_emc/vmax/client.py
+++ b/delfin/drivers/dell_emc/vmax/client.py
@@ -52,8 +52,7 @@ def init_connection(self, access_info):
             LOG.error(msg)
             raise e
         except (exception.SSLCertificateFailed,
-                exception.WrongTlsVersion,
-                exception.CipherNotMatch) as e:
+                exception.SSLHandshakeFailed) as e:
             msg = ("Failed to connect to VMAX: {}".format(e))
             LOG.error(msg)
             raise
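Note: WrongTlsVersion and CipherNotMatch are folded into SSLHandshakeFailed
throughout this patch (see delfin/exception.py below). A minimal caller-side
sketch, assuming a VMAX client object:

    try:
        client.init_connection(access_info)
    except exception.SSLCertificateFailed:
        raise  # CA verification failed: fix or re-import the certificate
    except exception.SSLHandshakeFailed:
        raise  # any other TLS negotiation failure (version, ciphers, ...)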
diff --git a/delfin/drivers/dell_emc/vmax/rest.py b/delfin/drivers/dell_emc/vmax/rest.py
index eac938961..99caa7f30 100644
--- a/delfin/drivers/dell_emc/vmax/rest.py
+++ b/delfin/drivers/dell_emc/vmax/rest.py
@@ -15,24 +15,21 @@
 # under the License.
 
 import json
-import ssl
 import sys
 
-from oslo_log import log as logging
 import requests
 import requests.auth
 import requests.exceptions as r_exc
 import six
 import urllib3
-from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.poolmanager import PoolManager
+from oslo_log import log as logging
 
 from delfin import cryptor
 from delfin import exception
+from delfin import ssl_utils
 from delfin.common import alert_util
 from delfin.drivers.dell_emc.vmax import perf_utils, constants
 from delfin.i18n import _
-from delfin import ssl_utils
 
 LOG = logging.getLogger(__name__)
 
 SLOPROVISIONING = 'sloprovisioning'
@@ -53,24 +50,6 @@
 VERSION_GET_TIME_OUT = 10
 
 
-class HostNameIgnoringAdapter(HTTPAdapter):
-
-    def cert_verify(self, conn, url, verify, cert):
-        conn.assert_hostname = False
-        return super(HostNameIgnoringAdapter, self).cert_verify(
-            conn, url, verify, cert)
-
-    def init_poolmanager(self, connections, maxsize, block=False,
-                         **pool_kwargs):
-        self._pool_connections = connections
-        self._pool_maxsize = maxsize
-        self._pool_block = block
-        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
-                                       block=block, strict=True,
-                                       ssl_version=ssl.PROTOCOL_TLSv1,
-                                       **pool_kwargs)
-
-
 class VMaxRest(object):
     """Rest class based on Unisphere for VMax Rest API."""
 
@@ -115,7 +94,7 @@ def establish_rest_session(self):
             LOG.debug("Enable certificate verification, ca_path: {0}".format(
                 self.verify))
             session.verify = self.verify
-        session.mount("https://", ssl_utils.HostNameIgnoreAdapter())
+        session.mount("https://", ssl_utils.get_host_name_ignore_adapter())
 
         self.session = session
         return session
@@ -176,15 +155,10 @@ def request(self, target_uri, method, params=None, request_object=None,
                    "message: %(e)s") % {'base_uri': self.base_uri, 'e': e}
             LOG.error(msg)
             err_str = six.text_type(e)
-            if 'wrong ssl version' in err_str or \
-                    'sslv3 alert handshake failure' in err_str:
-                raise exception.WrongTlsVersion()
-            elif 'no cipher match' in err_str:
-                raise exception.CipherNotMatch()
-            elif 'certificate verify failed' in err_str:
+            if 'certificate verify failed' in err_str:
                 raise exception.SSLCertificateFailed()
             else:
-                raise e
+                raise exception.SSLHandshakeFailed()
         except (r_exc.Timeout, r_exc.ConnectionError,
                 r_exc.HTTPError) as e:
diff --git a/delfin/drivers/dell_emc/vmax/vmax.py b/delfin/drivers/dell_emc/vmax/vmax.py
index d9eb0db1a..cd3ac4018 100644
--- a/delfin/drivers/dell_emc/vmax/vmax.py
+++ b/delfin/drivers/dell_emc/vmax/vmax.py
@@ -93,7 +93,8 @@ def add_trap_config(self, context, trap_config):
     def remove_trap_config(self, context, trap_config):
         pass
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         return snmp_alerts.AlertHandler().parse_alert(context, alert)
 
     def clear_alert(self, context, sequence_number):
diff --git a/delfin/drivers/driver.py b/delfin/drivers/driver.py
index ca280ddc7..cf148c41c 100644
--- a/delfin/drivers/driver.py
+++ b/delfin/drivers/driver.py
@@ -73,8 +73,8 @@ def remove_trap_config(self, context, trap_config):
         """Remove trap receiver configuration from storage system."""
         pass
 
-    @abc.abstractmethod
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         """Parse alert data got from snmp trap server."""
 
         """
diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py
index 5f8d5f9b4..409387d2b 100644
--- a/delfin/drivers/fake_storage/__init__.py
+++ b/delfin/drivers/fake_storage/__init__.py
@@ -273,7 +273,8 @@ def add_trap_config(self, context, trap_config):
     def remove_trap_config(self, context, trap_config):
         pass
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         pass
 
     def clear_alert(self, context, alert):
diff --git a/delfin/drivers/hitachi/__init__.py b/delfin/drivers/hitachi/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/drivers/hitachi/vsp/__init__.py b/delfin/drivers/hitachi/vsp/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/drivers/hitachi/vsp/consts.py b/delfin/drivers/hitachi/vsp/consts.py
new file mode 100644
index 000000000..9bc3c3d35
--- /dev/null
+++ b/delfin/drivers/hitachi/vsp/consts.py
@@ -0,0 +1,20 @@
+# Copyright 2020 The SODA Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+SOCKET_TIMEOUT = 30
+ERROR_SESSION_INVALID_CODE = 403
+ERROR_SESSION_IS_BEING_USED_CODE = 409
+BLOCK_SIZE = 512
+MAX_LDEV_NUMBER_OF_RESTAPI = 16383
+SUPPORTED_VSP_SERIES = ('VSP G350', 'VSP G370', 'VSP G700', 'VSP G900',
+                        'VSP F350', 'VSP F370', 'VSP F700', 'VSP F900')
diff --git a/delfin/drivers/hitachi/vsp/rest_handler.py b/delfin/drivers/hitachi/vsp/rest_handler.py
new file mode 100644
index 000000000..9bc068de0
--- /dev/null
+++ b/delfin/drivers/hitachi/vsp/rest_handler.py
@@ -0,0 +1,202 @@
+# Copyright 2020 The SODA Authors.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import threading
+
+import requests
+import six
+from oslo_log import log as logging
+
+from delfin import cryptor
+from delfin import exception
+from delfin.drivers.hitachi.vsp import consts
+from delfin.drivers.utils.rest_client import RestClient
+
+LOG = logging.getLogger(__name__)
+
+
+class RestHandler(RestClient):
+    COMM_URL = '/ConfigurationManager/v1/objects/storages'
+    LOGOUT_URL = '/ConfigurationManager/v1/objects/sessions/'
+
+    AUTH_KEY = 'Authorization'
+
+    def __init__(self, **kwargs):
+        super(RestHandler, self).__init__(**kwargs)
+        self.session_lock = threading.Lock()
+        self.session_id = None
+        self.storage_device_id = None
+        self.device_model = None
+        self.serial_number = None
+
+    def call(self, url, data=None, method=None,
+             calltimeout=consts.SOCKET_TIMEOUT):
+        try:
+            res = self.do_call(url, data, method, calltimeout)
+            if (res.status_code == consts.ERROR_SESSION_INVALID_CODE
+                    or res.status_code ==
+                    consts.ERROR_SESSION_IS_BEING_USED_CODE):
+                LOG.error("Failed to get token=={0}=={1}, get token again"
+                          .format(res.status_code, res.text))
+                # if method is logout, return immediately
+                if method == 'DELETE' and RestHandler. \
+                        LOGOUT_URL in url:
+                    return res
+                self.rest_auth_token = None
+                access_session = self.login()
+                if access_session is not None:
+                    res = self. \
+                        do_call(url, data, method, calltimeout)
+                else:
+                    LOG.error('Login error, get access_session failed')
+            elif res.status_code == 503:
+                raise exception.InvalidResults(res.text)
+
+            return res
+
+        except Exception as e:
+            err_msg = "Get RestHandler.call failed: %s" % (six.text_type(e))
+            LOG.error(err_msg)
+            raise e
+
+    def get_rest_info(self, url, timeout=consts.SOCKET_TIMEOUT, data=None):
+        result_json = None
+        res = self.call(url, data, 'GET', timeout)
+        if res.status_code == 200:
+            result_json = res.json()
+        return result_json
+
+    def login(self):
+        try:
+            self.get_device_id()
+            access_session = self.rest_auth_token
+            if self.san_address:
+                url = '%s/%s/sessions' % \
+                    (RestHandler.COMM_URL,
+                     self.storage_device_id)
+                data = {}
+
+                with self.session_lock:
+                    if self.session is None:
+                        self.init_http_head()
+                    self.session.auth = \
+                        requests.auth.HTTPBasicAuth(
+                            self.rest_username,
+                            cryptor.decode(self.rest_password))
+                    res = self. \
+                        do_call(url, data, 'POST', 10)
+                    if res.status_code == 200:
+                        result = res.json()
+                        self.session_id = result.get('sessionId')
+                        access_session = 'Session %s' % result.get('token')
+                        self.rest_auth_token = access_session
+                        self.session.headers[
+                            RestHandler.AUTH_KEY] = access_session
+                    else:
+                        LOG.error("Login error. URL: %(url)s\n"
+                                  "Reason: %(reason)s.",
+                                  {"url": url, "reason": res.text})
+                        if 'authentication failed' in res.text:
+                            raise exception.InvalidUsernameOrPassword()
+                        else:
+                            raise exception.BadResponse(res.text)
+            else:
+                LOG.error('Login Parameter error')
+
+            return access_session
+        except Exception as e:
+            LOG.error("Login error: %s", six.text_type(e))
+            raise e
+
+    def logout(self):
+        try:
+            url = RestHandler.LOGOUT_URL
+            if self.session_id is not None:
+                url = '%s/%s/sessions/%s' % \
+                    (RestHandler.COMM_URL,
+                     self.storage_device_id,
+                     self.session_id)
+                if self.san_address:
+                    self.call(url, method='DELETE')
+                    self.session_id = None
+                    self.storage_device_id = None
+                    self.device_model = None
+                    self.serial_number = None
+                    self.session = None
+                    self.rest_auth_token = None
+            else:
+                LOG.error('logout error: session id not found')
+        except Exception as err:
+            LOG.error('logout error: {}'.format(err))
+            raise exception.StorageBackendException(
+                reason='Failed to Logout from restful')
+
+    def get_device_id(self):
+        try:
+            if self.session is None:
+                self.init_http_head()
+            storage_systems = self.get_system_info()
+            system_info = storage_systems.get('data')
+            for system in system_info:
+                if system.get('model') in consts.SUPPORTED_VSP_SERIES:
+                    if system.get('ctl1Ip') == self.rest_host or \
+                            system.get('ctl2Ip') == self.rest_host:
+                        self.storage_device_id = system.get('storageDeviceId')
+                        self.device_model = system.get('model')
+                        self.serial_number = system.get('serialNumber')
+                        break
+                elif system.get('svpIp') == self.rest_host:
+                    self.storage_device_id = system.get('storageDeviceId')
+                    self.device_model = system.get('model')
+                    self.serial_number = system.get('serialNumber')
+                    break
+            if self.storage_device_id is None:
+                LOG.error("Failed to get device id: unsupported model "
+                          "or host ip mismatch")
+        except Exception as e:
+            LOG.error("Get device id error: %s", six.text_type(e))
+            raise e
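+    # NOTE: a None result from get_firmware_version() below is treated by
+    # the driver as a connectivity failure and maps the array to OFFLINE.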
+    def get_firmware_version(self):
+        url = '%s/%s' % \
+            (RestHandler.COMM_URL, self.storage_device_id)
+        result_json = self.get_rest_info(url)
+        if result_json is None:
+            return None
+        firmware_version = result_json.get('dkcMicroVersion')
+
+        return firmware_version
+
+    def get_capacity(self):
+        url = '%s/%s/total-capacities/instance' % \
+            (RestHandler.COMM_URL, self.storage_device_id)
+        result_json = self.get_rest_info(url)
+        return result_json
+
+    def get_all_pools(self):
+        url = '%s/%s/pools' % \
+            (RestHandler.COMM_URL, self.storage_device_id)
+        result_json = self.get_rest_info(url)
+        return result_json
+
+    def get_all_volumes(self):
+        url = '%s/%s/ldevs?ldevOption=defined&count=%s' % \
+            (RestHandler.COMM_URL, self.storage_device_id,
+             consts.MAX_LDEV_NUMBER_OF_RESTAPI)
+        result_json = self.get_rest_info(url)
+        return result_json
+
+    def get_system_info(self):
+        result_json = self.get_rest_info(RestHandler.COMM_URL, timeout=10)
+
+        return result_json
diff --git a/delfin/drivers/hitachi/vsp/vsp_stor.py b/delfin/drivers/hitachi/vsp/vsp_stor.py
new file mode 100644
index 000000000..e3599885c
--- /dev/null
+++ b/delfin/drivers/hitachi/vsp/vsp_stor.py
@@ -0,0 +1,316 @@
+# Copyright 2020 The SODA Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import time
+
+import six
+from oslo_log import log
+from oslo_utils import units
+
+from delfin import exception
+from delfin.common import alert_util
+from delfin.common import constants
+from delfin.drivers import driver
+from delfin.drivers.hitachi.vsp import consts
+from delfin.drivers.hitachi.vsp import rest_handler
+
+LOG = log.getLogger(__name__)
+
+
+class HitachiVspDriver(driver.StorageDriver):
+    POOL_STATUS_MAP = {"POLN": constants.StoragePoolStatus.NORMAL,
+                       "POLF": constants.StoragePoolStatus.NORMAL,
+                       "POLS": constants.StoragePoolStatus.ABNORMAL,
+                       "POLE": constants.StoragePoolStatus.OFFLINE
+                       }
+    ALERT_LEVEL_MAP = {"Acute": constants.Severity.CRITICAL,
+                       "Serious": constants.Severity.MAJOR,
+                       "Moderate": constants.Severity.WARNING,
+                       "Service": constants.Severity.INFORMATIONAL
+                       }
+    TRAP_ALERT_LEVEL_MAP = {
+        "1.3.6.1.4.1.116.3.11.4.1.1.0.1": constants.Severity.CRITICAL,
+        "1.3.6.1.4.1.116.3.11.4.1.1.0.2": constants.Severity.MAJOR,
+        "1.3.6.1.4.1.116.3.11.4.1.1.0.3": constants.Severity.WARNING,
+        "1.3.6.1.4.1.116.3.11.4.1.1.0.4": constants.Severity.INFORMATIONAL
+    }
+
+    TIME_PATTERN = '%Y-%m-%dT%H:%M:%S'
+
+    REFCODE_OID = '1.3.6.1.4.1.116.5.11.4.2.3'
+    DESC_OID = '1.3.6.1.4.1.116.5.11.4.2.7'
+    TRAP_TIME_OID = '1.3.6.1.4.1.116.5.11.4.2.6'
+    TRAP_DATE_OID = '1.3.6.1.4.1.116.5.11.4.2.5'
+    TRAP_NICKNAME_OID = '1.3.6.1.4.1.116.5.11.4.2.2'
+    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'
+    SECONDS_TO_MS = 1000
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.rest_handler = rest_handler.RestHandler(**kwargs)
+        self.rest_handler.login()
+
+    def reset_connection(self, context, **kwargs):
+        self.rest_handler.logout()
+        self.rest_handler.verify = kwargs.get('verify', False)
+        self.rest_handler.login()
+
+    def close_connection(self):
+        self.rest_handler.logout()
+
+    def get_storage(self, context):
+        self.rest_handler.get_device_id()
+        if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES:
+            capacity_json = self.rest_handler.get_capacity()
+            free_capacity = capacity_json.get("total").get("freeSpace") * \
+                units.Ki
+            total_capacity = \
+                capacity_json.get("total").get("totalCapacity") * units.Ki
+        else:
+            free_capacity = 0
+            total_capacity = 0
+            pools_info = self.rest_handler.get_all_pools()
+            if pools_info is not None:
+                pools = pools_info.get('data')
+                for pool in pools:
+                    total_cap = \
+                        int(pool.get(
+                            'totalPoolCapacity')) * units.Mi
+                    free_cap = int(
+                        pool.get(
+                            'availableVolumeCapacity')) * units.Mi
+                    free_capacity = free_capacity + free_cap
+                    total_capacity = total_capacity + total_cap
+        firmware_version = self.rest_handler.get_firmware_version()
+        status = constants.StorageStatus.OFFLINE
+        if firmware_version is not None:
+            status = constants.StorageStatus.NORMAL
+        system_name = '%s_%s' % (self.rest_handler.device_model,
+                                 self.rest_handler.rest_host)
+
+        s = {
+            'name': system_name,
+            'vendor': 'Hitachi',
+            'description': 'Hitachi VSP Storage',
+            'model': str(self.rest_handler.device_model),
+            'status': status,
+            'serial_number': str(self.rest_handler.serial_number),
+            'firmware_version': str(firmware_version),
+            'location': '',
+            'raw_capacity': int(total_capacity),
+            'total_capacity': int(total_capacity),
+            'used_capacity': int(total_capacity - free_capacity),
+            'free_capacity': int(free_capacity)
+        }
+        return s
+
+    def list_storage_pools(self, context):
+        try:
+            pools_info = self.rest_handler.get_all_pools()
+            pool_list = []
+            pools = pools_info.get('data')
+            for pool in pools:
+                status = self.POOL_STATUS_MAP.get(
+                    pool.get('poolStatus'),
+                    constants.StoragePoolStatus.ABNORMAL
+                )
+                storage_type = constants.StorageType.BLOCK
+                total_cap = \
+                    int(pool.get('totalPoolCapacity')) * units.Mi
+                free_cap = int(
+                    pool.get('availableVolumeCapacity')) * units.Mi
+                used_cap = total_cap - free_cap
+                p = {
+                    'name': pool.get('poolName'),
+                    'storage_id': self.storage_id,
+                    'native_storage_pool_id': str(pool.get('poolId')),
+                    'description': 'Hitachi VSP Pool',
+                    'status': status,
+                    'storage_type': storage_type,
+                    'total_capacity': int(total_cap),
+                    'used_capacity': int(used_cap),
+                    'free_capacity': int(free_cap),
+                }
+                pool_list.append(p)
+
+            return pool_list
+        except exception.DelfinException as err:
+            err_msg = "Failed to get pool metrics from hitachi vsp: %s" % \
+                      (six.text_type(err))
+            LOG.error(err_msg)
+            raise err
+        except Exception as e:
+            err_msg = "Failed to get pool metrics from hitachi vsp: %s" % \
+                      (six.text_type(e))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
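+    # NOTE: the REST API reports LDEV capacities in 512-byte blocks; they
+    # are converted to bytes below via consts.BLOCK_SIZE.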
+    def list_volumes(self, context):
+        try:
+            volumes_info = self.rest_handler.get_all_volumes()
+
+            volume_list = []
+            volumes = volumes_info.get('data')
+            for volume in volumes:
+                if volume.get('emulationType') == 'NOT DEFINED':
+                    continue
+                orig_pool_id = volume.get('poolId')
+                compressed = False
+                deduplicated = False
+                if volume.get('dataReductionMode') == \
+                        'compression_deduplication':
+                    deduplicated = True
+                    compressed = True
+                if volume.get('dataReductionMode') == 'compression':
+                    compressed = True
+                if volume.get('status') == 'NML':
+                    status = 'normal'
+                else:
+                    status = 'abnormal'
+
+                vol_type = constants.VolumeType.THICK
+                for voltype in volume.get('attributes'):
+                    if voltype == 'HTI':
+                        vol_type = constants.VolumeType.THIN
+
+                total_cap = \
+                    int(volume.get('blockCapacity')) * consts.BLOCK_SIZE
+                used_cap = \
+                    int(volume.get('blockCapacity')) * consts.BLOCK_SIZE
+                # The device reports only subscribed capacity, so free
+                # capacity is always 0
+                free_cap = 0
+                if volume.get('label'):
+                    name = volume.get('label')
+                else:
+                    name = 'ldev_%s' % str(volume.get('ldevId'))
+
+                v = {
+                    'name': name,
+                    'storage_id': self.storage_id,
+                    'description': 'Hitachi VSP volume',
+                    'status': status,
+                    'native_volume_id': str(volume.get('ldevId')),
+                    'native_storage_pool_id': orig_pool_id,
+                    'type': vol_type,
+                    'total_capacity': total_cap,
+                    'used_capacity': used_cap,
+                    'free_capacity': free_cap,
+                    'compressed': compressed,
+                    'deduplicated': deduplicated,
+                }
+
+                volume_list.append(v)
+
+            return volume_list
+        except exception.DelfinException as err:
+            err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \
+                      (six.text_type(err))
+            LOG.error(err_msg)
+            raise err
+        except Exception as e:
+            err_msg = "Failed to get volumes metrics from hitachi vsp: %s" % \
+                      (six.text_type(e))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
+
+    def list_controllers(self, context):
+        pass
+
+    def list_ports(self, context):
+        pass
+
+    def list_disks(self, context):
+        pass
+
+    @staticmethod
+    def parse_queried_alerts(alerts, alert_list, query_para=None):
+        for alert in alerts:
+            occur_time = int(time.mktime(time.strptime(
+                alert.get('occurenceTime'),
+                HitachiVspDriver.TIME_PATTERN))) * \
+                HitachiVspDriver.SECONDS_TO_MS
+            if not alert_util.is_alert_in_time_range(query_para,
+                                                     occur_time):
+                continue
+            a = {
+                'location': alert.get('location'),
+                'alarm_id': alert.get('alertId'),
+                'sequence_number': alert.get('alertIndex'),
+                'description': alert.get('errorDetail'),
+                'alert_name': alert.get('errorSection'),
+                'resource_type': constants.DEFAULT_RESOURCE_TYPE,
+                'occur_time': occur_time,
+                'category': constants.Category.FAULT,
+                'type': constants.EventType.EQUIPMENT_ALARM,
+                'severity': HitachiVspDriver.ALERT_LEVEL_MAP.get(
+                    alert.get('errorLevel'),
+                    constants.Severity.INFORMATIONAL
+                ),
+            }
+            alert_list.append(a)
+
+    def list_alerts(self, context, query_para=None):
+        alert_list = []
+        if self.rest_handler.device_model in consts.SUPPORTED_VSP_SERIES:
+            alerts_info_ctl1 = self.rest_handler.get_alerts('type=CTL1')
+            alerts_info_ctl2 = self.rest_handler.get_alerts('type=CTL2')
+            alerts_info_dkc = self.rest_handler.get_alerts('type=DKC')
+            HitachiVspDriver.parse_queried_alerts(alerts_info_ctl1,
+                                                  alert_list, query_para)
+            HitachiVspDriver.parse_queried_alerts(alerts_info_ctl2,
+                                                  alert_list, query_para)
+            HitachiVspDriver.parse_queried_alerts(alerts_info_dkc,
+                                                  alert_list, query_para)
+
+        return alert_list
+
+    def add_trap_config(self, context, trap_config):
+        pass
+
+    def remove_trap_config(self, context, trap_config):
+        pass
+
+    @staticmethod
+    def parse_alert(context, alert):
+        try:
+            alert_model = dict()
+            alert_model['alert_id'] = alert.get(HitachiVspDriver.REFCODE_OID)
+            alert_model['alert_name'] = alert.get(HitachiVspDriver.DESC_OID)
+            severity = HitachiVspDriver.TRAP_ALERT_LEVEL_MAP.get(
+                alert.get(HitachiVspDriver.OID_SEVERITY),
+                constants.Severity.INFORMATIONAL
+            )
+            alert_model['severity'] = severity
+            alert_model['category'] = constants.Category.FAULT
+            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
+            alert_time = '%s%s' % (alert.get(HitachiVspDriver.TRAP_DATE_OID),
+                                   alert.get(HitachiVspDriver.TRAP_TIME_OID))
+            pattern = '%Y/%m/%d%H:%M:%S'
+            occur_time = time.strptime(alert_time, pattern)
+            alert_model['occur_time'] = int(time.mktime(occur_time) *
+                                            HitachiVspDriver.SECONDS_TO_MS)
+            alert_model['description'] = alert.get(HitachiVspDriver.DESC_OID)
+            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
+            alert_model['location'] = alert.get(HitachiVspDriver.
+                                                TRAP_NICKNAME_OID)
+
+            return alert_model
+        except Exception as e:
+            LOG.error(e)
+            msg = ("Failed to build alert model as some attributes missing in"
+                   " alert message:%s") % (six.text_type(e))
+            raise exception.InvalidResults(msg)
+
+    def clear_alert(self, context, alert):
+        pass
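Note: a minimal usage sketch of the static trap parser above; the varbind
values are illustrative, not captured from a real array.

    from delfin.drivers.hitachi.vsp.vsp_stor import HitachiVspDriver

    trap = {
        '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.116.3.11.4.1.1.0.2',  # severity
        '1.3.6.1.4.1.116.5.11.4.2.3': '0x7ff123',            # reference code
        '1.3.6.1.4.1.116.5.11.4.2.5': '2020/12/01',          # trap date
        '1.3.6.1.4.1.116.5.11.4.2.6': '09:30:00',            # trap time
        '1.3.6.1.4.1.116.5.11.4.2.7': 'DKC error detected',  # description
        '1.3.6.1.4.1.116.5.11.4.2.2': 'VSP_G700_01',         # nickname
    }
    model = HitachiVspDriver.parse_alert(None, trap)  # context is unused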
diff --git a/delfin/drivers/hpe/hpe_3par/alert_handler.py b/delfin/drivers/hpe/hpe_3par/alert_handler.py
index f5e728bb4..52df714f9 100644
--- a/delfin/drivers/hpe/hpe_3par/alert_handler.py
+++ b/delfin/drivers/hpe/hpe_3par/alert_handler.py
@@ -90,10 +90,11 @@ def __init__(self, rest_handler=None, ssh_handler=None):
         self.rest_handler = rest_handler
         self.ssh_handler = ssh_handler
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         """Parse alert data got from alert manager and fill the alert model."""
         # Check for mandatory alert attributes
-        for attr in self._mandatory_alert_attributes:
+        for attr in AlertHandler._mandatory_alert_attributes:
             if not alert.get(attr):
                 msg = "Mandatory information %s missing in alert message. " \
                       % attr
@@ -103,17 +104,17 @@ def parse_alert(self, context, alert):
         alert_model = dict()
         # These information are sourced from device registration info
         alert_model['alert_id'] = alert.get(AlertHandler.OID_MESSAGECODE)
-        alert_model['alert_name'] = self.get_alert_type(alert.get(
+        alert_model['alert_name'] = AlertHandler.get_alert_type(alert.get(
             AlertHandler.OID_MESSAGECODE))
-        alert_model['severity'] = self.SEVERITY_MAP.get(
+        alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(
             alert.get(AlertHandler.OID_SEVERITY),
             constants.Severity.NOT_SPECIFIED)
-        alert_model['category'] = self.CATEGORY_MAP.get(
+        alert_model['category'] = AlertHandler.CATEGORY_MAP.get(
             alert.get(AlertHandler.OID_STATE),
             constants.Category.NOT_SPECIFIED)
         alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
         alert_model['sequence_number'] = alert.get(AlertHandler.OID_ID)
-        alert_model['occur_time'] = self.get_time_stamp(
+        alert_model['occur_time'] = AlertHandler.get_time_stamp(
             alert.get(AlertHandler.OID_TIMEOCCURRED))
         alert_model['description'] = alert.get(AlertHandler.OID_DETAILS)
         alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
@@ -155,12 +156,13 @@ def clear_alert(self, context, alert):
             LOG.error(err_msg)
             raise exception.InvalidResults(err_msg)
 
-    def judge_alert_time(self, map, query_para):
+    @staticmethod
+    def judge_alert_time(map, query_para):
         if len(map) <= 1:
             return False
         if query_para is None and len(map) > 1:
             return True
-        occur_time = self.get_time_stamp(map.get('occur_time'))
+        occur_time = AlertHandler.get_time_stamp(map.get('occur_time'))
         if query_para.get('begin_time') and query_para.get('end_time'):
             if occur_time >= int(query_para.get('begin_time')) and \
                     occur_time <= int(query_para.get('end_time')):
@@ -187,10 +189,10 @@ def handle_alters(self, alertlist, query_para):
                     value = self.ALERT_KEY_MAP.get(
                         strinfo[0]) and strinfo[1] or ''
                     map[key] = value
-                elif self.judge_alert_time(map, query_para):
+                elif AlertHandler.judge_alert_time(map, query_para):
                     severity = self.ALERT_LEVEL_MAP.get(map.get('severity'))
                     category = map.get('category') == 'New' and 'Fault' or ''
-                    occur_time = self.get_time_stamp(map.get('occur_time'))
+                    occur_time = AlertHandler.get_time_stamp(map.get('occur_time'))
                     alert_id = map.get('message_code') and str(int(map.get(
                         'message_code'), 16)) or ''
                     alert_model = {
@@ -232,7 +234,8 @@ def list_alerts(self, context, query_para):
             LOG.error(err_msg)
             raise exception.InvalidResults(err_msg)
 
-    def get_time_stamp(self, time_str):
+    @staticmethod
+    def get_time_stamp(time_str):
         """
         Time stamp to time conversion
         """
         time_stamp = ''
@@ -247,7 +250,8 @@ def get_time_stamp(self, time_str):
 
         return time_stamp
 
-    def get_alert_type(self, message_code):
+    @staticmethod
+    def get_alert_type(message_code):
         """
         Get alert type
diff --git a/delfin/drivers/hpe/hpe_3par/component_handler.py b/delfin/drivers/hpe/hpe_3par/component_handler.py
index 7341fc9e5..7f50e05e8 100644
--- a/delfin/drivers/hpe/hpe_3par/component_handler.py
+++ b/delfin/drivers/hpe/hpe_3par/component_handler.py
@@ -68,9 +68,9 @@ def get_storage(self, context):
                 status = constants.StorageStatus.ABNORMAL
                 LOG.error('SSH check health Failed!')
 
-        used_cap = int(storage.get('totalCapacityMiB')) * units.Mi
         free_cap = int(storage.get('freeCapacityMiB')) * units.Mi
-        total_cap = used_cap + free_cap
+        used_cap = int(storage.get('allocatedCapacityMiB')) * units.Mi
+        total_cap = free_cap + used_cap
         raw_cap = int(storage.get('totalCapacityMiB')) * units.Mi
         result = {
             'name': storage.get('name'),
diff --git a/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py b/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py
index b69977625..94b589eb4 100644
--- a/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py
+++ b/delfin/drivers/hpe/hpe_3par/hpe_3parstor.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import six
 from oslo_log import log
 
 from delfin import context
@@ -48,7 +49,11 @@ def __init__(self, **kwargs):
             rest_handler=self.rest_handler, ssh_handler=self.ssh_handler)
 
     def reset_connection(self, context, **kwargs):
-        self.rest_handler.logout()
+        try:
+            self.rest_handler.logout()
+        except Exception as e:
+            LOG.warning('logout failed when resetting connection, '
+                        'reason is %s' % six.text_type(e))
         self.rest_client.verify = kwargs.get('verify', False)
         self.rest_handler.login()
 
@@ -84,8 +89,9 @@ def add_trap_config(self, context, trap_config):
     def remove_trap_config(self, context, trap_config):
         pass
 
-    def parse_alert(self, context, alert):
-        return self.alert_handler.parse_alert(context, alert)
+    @staticmethod
+    def parse_alert(context, alert):
+        return alert_handler.AlertHandler().parse_alert(context, alert)
 
     def clear_alert(self, context, alert):
         return self.alert_handler.clear_alert(context, alert)
diff --git a/delfin/drivers/hpe/hpe_3par/rest_handler.py b/delfin/drivers/hpe/hpe_3par/rest_handler.py
index 14a0628c6..97f4cbb2f 100644
--- a/delfin/drivers/hpe/hpe_3par/rest_handler.py
+++ b/delfin/drivers/hpe/hpe_3par/rest_handler.py
@@ -88,7 +88,8 @@ def call(self, url, data=None, method=None):
                 LOG.error('Rest exec failed')
 
             return res
-
+        except exception.SSLCertificateFailed:
+            raise
         except Exception as e:
             err_msg = "Get RestHandler.call failed: %s" % (six.text_type(e))
             LOG.error(err_msg)
diff --git a/delfin/drivers/hpe/hpe_3par/ssh_handler.py b/delfin/drivers/hpe/hpe_3par/ssh_handler.py
index d55d130b4..d5c69e4f4 100644
--- a/delfin/drivers/hpe/hpe_3par/ssh_handler.py
+++ b/delfin/drivers/hpe/hpe_3par/ssh_handler.py
@@ -18,6 +18,7 @@
 from oslo_log import log as logging
 
 from delfin import exception
+from delfin import utils
 from delfin.drivers.utils.ssh_client import SSHClient
 
 
@@ -98,6 +99,7 @@ def remove_alerts(self, alert_id):
         Currently not implemented removes command : removealert
         """
         ssh_client = SSHClient(**self.kwargs)
+        utils.check_ssh_injection([alert_id])
         command_str = SSHHandler.HPE3PAR_COMMAND_REMOVEALERT % alert_id
         res = ssh_client.do_exec(command_str)
         if res:
diff --git a/delfin/drivers/huawei/oceanstor/alert_handler.py b/delfin/drivers/huawei/oceanstor/alert_handler.py
index 42bea4630..d2ad6e378 100644
--- a/delfin/drivers/huawei/oceanstor/alert_handler.py
+++ b/delfin/drivers/huawei/oceanstor/alert_handler.py
@@ -27,8 +27,6 @@
 class AlertHandler(object):
     """Alert handling functions for huawei oceanstor driver"""
 
-    def __init__(self):
-        self.oid_mapper = oid_mapper.OidMapper()
 
     TIME_PATTERN = "%Y-%m-%d,%H:%M:%S.%f"
 
@@ -76,13 +74,14 @@ def __init__(self):
         'hwIsmReportingAlarmFaultTime'
     )
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         """Parse alert data and fill the alert model."""
         # Check for mandatory alert attributes
-        alert = self.oid_mapper.map_oids(alert)
+        alert = oid_mapper.OidMapper.map_oids(alert)
         LOG.info("Get alert from storage: %s", alert)
-        for attr in self._mandatory_alert_attributes:
+        for attr in AlertHandler._mandatory_alert_attributes:
             if not alert.get(attr):
                 msg = "Mandatory information %s missing in alert message. " \
                       % attr
@@ -93,29 +92,29 @@
         # These information are sourced from device registration info
         alert_model['alert_id'] = alert['hwIsmReportingAlarmAlarmID']
         alert_model['alert_name'] = alert['hwIsmReportingAlarmFaultTitle']
-        alert_model['severity'] = self.SEVERITY_MAP.get(
+        alert_model['severity'] = AlertHandler.SEVERITY_MAP.get(
             alert['hwIsmReportingAlarmFaultLevel'],
             constants.Severity.NOT_SPECIFIED)
-        alert_model['category'] = self.CATEGORY_MAP.get(
+        alert_model['category'] = AlertHandler.CATEGORY_MAP.get(
             alert['hwIsmReportingAlarmFaultCategory'],
             constants.Category.NOT_SPECIFIED)
-        alert_model['type'] = self.TYPE_MAP.get(
+        alert_model['type'] = AlertHandler.TYPE_MAP.get(
             alert['hwIsmReportingAlarmFaultType'],
             constants.EventType.NOT_SPECIFIED)
         alert_model['sequence_number'] \
             = alert['hwIsmReportingAlarmSerialNo']
         occur_time = datetime.strptime(
             alert['hwIsmReportingAlarmFaultTime'],
-            self.TIME_PATTERN)
+            AlertHandler.TIME_PATTERN)
         alert_model['occur_time'] = int(occur_time.timestamp() * 1000)
         description = alert['hwIsmReportingAlarmAdditionInfo']
-        if self._is_hex(description):
+        if AlertHandler._is_hex(description):
             description = bytes.fromhex(description[2:]).decode('ascii')
         alert_model['description'] = description
 
         recovery_advice = alert['hwIsmReportingAlarmRestoreAdvice']
-        if self._is_hex(recovery_advice):
+        if AlertHandler._is_hex(recovery_advice):
             recovery_advice = bytes.fromhex(
                 recovery_advice[2:]).decode('ascii')
 
@@ -189,7 +188,8 @@ def clear_alert(self, context, storage_id, alert):
         """Clear alert from storage system."""
         pass
 
-    def _is_hex(self, value):
+    @staticmethod
+    def _is_hex(value):
         try:
             int(value, 16)
         except ValueError:
diff --git a/delfin/drivers/huawei/oceanstor/oceanstor.py b/delfin/drivers/huawei/oceanstor/oceanstor.py
index d170525be..b8783f62d 100644
--- a/delfin/drivers/huawei/oceanstor/oceanstor.py
+++ b/delfin/drivers/huawei/oceanstor/oceanstor.py
@@ -360,7 +360,8 @@ def add_trap_config(self, context, trap_config):
     def remove_trap_config(self, context, trap_config):
         pass
 
-    def parse_alert(self, context, alert):
+    @staticmethod
+    def parse_alert(context, alert):
         return alert_handler.AlertHandler().parse_alert(context, alert)
 
     def clear_alert(self, context, sequence_number):
diff --git a/delfin/drivers/huawei/oceanstor/oid_mapper.py b/delfin/drivers/huawei/oceanstor/oid_mapper.py
index 99d464554..2ee3ff64d 100644
--- a/delfin/drivers/huawei/oceanstor/oid_mapper.py
+++ b/delfin/drivers/huawei/oceanstor/oid_mapper.py
@@ -34,14 +34,15 @@ class OidMapper(object):
     def __init__(self):
         pass
 
-    def map_oids(self, alert):
+    @staticmethod
+    def map_oids(alert):
         """Translate oids using static map."""
         alert_model = dict()
         for attr in alert:
             # Remove the instance number at the end of oid before mapping
             oid_str = attr.rsplit('.', 1)[0]
-            key = self.OID_MAP.get(oid_str, None)
+            key = OidMapper.OID_MAP.get(oid_str, None)
             alert_model[key] = alert[attr]
 
         return alert_model
diff --git a/delfin/drivers/huawei/oceanstor/rest_client.py b/delfin/drivers/huawei/oceanstor/rest_client.py
index dbdf4e39d..347c3a93f 100644
--- a/delfin/drivers/huawei/oceanstor/rest_client.py
+++ b/delfin/drivers/huawei/oceanstor/rest_client.py
@@ -103,11 +103,14 @@ def do_call(self, url, data, method,
 
         try:
             res = func(url, **kwargs)
-        except requests.exceptions.SSLError as ssl_exc:
-            LOG.exception('SSLError exception from server: %(url)s.'
-                          ' Error: %(err)s', {'url': url, 'err': ssl_exc})
-            return {"error": {"code": consts.ERROR_CONNECT_TO_SERVER,
-                              "description": "Retry with valid certificate."}}
+        except requests.exceptions.SSLError as e:
+            LOG.error('SSLError exception from server: %(url)s.'
+                      ' Error: %(err)s', {'url': url, 'err': e})
+            err_str = six.text_type(e)
+            if 'certificate verify failed' in err_str:
+                raise exception.SSLCertificateFailed()
+            else:
+                raise exception.SSLHandshakeFailed()
         except Exception as err:
             LOG.exception('Bad response from server: %(url)s.'
                           ' Error: %(err)s', {'url': url, 'err': err})
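Note: with this change do_call no longer converts SSL failures into an error
dict; it raises. A caller-side sketch (client stands for any object exposing
this do_call):

    try:
        res = client.do_call(url, None, 'GET')
    except exception.SSLCertificateFailed:
        pass  # certificate verification failed: import a valid certificate
    except exception.SSLHandshakeFailed:
        pass  # TLS negotiation itself failed (protocol version, ciphers...)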
diff --git a/delfin/drivers/ibm/storwize_svc/__init__.py b/delfin/drivers/ibm/storwize_svc/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/drivers/ibm/storwize_svc/ssh_handler.py b/delfin/drivers/ibm/storwize_svc/ssh_handler.py
new file mode 100644
index 000000000..1be7f71f6
--- /dev/null
+++ b/delfin/drivers/ibm/storwize_svc/ssh_handler.py
@@ -0,0 +1,386 @@
+# Copyright 2020 The SODA Authors.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import time
+
+import paramiko
+import six
+from oslo_log import log as logging
+from oslo_utils import units
+
+from delfin import exception, utils
+from delfin.common import constants, alert_util
+from delfin.drivers.utils.ssh_client import SSHPool
+
+LOG = logging.getLogger(__name__)
+
+
+class SSHHandler(object):
+    OID_ERR_ID = '1.3.6.1.4.1.2.6.190.4.3'
+    OID_SEQ_NUMBER = '1.3.6.1.4.1.2.6.190.4.9'
+    OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10'
+    OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11'
+    OID_OBJ_NAME = '1.3.6.1.4.1.2.6.190.4.17'
+    OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'
+
+    TRAP_SEVERITY_MAP = {
+        '1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL,
+        '1.3.6.1.4.1.2.6.190.2': constants.Severity.WARNING,
+        '1.3.6.1.4.1.2.6.190.3': constants.Severity.INFORMATIONAL,
+    }
+
+    SEVERITY_MAP = {"warning": "Warning",
+                    "informational": "Informational",
+                    "error": "Major"
+                    }
+
+    SECONDS_TO_MS = 1000
+
+    def __init__(self, **kwargs):
+        self.ssh_pool = SSHPool(**kwargs)
+
+    @staticmethod
+    def handle_split(split_str, split_char, arr_number):
+        split_value = ''
+        if split_str is not None and split_str != '':
+            tmp_value = split_str.split(split_char, 1)
+            if arr_number == 1 and len(tmp_value) > 1:
+                split_value = tmp_value[arr_number].strip()
+            elif arr_number == 0:
+                split_value = tmp_value[arr_number].strip()
+        return split_value
+
+    @staticmethod
+    def parse_alert(alert):
+        try:
+            alert_model = dict()
+            alert_name = SSHHandler.handle_split(alert.get(
+                SSHHandler.OID_ERR_ID), ':', 1)
+            error_info = SSHHandler.handle_split(alert.get(
+                SSHHandler.OID_ERR_ID), ':', 0)
+            alert_id = SSHHandler.handle_split(error_info, '=', 1)
+            severity = SSHHandler.TRAP_SEVERITY_MAP.get(
+                alert.get(SSHHandler.OID_SEVERITY),
+                constants.Severity.INFORMATIONAL
+            )
+            alert_model['alert_id'] = str(alert_id)
+            alert_model['alert_name'] = alert_name
+            alert_model['severity'] = severity
+            alert_model['category'] = constants.Category.FAULT
+            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
+            alert_model['sequence_number'] = SSHHandler. \
+                handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1)
+            timestamp = SSHHandler. \
+                handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1)
+            time_type = '%a %b %d %H:%M:%S %Y'
+            occur_time = int(time.mktime(time.strptime(
+                timestamp,
+                time_type)))
+            alert_model['occur_time'] = int(occur_time * SSHHandler.
+                                            SECONDS_TO_MS)
+            alert_model['description'] = alert_name
+            alert_model['resource_type'] = SSHHandler.handle_split(
+                alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1)
+            alert_model['location'] = SSHHandler.handle_split(alert.get(
+                SSHHandler.OID_OBJ_NAME), '=', 1)
+            return alert_model
+        except Exception as e:
+            LOG.error(e)
+            msg = ("Failed to build alert model as some attributes missing "
+                   "in alert message:%s.") % (six.text_type(e))
+            raise exception.InvalidResults(msg)
+
+    def login(self):
+        try:
+            with self.ssh_pool.item() as ssh:
+                SSHHandler.do_exec('lssystem', ssh)
+        except Exception as e:
+            LOG.error("Failed to login to ibm storwize_svc %s" %
+                      (six.text_type(e)))
+            raise e
+
+    @staticmethod
+    def do_exec(command_str, ssh):
+        """Execute command"""
+        try:
+            utils.check_ssh_injection(command_str)
+            if command_str is not None and ssh is not None:
+                stdin, stdout, stderr = ssh.exec_command(command_str)
+                res, err = stdout.read(), stderr.read()
+                re = res if res else err
+                result = re.decode()
+        except paramiko.AuthenticationException as ae:
+            LOG.error('do_exec Authentication error: {}'.format(ae))
+            raise exception.InvalidUsernameOrPassword()
+        except Exception as e:
+            err = six.text_type(e)
+            LOG.error('do_exec error: %s' % err)
+            if 'timed out' in err:
+                raise exception.SSHConnectTimeout()
+            elif 'No authentication methods available' in err \
+                    or 'Authentication failed' in err:
+                raise exception.InvalidUsernameOrPassword()
+            elif 'not a valid RSA private key file' in err:
+                raise exception.InvalidPrivateKey()
+            else:
+                raise exception.SSHException(err)
+        return result
+
+    def exec_ssh_command(self, command):
+        try:
+            with self.ssh_pool.item() as ssh:
+                ssh_info = SSHHandler.do_exec(command, ssh)
+            return ssh_info
+        except Exception as e:
+            msg = "Failed to ssh ibm storwize_svc %s: %s" % \
+                  (command, six.text_type(e))
+            raise exception.SSHException(msg)
+
+    def change_capacity_to_bytes(self, unit):
+        unit = unit.upper()
+        if unit == 'TB':
+            result = units.Ti
+        elif unit == 'GB':
+            result = units.Gi
+        elif unit == 'MB':
+            result = units.Mi
+        elif unit == 'KB':
+            result = units.Ki
+        else:
+            result = 1
+        return int(result)
+
+    def parse_string(self, value):
+        capacity = 0
+        if value:
+            if value.isdigit():
+                capacity = float(value)
+            else:
+                unit = value[-2:]
+                capacity = float(value[:-2]) * int(
+                    self.change_capacity_to_bytes(unit))
+        return capacity
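+    # For example (illustrative): parse_string('500.00MB') returns
+    # 500 * units.Mi = 524288000.0, while a bare digit string such as '42'
+    # passes through unscaled.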
+    def get_storage(self):
+        try:
+            system_info = self.exec_ssh_command('lssystem')
+            enclosure_info = self.exec_ssh_command('lsenclosure -delim :')
+            enclosure_res = enclosure_info.split('\n')
+            enclosure = enclosure_res[1].split(':')
+            serial_number = enclosure[7]
+            storage_map = {}
+            self.handle_detail(system_info, storage_map, split=' ')
+
+            status = 'normal' if storage_map.get('statistics_status') == 'on' \
+                else 'offline'
+            location = storage_map.get('location')
+            free_capacity = self.parse_string(storage_map.get(
+                'total_free_space'))
+            used_capacity = self.parse_string(storage_map.get(
+                'total_used_capacity'))
+            raw_capacity = self.parse_string(storage_map.get(
+                'total_drive_raw_capacity'))
+            subscribed_capacity = self.parse_string(storage_map.get(
+                'virtual_capacity'))
+            firmware_version = ''
+            if storage_map.get('code_level') is not None:
+                firmware_version = storage_map.get('code_level').split(' ')[0]
+            s = {
+                'name': storage_map.get('name'),
+                'vendor': 'IBM',
+                'model': storage_map.get('product_name'),
+                'status': status,
+                'serial_number': serial_number,
+                'firmware_version': firmware_version,
+                'location': location,
+                'total_capacity': int(free_capacity + used_capacity),
+                'raw_capacity': int(raw_capacity),
+                'subscribed_capacity': int(subscribed_capacity),
+                'used_capacity': int(used_capacity),
+                'free_capacity': int(free_capacity)
+            }
+            return s
+        except exception.DelfinException as e:
+            err_msg = "Failed to get storage: %s" % (six.text_type(e.msg))
+            LOG.error(err_msg)
+            raise e
+        except Exception as err:
+            err_msg = "Failed to get storage: %s" % (six.text_type(err))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
+
+    def handle_detail(self, detail_info, detail_map, split):
+        detail_arr = detail_info.split('\n')
+        for detail in detail_arr:
+            if detail is not None and detail != '':
+                strinfo = detail.split(split, 1)
+                key = strinfo[0]
+                value = ''
+                if len(strinfo) > 1:
+                    value = strinfo[1]
+                detail_map[key] = value
+
+    def list_storage_pools(self, storage_id):
+        try:
+            pool_list = []
+            pool_info = self.exec_ssh_command('lsmdiskgrp')
+            pool_res = pool_info.split('\n')
+            for i in range(1, len(pool_res)):
+                if pool_res[i] is None or pool_res[i] == '':
+                    continue
+
+                pool_str = ' '.join(pool_res[i].split())
+                strinfo = pool_str.split(' ')
+                detail_command = 'lsmdiskgrp %s' % strinfo[0]
+                detail_info = self.exec_ssh_command(detail_command)
+                pool_map = {}
+                self.handle_detail(detail_info, pool_map, split=' ')
+                status = 'normal' if pool_map.get('status') == 'online' \
+                    else 'offline'
+                total_cap = self.parse_string(pool_map.get('capacity'))
+                free_cap = self.parse_string(pool_map.get('free_capacity'))
+                used_cap = self.parse_string(pool_map.get('used_capacity'))
+                subscribed_capacity = self.parse_string(pool_map.get(
+                    'virtual_capacity'))
+                p = {
+                    'name': pool_map.get('name'),
+                    'storage_id': storage_id,
+                    'native_storage_pool_id': pool_map.get('id'),
+                    'description': '',
+                    'status': status,
+                    'storage_type': constants.StorageType.BLOCK,
+                    'subscribed_capacity': int(subscribed_capacity),
+                    'total_capacity': int(total_cap),
+                    'used_capacity': int(used_cap),
+                    'free_capacity': int(free_cap)
+                }
+                pool_list.append(p)
+
+            return pool_list
+        except exception.DelfinException as e:
+            err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
+            LOG.error(err_msg)
+            raise e
+        except Exception as err:
+            err_msg = "Failed to get storage pool: %s" % (six.text_type(err))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
+
+    def list_volumes(self, storage_id):
+        try:
+            volume_list = []
+            volume_info = self.exec_ssh_command('lsvdisk')
+            volume_res = volume_info.split('\n')
+            for i in range(1, len(volume_res)):
+                if volume_res[i] is None or volume_res[i] == '':
+                    continue
+                volume_str = ' '.join(volume_res[i].split())
+                strinfo = volume_str.split(' ')
+                volume_name = strinfo[1]
+                detail_command = 'lsvdisk -delim : %s' % volume_name
+                detail_info = self.exec_ssh_command(detail_command)
+                volume_map = {}
+                self.handle_detail(detail_info, volume_map, split=':')
+                status = 'normal' if volume_map.get('status') == 'online' \
+                    else 'offline'
+                volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \
+                    else 'thick'
+                total_capacity = self.parse_string(volume_map.get('capacity'))
+                free_capacity = self.parse_string(volume_map.
+                                                  get('free_capacity'))
+                used_capacity = self.parse_string(volume_map.
+                                                  get('used_capacity'))
+                compressed = True
+                deduplicated = True
+                if volume_map.get('compressed_copy') == 'no':
+                    compressed = False
+                if volume_map.get('deduplicated_copy') == 'no':
+                    deduplicated = False
+
+                v = {
+                    'name': volume_map.get('name'),
+                    'storage_id': storage_id,
+                    'description': '',
+                    'status': status,
+                    'native_volume_id': str(volume_map.get('id')),
+                    'native_storage_pool_id': volume_map.get('mdisk_grp_id'),
+                    'wwn': str(volume_map.get('vdisk_UID')),
+                    'type': volume_type,
+                    'total_capacity': int(total_capacity),
+                    'used_capacity': int(used_capacity),
+                    'free_capacity': int(free_capacity),
+                    'compressed': compressed,
+                    'deduplicated': deduplicated
+                }
+                volume_list.append(v)
+
+            return volume_list
+        except exception.DelfinException as e:
+            err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
+            LOG.error(err_msg)
+            raise e
+        except Exception as err:
+            err_msg = "Failed to get storage volume: %s" % (six.text_type(err))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
+
+    def list_alerts(self, query_para):
+        try:
+            alert_list = []
+            alert_info = self.exec_ssh_command('lseventlog -monitoring yes')
+            alert_res = alert_info.split('\n')
+            for i in range(1, len(alert_res)):
+                if alert_res[i] is None or alert_res[i] == '':
+                    continue
+                alert_str = ' '.join(alert_res[i].split())
+                strinfo = alert_str.split(' ', 1)
+                detail_command = 'lseventlog %s' % strinfo[0]
+                detail_info = self.exec_ssh_command(detail_command)
+                alert_map = {}
+                self.handle_detail(detail_info, alert_map, split=' ')
+                occur_time = int(alert_map.get('last_timestamp_epoch')) * \
+                    self.SECONDS_TO_MS
+                if not alert_util.is_alert_in_time_range(query_para,
+                                                         occur_time):
+                    continue
+                alert_name = alert_map.get('event_id_text', '')
+                event_id = alert_map.get('event_id')
+                location = alert_map.get('object_name', '')
+                resource_type = alert_map.get('object_type', '')
+                severity = self.SEVERITY_MAP.get(alert_map.
+                                                 get('notification_type'))
+
+                alert_model = {
+                    'alert_id': event_id,
+                    'alert_name': alert_name,
+                    'severity': severity,
+                    'category': constants.Category.FAULT,
+                    'type': 'EquipmentAlarm',
+                    'sequence_number': alert_map.get('sequence_number'),
+                    'occur_time': occur_time,
+                    'description': alert_name,
+                    'resource_type': resource_type,
+                    'location': location
+                }
+                alert_list.append(alert_model)
+
+            return alert_list
+        except exception.DelfinException as e:
+            err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
+            LOG.error(err_msg)
+            raise e
+        except Exception as err:
+            err_msg = "Failed to get storage alert: %s" % (six.text_type(err))
+            LOG.error(err_msg)
+            raise exception.InvalidResults(err_msg)
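Note: a sketch of the payload shape the static parse_alert above expects.
The 'key=value' varbind layout is inferred from handle_split(); the keys and
values below are illustrative.

    from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler

    trap = {
        SSHHandler.OID_ERR_ID: 'ERR_ID=12345:Battery fault',
        SSHHandler.OID_SEVERITY: '1.3.6.1.4.1.2.6.190.2',  # warning trap
        SSHHandler.OID_SEQ_NUMBER: 'sequence=120',
        SSHHandler.OID_LAST_TIME: 'last_time=Tue Dec 01 09:30:00 2020',
        SSHHandler.OID_OBJ_TYPE: 'object_type=enclosure',
        SSHHandler.OID_OBJ_NAME: 'object_name=enclosure1',
    }
    model = SSHHandler.parse_alert(trap)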
diff --git a/delfin/drivers/ibm/storwize_svc/storwize_svc.py b/delfin/drivers/ibm/storwize_svc/storwize_svc.py
new file mode 100644
index 000000000..ebba6150d
--- /dev/null
+++ b/delfin/drivers/ibm/storwize_svc/storwize_svc.py
@@ -0,0 +1,62 @@
+# Copyright 2020 The SODA Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from delfin.drivers import driver
+from delfin.drivers.ibm.storwize_svc import ssh_handler
+from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler
+
+
+class StorwizeSVCDriver(driver.StorageDriver):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.ssh_handler = ssh_handler.SSHHandler(**kwargs)
+        self.ssh_handler.login()
+
+    def reset_connection(self, context, **kwargs):
+        self.ssh_handler.login()
+
+    def get_storage(self, context):
+        return self.ssh_handler.get_storage()
+
+    def list_storage_pools(self, context):
+        return self.ssh_handler.list_storage_pools(self.storage_id)
+
+    def list_volumes(self, context):
+        return self.ssh_handler.list_volumes(self.storage_id)
+
+    def list_controllers(self, context):
+        pass
+
+    def list_ports(self, context):
+        pass
+
+    def list_disks(self, context):
+        pass
+
+    def list_alerts(self, context, query_para=None):
+        return self.ssh_handler.list_alerts(query_para)
+
+    def add_trap_config(self, context, trap_config):
+        pass
+
+    def remove_trap_config(self, context, trap_config):
+        pass
+
+    @staticmethod
+    def parse_alert(context, alert):
+        return SSHHandler.parse_alert(alert)
+
+    def clear_alert(self, context, alert):
+        pass
diff --git a/delfin/drivers/manager.py b/delfin/drivers/manager.py
index a85261ba1..be665ce52 100644
--- a/delfin/drivers/manager.py
+++ b/delfin/drivers/manager.py
@@ -80,6 +80,9 @@ def _get_driver_obj(self, context, cache_on_load=True, **kwargs):
             cls = self._get_driver_cls(**kwargs)
             return cls(**kwargs)
 
+        if kwargs['storage_id'] in self.driver_factory:
+            return self.driver_factory[kwargs['storage_id']]
+
         with self._instance_lock:
             if kwargs['storage_id'] in self.driver_factory:
                 return self.driver_factory[kwargs['storage_id']]
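Note: the manager change above is the double-checked locking pattern: a
lock-free read for the common cached case, with a second check under the
lock before creating. A standalone sketch (generic names, not delfin's):

    import threading

    _cache = {}
    _lock = threading.Lock()

    def get_instance(key, factory):
        obj = _cache.get(key)      # fast path: no lock when already cached
        if obj is not None:
            return obj
        with _lock:
            obj = _cache.get(key)  # re-check: another thread may have won
            if obj is None:
                obj = _cache[key] = factory()
            return obj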
import json -import ssl -import six + import requests +import six from oslo_log import log as logging -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.poolmanager import PoolManager from delfin import exception from delfin import ssl_utils @@ -30,24 +28,6 @@ LOG = logging.getLogger(__name__) -class HostNameIgnoringAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.assert_hostname = False - return super(HostNameIgnoringAdapter, self).cert_verify( - conn, url, verify, cert) - - def init_poolmanager(self, connections, maxsize, block=False, - **pool_kwargs): - self._pool_connections = connections - self._pool_maxsize = maxsize - self._pool_block = block - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=False, - ssl_version=ssl.PROTOCOL_TLSv1, - **pool_kwargs) - - class RestClient(object): def __init__(self, **kwargs): @@ -81,7 +61,8 @@ def init_http_head(self): self.verify)) self.session.verify = self.verify self.session.trust_env = False - self.session.mount("https://", ssl_utils.HostNameIgnoreAdapter()) + self.session.mount("https://", + ssl_utils.get_host_name_ignore_adapter()) def do_call(self, url, data, method, calltimeout=consts.SOCKET_TIMEOUT): @@ -108,15 +89,10 @@ def do_call(self, url, data, method, except requests.exceptions.SSLError as e: LOG.error('SSLError for %s %s' % (method, url)) err_str = six.text_type(e) - if 'wrong ssl version' in err_str or \ - 'sslv3 alert handshake failure' in err_str: - raise exception.WrongTlsVersion() - elif 'no cipher match' in err_str: - raise exception.CipherNotMatch() - elif 'certificate verify failed' in err_str: + if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: - raise e + raise exception.SSLHandshakeFailed() except Exception as err: LOG.exception('Bad response from server: %(url)s.' ' Error: %(err)s', {'url': url, 'err': err}) diff --git a/delfin/drivers/utils/ssh_client.py b/delfin/drivers/utils/ssh_client.py index bb7fbca3d..bed423938 100644 --- a/delfin/drivers/utils/ssh_client.py +++ b/delfin/drivers/utils/ssh_client.py @@ -1,5 +1,5 @@ # Copyright 2020 The SODA Authors. -# Copyright (c) 2016 Huawei Technologies Co., Ltd. +# Copyright 2011 OpenStack LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,12 +13,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-
-import paramiko as paramiko
+import paramiko
+import six
+from eventlet import pools
 from oslo_log import log as logging
 from paramiko.hostkeys import HostKeyEntry
 
-from delfin import cryptor
+from delfin import cryptor
 from delfin import exception
 
 LOG = logging.getLogger(__name__)
@@ -116,7 +117,7 @@ def do_exec(self, command_str):
             raise exception.SSHConnectTimeout()
         elif 'No authentication methods available' in str(e) \
                 or 'Authentication failed' in str(e):
-            raise exception.SSHInvalidUsernameOrPassword()
+            raise exception.InvalidUsernameOrPassword()
         elif 'not a valid RSA private key file' in str(e):
             raise exception.InvalidPrivateKey()
         elif 'not found in known_hosts' in str(e):
@@ -127,3 +128,114 @@
     finally:
         self.close()
     return re
+
+
+class SSHPool(pools.Pool):
+    SOCKET_TIMEOUT = 10
+
+    def __init__(self, **kwargs):
+        ssh_access = kwargs.get('ssh')
+        if ssh_access is None:
+            raise exception.InvalidInput('Input ssh_access is missing')
+        self.ssh_host = ssh_access.get('host')
+        self.ssh_port = ssh_access.get('port')
+        self.ssh_username = ssh_access.get('username')
+        self.ssh_password = ssh_access.get('password')
+        self.ssh_pub_key_type = ssh_access.get('pub_key_type')
+        self.ssh_pub_key = ssh_access.get('pub_key')
+        self.ssh_conn_timeout = ssh_access.get('conn_timeout')
+        self.conn_timeout = self.SOCKET_TIMEOUT
+        if self.ssh_conn_timeout is None:
+            self.ssh_conn_timeout = SSHPool.SOCKET_TIMEOUT
+        super(SSHPool, self).__init__(min_size=0, max_size=3)
+
+    def set_host_key(self, host_key, ssh):
+        """
+        Set the host public key. The host_key passed in through kwargs is
+        a string rather than a file path, so it cannot be loaded from a
+        known_hosts file; parse it directly and register it on the client.
+        :param str host_key: the host public key as a string
+        """
+        if (len(host_key) == 0) or (host_key[0] == "#"):
+            return
+        try:
+            e = HostKeyEntry.from_line(host_key)
+        except exception.SSHException:
+            return
+        if e is not None:
+            host_names = e.hostnames
+            for h in host_names:
+                if ssh._host_keys.check(h, e.key):
+                    e.hostnames.remove(h)
+            if len(e.hostnames):
+                ssh._host_keys._entries.append(e)
+
+    def create(self):
+        ssh = paramiko.SSHClient()
+        try:
+            if self.ssh_pub_key is None:
+                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            else:
+                host_key = '%s %s %s' % \
+                           (self.ssh_host, self.ssh_pub_key_type,
+                            self.ssh_pub_key)
+                self.set_host_key(host_key, ssh)
+
+            ssh.connect(hostname=self.ssh_host, port=self.ssh_port,
+                        username=self.ssh_username,
+                        password=cryptor.decode(self.ssh_password),
+                        timeout=self.ssh_conn_timeout)
+            if self.conn_timeout:
+                transport = ssh.get_transport()
+                transport.set_keepalive(self.SOCKET_TIMEOUT)
+            return ssh
+        except Exception as e:
+            err = six.text_type(e)
+            LOG.error('SSH connection failed: %s' % err)
+            if 'timed out' in err:
+                raise exception.InvalidIpOrPort()
+            elif 'No authentication methods available' in err \
+                    or 'Authentication failed' in err:
+                raise exception.InvalidUsernameOrPassword()
+            elif 'not a valid RSA private key file' in err:
+                raise exception.InvalidPrivateKey()
+            elif 'not found in known_hosts' in err:
+                raise exception.SSHNotFoundKnownHosts(self.ssh_host)
+            else:
+                raise exception.SSHException(err)
+
+    def get(self):
+        """Return an item from the pool, when one is available.
+
+        This may cause the calling greenthread to block. Check if a
+        connection is active before returning it. For dead connections
+        create and return a new connection.
+ """ + if self.free_items: + conn = self.free_items.popleft() + if conn: + if conn.get_transport().is_active(): + return conn + else: + conn.close() + return self.create() + if self.current_size < self.max_size: + created = self.create() + self.current_size += 1 + return created + return self.channel.get() + + def remove(self, ssh): + """Close an ssh client and remove it from free_items.""" + ssh.close() + if ssh in self.free_items: + self.free_items.remove(ssh) + if self.current_size > 0: + self.current_size -= 1 + + def put(self, conn): + if self.current_size > self.max_size: + conn.close() + self.current_size -= 1 + return + super(SSHPool, self).put(conn) diff --git a/delfin/exception.py b/delfin/exception.py index 32231564b..9b0b61ede 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -248,11 +248,6 @@ class SSHConnectTimeout(DelfinException): code = 500 -class SSHInvalidUsernameOrPassword(DelfinException): - msg_fmt = _("SSH invalid username or password.") - code = 400 - - class SSHNotFoundKnownHosts(NotFound): msg_fmt = _("{0} not found in known_hosts.") code = 400 @@ -283,12 +278,8 @@ class SSLCertificateFailed(Invalid): code = 400 -class CipherNotMatch(Invalid): - msg_fmt = _("Cipher Not Match.") - - -class WrongTlsVersion(Invalid): - msg_fmt = _("Wrong TLS Version.") +class SSLHandshakeFailed(Invalid): + msg_fmt = _("SSL handshake failure.") class StorageIsSyncing(Invalid): diff --git a/delfin/ssl_utils.py b/delfin/ssl_utils.py index 260711e1a..a17078bf1 100644 --- a/delfin/ssl_utils.py +++ b/delfin/ssl_utils.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import ssl import requests from oslo_config import cfg from oslo_log import log @@ -77,6 +76,10 @@ def reload_certificate(ca_path): _load_cert(fpath, file, ca_path) +def get_host_name_ignore_adapter(): + return HostNameIgnoreAdapter() + + class HostNameIgnoreAdapter(requests.adapters.HTTPAdapter): def cert_verify(self, conn, url, verify, cert): conn.assert_hostname = False @@ -89,6 +92,4 @@ def init_poolmanager(self, connections, maxsize, block=False, self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, - ssl_version=ssl.PROTOCOL_TLSv1_2, - **pool_kwargs) + block=block, strict=True, **pool_kwargs) diff --git a/delfin/task_manager/manager.py b/delfin/task_manager/manager.py index bacee04d0..2fada9090 100644 --- a/delfin/task_manager/manager.py +++ b/delfin/task_manager/manager.py @@ -43,7 +43,7 @@ class TaskManager(manager.Manager): RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): - self.alert_sync = alerts.AlertSyncTask() + self.alert_task = alerts.AlertSyncTask() super(TaskManager, self).__init__(*args, **kwargs) def periodic_performance_collect(self): @@ -115,7 +115,7 @@ def remove_storage_in_cache(self, context, storage_id): def sync_storage_alerts(self, context, storage_id, query_para): LOG.info('Alert sync called for storage id:{0}' .format(storage_id)) - self.alert_sync.sync_alerts(context, storage_id, query_para) + self.alert_task.sync_alerts(context, storage_id, query_para) def performance_metrics_collection(self, context, storage_id, interval, is_historic, resource_task): @@ -124,3 +124,10 @@ def performance_metrics_collection(self, context, storage_id, interval, cls = importutils.import_class(resource_task) device_obj = cls(context, storage_id, interval, is_historic) device_obj.collect() + + def clear_storage_alerts(self, context, storage_id, 
sequence_number_list):
+        LOG.info('Clear alerts called for storage id: {0}'
+                 .format(storage_id))
+        return self.alert_task.clear_alerts(context,
+                                            storage_id,
+                                            sequence_number_list)
diff --git a/delfin/task_manager/rpcapi.py b/delfin/task_manager/rpcapi.py
index 67e7ffb71..7cbd92917 100644
--- a/delfin/task_manager/rpcapi.py
+++ b/delfin/task_manager/rpcapi.py
@@ -76,3 +76,10 @@ def performance_metrics_collection(self, context, storage_id, interval,
                           interval=interval,
                           is_historic=is_historic,
                           resource_task=resource_task)
+
+    def clear_storage_alerts(self, context, storage_id, sequence_number_list):
+        call_context = self.client.prepare(version='1.0')
+        return call_context.call(context,
+                                 'clear_storage_alerts',
+                                 storage_id=storage_id,
+                                 sequence_number_list=sequence_number_list)
diff --git a/delfin/task_manager/tasks/alerts.py b/delfin/task_manager/tasks/alerts.py
index dc39a39e0..318b95644 100644
--- a/delfin/task_manager/tasks/alerts.py
+++ b/delfin/task_manager/tasks/alerts.py
@@ -55,3 +55,20 @@ def sync_alerts(self, ctx, storage_id, query_para):
             msg = _('Failed to sync alerts from storage device: {0}'
                     .format(six.text_type(e)))
             LOG.error(msg)
+
+    def clear_alerts(self, ctx, storage_id, sequence_number_list):
+        """Clear alerts from a storage device."""
+
+        LOG.info('Clear alerts for storage id: {0}'.format(storage_id))
+        sequence_number_list = sequence_number_list or []
+        failure_list = []
+        for sequence_number in sequence_number_list:
+            try:
+                self.driver_manager.clear_alert(ctx, storage_id,
+                                                sequence_number)
+            except Exception as e:
+                LOG.error("Failed to clear alert with sequence number: %s "
+                          "for storage: %s, reason: %s.",
+                          sequence_number, storage_id, six.text_type(e))
+                failure_list.append(sequence_number)
+        return failure_list
diff --git a/delfin/tests/unit/drivers/hitachi/__init__.py b/delfin/tests/unit/drivers/hitachi/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/tests/unit/drivers/hitachi/vsp/__init__.py b/delfin/tests/unit/drivers/hitachi/vsp/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py
new file mode 100644
index 000000000..cb7a58ede
--- /dev/null
+++ b/delfin/tests/unit/drivers/hitachi/vsp/test_hitachi_vspstor.py
@@ -0,0 +1,400 @@
+# Copyright 2020 The SODA Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from unittest import TestCase, mock + +from requests import Session + +from delfin import context +from delfin.drivers.hitachi.vsp.rest_handler import RestHandler +from delfin.drivers.hitachi.vsp.vsp_stor import HitachiVspDriver + + +class Request: + def __init__(self): + self.environ = {'delfin.context': context.RequestContext()} + pass + + +ACCESS_INFO = { + "storage_id": "12345", + "rest": { + "host": "110.143.132.231", + "port": "8443", + "username": "username", + "password": "cGFzc3dvcmQ=" + }, + "ssh": { + "host": "110.143.132.231", + "port": "22", + "username": "username", + "password": "password", + "host_key": "weqewrerwerwerwe" + }, + "vendor": "hitachi", + "model": "vsp", + "extra_attributes": { + "array_id": "00112233" + } +} +GET_DEVICE_ID = { + "data": [ + { + "storageDeviceId": "800000011633", + "model": "VSP G350", + "serialNumber": 11633, + "svpIp": "110.143.132.231" + } + ] +} +GET_ALL_POOLS = { + "data": [ + { + "poolId": 0, + "poolStatus": "POLN", + "usedCapacityRate": 56, + "snapshotCount": 0, + "poolName": "p3-1", + "availableVolumeCapacity": 7796586, + "totalPoolCapacity": 17821524, + "numOfLdevs": 8, + "firstLdevId": 4, + "warningThreshold": 70, + "depletionThreshold": 80, + "virtualVolumeCapacityRate": -1, + "isMainframe": False, + "isShrinking": False, + "locatedVolumeCount": 65, + "totalLocatedCapacity": 15694896, + "blockingMode": "NB", + "totalReservedCapacity": 0, + "reservedVolumeCount": 0, + "poolType": "HDP", + "duplicationNumber": 0, + "dataReductionAccelerateCompCapacity": 0, + "dataReductionCapacity": 0, + "dataReductionBeforeCapacity": 0, + "dataReductionAccelerateCompRate": 0, + "duplicationRate": 0, + "compressionRate": 0, + "dataReductionRate": 0, + "snapshotUsedCapacity": 0, + "suspendSnapshot": True + }, + { + "poolId": 1, + "poolStatus": "POLF", + "usedCapacityRate": 78, + "snapshotCount": 0, + "poolName": "hjw_test", + "availableVolumeCapacity": 3530184, + "totalPoolCapacity": 16221576, + "numOfLdevs": 6, + "firstLdevId": 0, + "warningThreshold": 70, + "depletionThreshold": 80, + "virtualVolumeCapacityRate": -1, + "isMainframe": False, + "isShrinking": False, + "locatedVolumeCount": 24, + "totalLocatedCapacity": 12702144, + "blockingMode": "NB", + "totalReservedCapacity": 0, + "reservedVolumeCount": 0, + "poolType": "HDP", + "duplicationNumber": 0, + "dataReductionAccelerateCompCapacity": 0, + "dataReductionCapacity": 0, + "dataReductionBeforeCapacity": 0, + "dataReductionAccelerateCompRate": 0, + "duplicationRate": 0, + "compressionRate": 0, + "dataReductionRate": 0, + "snapshotUsedCapacity": 0, + "suspendSnapshot": True + } + ] +} +GET_SPECIFIC_STORAGE = { + "storageDeviceId": "800000011633", + "model": "VSP G350", + "serialNumber": 11633, + "svpIp": "51.10.192.90", + "rmiPort": 1099, + "dkcMicroVersion": "80-06-70/00", + "communicationModes": [ + { + "communicationMode": "lanConnectionMode" + } + ], + "isSecure": False +} +GET_ALL_VOLUMES = { + "data": [ + { + "ldevId": 0, + "clprId": 0, + "emulationType": "OPEN-V", + "byteFormatCapacity": "2.57 T", + "blockCapacity": 5538459648, + "composingPoolId": 1, + "attributes": [ + "POOL" + ], + "raidLevel": "RAID5", + "raidType": "3D+1P", + "numOfParityGroups": 1, + "parityGroupIds": [ + "5-1" + ], + "driveType": "SLB5E-M1R9SS", + "driveByteFormatCapacity": "1.74 T", + "driveBlockCapacity": 3750000030, + "status": "NML", + "mpBladeId": 1, + "ssid": "0004", + "resourceGroupId": 0, + "isAluaEnabled": False + }, + { + "ldevId": 1, + "clprId": 0, + "emulationType": "OPEN-V", + "byteFormatCapacity": "2.57 
T", + "blockCapacity": 5538459648, + "composingPoolId": 1, + "attributes": [ + "POOL" + ], + "raidLevel": "RAID5", + "raidType": "3D+1P", + "numOfParityGroups": 1, + "parityGroupIds": [ + "5-1" + ], + "driveType": "SLB5E-M1R9SS", + "driveByteFormatCapacity": "1.74 T", + "driveBlockCapacity": 3750000030, + "status": "NML", + "mpBladeId": 4, + "ssid": "0004", + "resourceGroupId": 0, + "isAluaEnabled": False + }, + { + "ldevId": 2, + "clprId": 0, + "emulationType": "OPEN-V-CVS", + "byteFormatCapacity": "500.00 G", + "blockCapacity": 1048576000, + "numOfPorts": 4, + "ports": [ + { + "portId": "CL3-A", + "hostGroupNumber": 1, + "hostGroupName": "3A84", + "lun": 0 + }, + { + "portId": "CL2-B", + "hostGroupNumber": 0, + "hostGroupName": "2B-G00", + "lun": 0 + }, + { + "portId": "CL4-A", + "hostGroupNumber": 1, + "hostGroupName": "75_197b", + "lun": 0 + }, + { + "portId": "CL2-A", + "hostGroupNumber": 1, + "hostGroupName": "198_126b", + "lun": 0 + } + ], + "attributes": [ + "CVS", + "HDP" + ], + "label": "hjw_test_lun0", + "status": "NML", + "mpBladeId": 0, + "ssid": "0004", + "poolId": 1, + "numOfUsedBlock": 1048621056, + "isFullAllocationEnabled": False, + "resourceGroupId": 0, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "isAluaEnabled": False + }, + { + "ldevId": 99, + "clprId": 0, + "emulationType": "OPEN-V-CVS", + "byteFormatCapacity": "500.00 G", + "blockCapacity": 1048576000, + "attributes": [ + "CVS", + "HDP" + ], + "label": "AIX_performance_test_zj", + "status": "NML", + "mpBladeId": 5, + "ssid": "0004", + "poolId": 0, + "numOfUsedBlock": 1048621056, + "isFullAllocationEnabled": False, + "resourceGroupId": 0, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "isAluaEnabled": False + } + ] +} +TRAP_INFO = { + "1.3.6.1.2.1.1.3.0": "0", + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.116.3.11.4.1.1.0.1', + '1.3.6.1.4.1.116.5.11.4.2.3': 'eeeeeeeee', + '1.3.6.1.4.1.116.5.11.4.2.7': 'ddddddd', + '1.3.6.1.4.1.116.5.11.4.2.6': '14:10:10', + '1.3.6.1.4.1.116.5.11.4.2.5': '2020/11/20', + '1.3.6.1.4.1.116.5.11.4.2.2': ' System Version = 7.4.0.11 ', + '1.3.6.1.4.1.116.5.11.4.2.4': '# FRU = None ' +} +ALERT_INFO = [ + { + 'location': "test", + 'alertId': '223232', + 'alertIndex': '1111111', + 'errorDetail': 'test alert', + 'errorSection': 'someting wrong', + 'occurenceTime': '2020-11-20T10:10:10', + 'errorLevel': 'Serious' + } +] + + +def create_driver(): + kwargs = ACCESS_INFO + + RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) + + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'post', return_value=m): + m.raise_for_status.return_value = 201 + m.json.return_value = { + "token": "97c13b8082444b36bc2103026205fa64", + "sessionId": 9 + } + return HitachiVspDriver(**kwargs) + + +class TestHitachiVspStorStorageDriver(TestCase): + driver = create_driver() + + def test_initrest(self): + m = mock.MagicMock(status_code=200) + with mock.patch.object(Session, 'get', return_value=m): + m.raise_for_status.return_value = 200 + m.json.return_value = GET_DEVICE_ID + kwargs = ACCESS_INFO + rh = RestHandler(**kwargs) + rh.get_device_id() + + def test_get_storage(self): + RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID) + RestHandler.get_rest_info = mock.Mock( + side_effect=[GET_ALL_POOLS, GET_SPECIFIC_STORAGE]) + self.driver.get_storage(context) + + def test_list_storage_pools(self): + RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_POOLS) + self.driver.list_storage_pools(context) + + def 
test_list_volumes(self):
+        RestHandler.get_rest_info = mock.Mock(return_value=GET_ALL_VOLUMES)
+        self.driver.list_volumes(context)
+
+    def test_list_alerts(self):
+        RestHandler.get_rest_info = mock.Mock(return_value=ALERT_INFO)
+        self.driver.list_alerts(context)
+
+    def test_parse_queried_alerts(self):
+        alert_list = []
+        HitachiVspDriver.parse_queried_alerts(ALERT_INFO, alert_list)
+
+    def test_parse_alert(self):
+        self.driver.parse_alert(context, TRAP_INFO)
+
+    def test_rest_close_connection(self):
+        m = mock.MagicMock(status_code=200)
+        with mock.patch.object(Session, 'delete', return_value=m):
+            m.raise_for_status.return_value = 200
+            m.json.return_value = None
+            re = self.driver.close_connection()
+            self.assertIsNone(re)
+
+    def test_rest_handler_cal(self):
+        m = mock.MagicMock(status_code=403)
+        with self.assertRaises(Exception) as exc:
+            with mock.patch.object(Session, 'get', return_value=m):
+                m.raise_for_status.return_value = 403
+                m.json.return_value = None
+                url = 'http://test'
+                self.driver.rest_handler.call(url, '', 'GET')
+        self.assertIn('Invalid ip or port', str(exc.exception))
+
+    def test_reset_connection(self):
+        RestHandler.logout = mock.Mock(return_value={})
+        RestHandler.get_system_info = mock.Mock(return_value=GET_DEVICE_ID)
+        m = mock.MagicMock(status_code=200)
+        with mock.patch.object(Session, 'post', return_value=m):
+            m.raise_for_status.return_value = 201
+            m.json.return_value = {
+                "token": "97c13b8082444b36bc2103026205fa64",
+                "sessionId": 9
+            }
+            kwargs = ACCESS_INFO
+            re = self.driver.reset_connection(context, **kwargs)
+            self.assertIsNone(re)
+
+    def test_err_storage_pools_err(self):
+        with self.assertRaises(Exception) as exc:
+            self.driver.list_storage_pools(context)
+        self.assertIn('Invalid ip or port',
+                      str(exc.exception))
+
+    def test_err_volumes(self):
+        with self.assertRaises(Exception) as exc:
+            self.driver.list_volumes(context)
+        self.assertIn('Invalid ip or port',
+                      str(exc.exception))
+
+    def test_list_volumes_call(self):
+        m = mock.MagicMock(status_code=200)
+        with mock.patch.object(Session, 'get', return_value=m):
+            m.raise_for_status.return_value = 200
+            m.json.return_value = GET_ALL_VOLUMES
+            self.driver.list_volumes(context)
+
+    def test_add_trap_config(self):
+        self.driver.add_trap_config(context, None)
+
+    def test_remove_trap_config(self):
+        self.driver.remove_trap_config(context, None)
+
+    def test_clear_alert(self):
+        self.driver.clear_alert(context, None)
diff --git a/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py b/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py
index a77f770f4..933f5cc91 100644
--- a/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py
+++ b/delfin/tests/unit/drivers/hpe/hpe_3par/test_hpe_3parstor.py
@@ -114,9 +114,9 @@ def test_d_get_storage(self):
             'serial_number': '1307327',
             'firmware_version': '3.1.2.484',
             'location': None,
-            'total_capacity': 11300595826688,
+            'total_capacity': 7793486594048,
             'raw_capacity': 9594956939264,
-            'used_capacity': 9594956939264,
+            'used_capacity': 6087847706624,
             'free_capacity': 1705638887424
         }
 
@@ -577,26 +577,12 @@ def test_h_parse_alert(self):
         # Verify that all other fields are matching
         self.assertDictEqual(expected_alert_model, alert_model)
 
-    def test_i_clear_alert(self):
+    def test_clear_alert(self):
         driver = create_driver()
-        alert = {'storage_id': 'abcd-1234-56789',
-                 'storage_name': 'storage1', 'vendor': 'fake vendor',
-
'model': 'fake model', - 'hwIsmReportingAlarmLocationInfo': 'location1', - 'hwIsmReportingAlarmFaultTitle': 'Trap Test Alarm', - 'hwIsmReportingAlarmFaultType': 'equipmentFault', - 'hwIsmReportingAlarmFaultLevel': 'criticalAlarm', - 'hwIsmReportingAlarmAlarmID': '4294967294', - 'hwIsmReportingAlarmSerialNo': '4294967295', - 'hwIsmReportingAlarmAdditionInfo': 'This is just for ' - 'testing.Please ' - 'ignore it', - 'hwIsmReportingAlarmLocationAlarmID': '230584300921369', - 'hwIsmReportingAlarmFaultTime': '2020-6-25,1:42:26.0' - } + alert_id = '230584300921369' with self.assertRaises(Exception) as exc: - driver.clear_alert(context, alert) + driver.clear_alert(context, alert_id) self.assertIn('Exception in SSH protocol', str(exc.exception)) """ diff --git a/delfin/tests/unit/drivers/ibm/__init__.py b/delfin/tests/unit/drivers/ibm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py b/delfin/tests/unit/drivers/ibm/storwize_svc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py new file mode 100644 index 000000000..864721450 --- /dev/null +++ b/delfin/tests/unit/drivers/ibm/storwize_svc/test_ibm_storwize_svc.py @@ -0,0 +1,423 @@ +# Copyright 2020 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from unittest import TestCase, mock
+
+import paramiko
+
+from delfin import context
+from delfin.drivers.ibm.storwize_svc.ssh_handler import SSHHandler
+from delfin.drivers.ibm.storwize_svc.storwize_svc import StorwizeSVCDriver
+from delfin.drivers.utils.ssh_client import SSHPool
+
+
+class Request:
+    def __init__(self):
+        self.environ = {'delfin.context': context.RequestContext()}
+        pass
+
+
+ACCESS_INFO = {
+    "storage_id": "12345",
+    "vendor": "ibm",
+    "model": "storwize_svc",
+    "rest": {
+        "host": "10.0.0.1",
+        "port": 8443,
+        "username": "user",
+        "password": "pass"
+    },
+    "ssh": {
+        "host": "110.143.132.231",
+        "port": 22,
+        "username": "user",
+        "password": "pass",
+        "pub_key": "ddddddddddddddddddddddddd"
+    }
+}
+
+system_info = """id 00000200A1207E1F
+name Cluster_192.168.70.125
+location local
+partnership
+total_mdisk_capacity 8.1TB
+space_in_mdisk_grps 8.1TB
+space_allocated_to_vdisks 5.06TB
+total_free_space 3.1TB
+total_vdiskcopy_capacity 5.51TB
+total_used_capacity 5.05TB
+total_overallocation 67
+total_vdisk_capacity 5.51TB
+total_allocated_extent_capacity 5.07TB
+statistics_status on
+statistics_frequency 5
+cluster_locale en_US
+time_zone 246 Asia/Shanghai
+code_level 7.4.0.11 (build 103.29.1609070000)
+console_IP 51.10.58.200:443
+id_alias 00000200A1007E1F
+gm_link_tolerance 300
+gm_inter_cluster_delay_simulation 0
+gm_intra_cluster_delay_simulation 0
+gm_max_host_delay 5
+email_reply
+email_contact
+email_contact_primary
+email_contact_alternate
+email_contact_location
+email_contact2
+email_contact2_primary
+email_contact2_alternate
+email_state stopped
+inventory_mail_interval 0
+cluster_ntp_IP_address
+cluster_isns_IP_address
+iscsi_auth_method none
+iscsi_chap_secret
+auth_service_configured no
+auth_service_enabled no
+auth_service_url
+auth_service_user_name
+auth_service_pwd_set no
+auth_service_cert_set no
+auth_service_type tip
+relationship_bandwidth_limit 25
+tier ssd
+tier_capacity 0.00MB
+tier_free_capacity 0.00MB
+tier enterprise
+tier_capacity 0.00MB
+tier_free_capacity 0.00MB
+tier nearline
+tier_capacity 8.13TB
+tier_free_capacity 3.06TB
+has_nas_key no
+layer storage
+rc_buffer_size 48
+compression_active no
+compression_virtual_capacity 0.00MB
+compression_compressed_capacity 0.00MB
+compression_uncompressed_capacity 0.00MB
+cache_prefetch on
+email_organization
+email_machine_address
+email_machine_city
+email_machine_state XX
+email_machine_zip
+email_machine_country
+total_drive_raw_capacity 10.92TB
+compression_destage_mode off
+local_fc_port_mask 1111111111111111111111111111111
+partner_fc_port_mask 11111111111111111111111111111
+high_temp_mode off
+topology standard
+topology_status
+rc_auth_method none
+vdisk_protection_time 15
+vdisk_protection_enabled no
+product_name IBM Storwize V7000
+max_replication_delay 0
+partnership_exclusion_threshold 315
+"""
+
+enclosure_info = """id:status:type:managed:IO_id:IO_group_name:product_MTM
+1:online:control:yes:0:io_grp0:2076-124:78N16G4:2:2:2:2:24:0:0
+"""
+
+pools_info = """id name status mdisk_count vdisk_count capacity
+1 mdiskgrp0 online 1 101 8.13TB 1024 3.06TB
+"""
+
+pool_info = """id 1
+name mdiskgrp0
+status online
+mdisk_count 1
+vdisk_count 101
+capacity 8.13TB
+extent_size 1024
+free_capacity 3.06TB
+virtual_capacity 5.51TB
+used_capacity 5.05TB
+real_capacity 5.06TB
+overallocation 67
+warning 80
+easy_tier auto
+easy_tier_status balanced
+tier ssd
+tier_mdisk_count 0
+tier_capacity 0.00MB
+tier_free_capacity 0.00MB
+tier enterprise
+tier_mdisk_count 0
+tier_capacity 0.00MB
+tier_free_capacity
0.00MB +tier nearline +tier_mdisk_count 1 +tier_capacity 8.13TB +tier_free_capacity 3.06TB +compression_active no +compression_virtual_capacity 0.00MB +compression_compressed_capacity 0.00MB +compression_uncompressed_capacity 0.00MB +site_id +site_name +parent_mdisk_grp_id 1 +parent_mdisk_grp_name mdiskgrp0 +child_mdisk_grp_count 0 +child_mdisk_grp_capacity 0.00MB +type parent +encrypt no +""" + +volumes_info = """id name IO_group_id IO_group_name status +0 V7000LUN_Mig 0 io_grp0 online 1 +""" + +volume_info = """id:0 +name:V7000LUN_Mig +IO_group_id:0 +IO_group_name:io_grp0 +status:online +mdisk_grp_id:1 +mdisk_grp_name:mdiskgrp0 +capacity:50.00GB +type:striped +formatted:no +mdisk_id: +mdisk_name: +FC_id: +FC_name: +RC_id: +RC_name: +vdisk_UID:60050768028401F87C00000000000000 +throttling:0 +preferred_node_id:3 +fast_write_state:empty +cache:readwrite +udid: +fc_map_count:0 +sync_rate:50 +copy_count:1 +se_copy_count:0 +filesystem: +mirror_write_priority:latency +RC_change:no +compressed_copy_count:0 +access_IO_group_count:1 +last_access_time:190531130236 +parent_mdisk_grp_id:1 +parent_mdisk_grp_name:mdiskgrp0 + +copy_id:0 +status:online +sync:yes +primary:yes +mdisk_grp_id:1 +mdisk_grp_name:mdiskgrp0 +type:striped +mdisk_id: +mdisk_name: +fast_write_state:empty +used_capacity:50.00GB +real_capacity:50.00GB +free_capacity:0.00MB +overallocation:100 +autoexpand: +warning: +grainsize: +se_copy:no +easy_tier:on +easy_tier_status:balanced +tier:ssd +tier_capacity:0.00MB +tier:enterprise +tier_capacity:0.00MB +tier:nearline +tier_capacity:50.00GB +compressed_copy:no +uncompressed_used_capacity:50.00GB +parent_mdisk_grp_id:1 +parent_mdisk_grp_name:mdiskgrp0 +""" + +alerts_info = """sequence_number last_timestamp object_type object_id +101 201111165750 node 3 node1 +""" + +alert_info = """sequence_number 101 +first_timestamp 201111165750 +first_timestamp_epoch 1605085070 +last_timestamp 201111165750 +last_timestamp_epoch 1605085070 +object_type node +object_id 3 +object_name node1 +copy_id +reporting_node_id 3 +reporting_node_name node1 +root_sequence_number +event_count 1 +status message +fixed no +auto_fixed no +notification_type informational +event_id 980221 +event_id_text Error log cleared +error_code +error_code_text +machine_type 2076124 +serial_number 78N16G4 +FRU None +fixed_timestamp +fixed_timestamp_epoch +callhome_type none +sense1 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense2 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense4 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense6 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense7 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +sense8 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +""" + +trap_info = { + '1.3.6.1.2.1.1.3.0': '0', + '1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.2.6.190.3', + '1.3.6.1.4.1.2.6.190.4.1': '# Machine Type = 2076124', + '1.3.6.1.4.1.2.6.190.4.2': '# Serial Number = 78N16G4', + '1.3.6.1.4.1.2.6.190.4.3': '# Error ID = 981004 : FC discovery occurred, ' + 'no configuration changes were detected', + '1.3.6.1.4.1.2.6.190.4.4': '# Error Code = ', + '1.3.6.1.4.1.2.6.190.4.5': '# System Version = 7.4.0.11 (build 103.29.' 
+ '1609070000)', + '1.3.6.1.4.1.2.6.190.4.6': '# FRU = None ', + '1.3.6.1.4.1.2.6.190.4.7': '# System Name = Cluster_192.168.70.125', + '1.3.6.1.4.1.2.6.190.4.8': '# Node ID = 3', + '1.3.6.1.4.1.2.6.190.4.9': '# Error Sequence Number = 165', + '1.3.6.1.4.1.2.6.190.4.10': '# Timestamp = Tue Nov 10 09:08:27 2020', + '1.3.6.1.4.1.2.6.190.4.11': '# Object Type = cluster', + '1.3.6.1.4.1.2.6.190.4.12': '# Object ID = 0', + '1.3.6.1.4.1.2.6.190.4.17': '# Object Name = Cluster_192.168.70.125', + '1.3.6.1.4.1.2.6.190.4.15': '# Copy ID = ', + '1.3.6.1.4.1.2.6.190.4.16': '# Machine Part Number = ', + '1.3.6.1.4.1.2.6.190.4.13': '# Additional Data (0 -> 63) = 01080000018A0', + '1.3.6.1.4.1.2.6.190.4.14': '# Additional Data (64 -> 127) = 00000000000', + 'transport_address': '51.10.58.200', + 'storage_id': '4992d7f5-4f73-4123-a27b-6e27889f3852' +} + + +def create_driver(): + + SSHHandler.login = mock.Mock( + return_value={""}) + + return StorwizeSVCDriver(**ACCESS_INFO) + + +class TestStorwizeSvcStorageDriver(TestCase): + driver = create_driver() + + def test_init(self): + SSHHandler.login = mock.Mock( + return_value={""}) + StorwizeSVCDriver(**ACCESS_INFO) + + def test_list_storage(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[system_info, enclosure_info]) + self.driver.get_storage(context) + + def test_list_storage_pools(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[pools_info, pool_info]) + self.driver.list_storage_pools(context) + + def test_list_volumes(self): + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[volumes_info, volume_info]) + self.driver.list_volumes(context) + + def test_list_alerts(self): + query_para = { + "begin_time": 160508506000, + "end_time": 160508507000 + } + SSHPool.get = mock.Mock( + return_value={paramiko.SSHClient()}) + SSHHandler.do_exec = mock.Mock( + side_effect=[alerts_info, alert_info]) + self.driver.list_alerts(context, query_para) + + def test_list_storage_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.get_storage(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_list_pool_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.list_storage_pools(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_list_volume_with_error(self): + with self.assertRaises(Exception) as exc: + self.driver.list_volumes(context) + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_init_ssh_exec(self): + with self.assertRaises(Exception) as exc: + ssh = paramiko.SSHClient() + SSHHandler.do_exec('lssystem', ssh) + self.assertIn('', str(exc.exception)) + + def test_ssh_pool_create(self): + with self.assertRaises(Exception) as exc: + kwargs = ACCESS_INFO + ssh_pool = SSHPool(**kwargs) + ssh_pool.create() + self.assertIn('Exception in SSH protocol negotiation or logic', + str(exc.exception)) + + def test_ssh_pool_put(self): + ssh_pool = SSHPool(**ACCESS_INFO) + ssh = paramiko.SSHClient() + ssh_pool.put(ssh) + ssh_pool.remove(ssh) + + def test_parse_alert(self): + self.driver.parse_alert(context, trap_info) + + def test_reset_connection(self): + self.driver.reset_connection(context, **ACCESS_INFO) + + def test_add_trap_config(self): + trap_config = '' + 
self.driver.add_trap_config(context, trap_config) + + def test_remove_trap_config(self): + trap_config = '' + self.driver.remove_trap_config(context, trap_config) + + def test_clear_alert(self): + alert = '' + self.driver.clear_alert(context, alert) diff --git a/delfin/tests/unit/drivers/test_api.py b/delfin/tests/unit/drivers/test_api.py index 9c73ac6e8..7e9c7896c 100644 --- a/delfin/tests/unit/drivers/test_api.py +++ b/delfin/tests/unit/drivers/test_api.py @@ -202,8 +202,10 @@ def test_remove_storage(self, mock_storage, mock_access_info, api.discover_storage(context, ACCESS_INFO) storage_id = '12345' + + # Verify that driver instance not added to factory driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) + self.assertIsNone(driver) api.remove_storage(context, storage_id) @@ -211,154 +213,94 @@ def test_remove_storage(self, mock_storage, mock_access_info, self.assertIsNone(driver) @mock.patch.object(FakeStorageDriver, 'get_storage') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_get_storage(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_get_storage(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() storage = copy.deepcopy(STORAGE) storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage mock_fake.return_value = storage api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.get_storage(context, storage_id) + driver_manager.assert_called_once() mock_fake.assert_called() @mock.patch.object(FakeStorageDriver, 'list_storage_pools') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_list_storage_pools(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_storage_pools(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.list_storage_pools(context, storage_id) + driver_manager.assert_called_once() mock_fake.assert_called_once() @mock.patch.object(FakeStorageDriver, 'list_volumes') - @mock.patch('delfin.db.storage_create') - @mock.patch('delfin.db.access_info_create') - @mock.patch('delfin.db.storage_get_all') - def test_list_volumes(self, mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None - mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + def test_list_volumes(self, driver_manager, mock_fake): + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - 
api.discover_storage(context, ACCESS_INFO)
         storage_id = '12345'
-        driver = api.driver_manager.driver_factory.get(storage_id, None)
-        self.assertIsNotNone(driver)
         api.list_volumes(context, storage_id)
+        driver_manager.assert_called_once()
         mock_fake.assert_called_once()
 
     @mock.patch.object(FakeStorageDriver, 'list_controllers')
-    @mock.patch('delfin.db.storage_create')
-    @mock.patch('delfin.db.access_info_create')
-    @mock.patch('delfin.db.storage_get_all')
-    def test_list_controllers(self, mock_storage, mock_access_info,
-                              mock_storage_create, mock_fake):
-        storage = copy.deepcopy(STORAGE)
-        storage['id'] = '12345'
-        mock_storage.return_value = None
-        mock_access_info.return_value = ACCESS_INFO
-        mock_storage_create.return_value = storage
+    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
+    def test_list_controllers(self, driver_manager, mock_fake):
+        driver_manager.return_value = FakeStorageDriver()
         mock_fake.return_value = []
         api = API()
-        api.discover_storage(context, ACCESS_INFO)
         storage_id = '12345'
-        driver = api.driver_manager.driver_factory.get(storage_id, None)
-        self.assertIsNotNone(driver)
         api.list_controllers(context, storage_id)
+        driver_manager.assert_called_once()
         mock_fake.assert_called_once()
 
-    @mock.patch.object(FakeStorageDriver, 'list_ports')
-    @mock.patch('delfin.db.storage_create')
-    @mock.patch('delfin.db.access_info_create')
-    @mock.patch('delfin.db.storage_get_all')
-    def test_list_ports(self, mock_storage, mock_access_info,
-                        mock_storage_create, mock_fake):
-        storage = copy.deepcopy(STORAGE)
-        storage['id'] = '12345'
-        mock_storage.return_value = None
-        mock_access_info.return_value = ACCESS_INFO
-        mock_storage_create.return_value = storage
+    @mock.patch.object(FakeStorageDriver, 'list_disks')
+    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
+    def test_list_disks(self, driver_manager, mock_fake):
+        driver_manager.return_value = FakeStorageDriver()
         mock_fake.return_value = []
         api = API()
-        api.discover_storage(context, ACCESS_INFO)
         storage_id = '12345'
-        driver = api.driver_manager.driver_factory.get(storage_id, None)
-        self.assertIsNotNone(driver)
-        api.list_ports(context, storage_id)
+
+        api.list_disks(context, storage_id)
+        driver_manager.assert_called_once()
         mock_fake.assert_called_once()
 
-    @mock.patch.object(FakeStorageDriver, 'list_disks')
-    @mock.patch('delfin.db.storage_create')
-    @mock.patch('delfin.db.access_info_create')
-    @mock.patch('delfin.db.storage_get_all')
-    def test_list_disks(self, mock_storage, mock_access_info,
-                        mock_storage_create, mock_fake):
-        storage = copy.deepcopy(STORAGE)
-        storage['id'] = '12345'
-        mock_storage.return_value = None
-        mock_access_info.return_value = ACCESS_INFO
-        mock_storage_create.return_value = storage
+    @mock.patch.object(FakeStorageDriver, 'list_ports')
+    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
+    def test_list_ports(self, driver_manager, mock_fake):
+        driver_manager.return_value = FakeStorageDriver()
         mock_fake.return_value = []
         api = API()
-        api.discover_storage(context, ACCESS_INFO)
         storage_id = '12345'
-        driver = api.driver_manager.driver_factory.get(storage_id, None)
-        self.assertIsNotNone(driver)
-        api.list_disks(context, storage_id)
+        api.list_ports(context, storage_id)
+        driver_manager.assert_called_once()
         mock_fake.assert_called_once()
 
     @mock.patch.object(FakeStorageDriver, 'parse_alert')
-    @mock.patch('delfin.db.storage_create')
-    @mock.patch('delfin.db.access_info_create')
-    @mock.patch('delfin.db.storage_get_all')
-    def test_parse_alert(self,
mock_storage, mock_access_info, - mock_storage_create, mock_fake): - storage = copy.deepcopy(STORAGE) - storage['id'] = '12345' - mock_storage.return_value = None + @mock.patch('delfin.drivers.manager.DriverManager.get_driver') + @mock.patch('delfin.db.access_info_get') + def test_parse_alert(self, mock_access_info, + driver_manager, mock_fake): mock_access_info.return_value = ACCESS_INFO - mock_storage_create.return_value = storage + driver_manager.return_value = FakeStorageDriver() mock_fake.return_value = [] api = API() - api.discover_storage(context, ACCESS_INFO) storage_id = '12345' - driver = api.driver_manager.driver_factory.get(storage_id, None) - self.assertIsNotNone(driver) api.parse_alert(context, storage_id, 'alert') + mock_access_info.assert_called_once() + driver_manager.assert_called_once() mock_fake.assert_called_once() diff --git a/setup.py b/setup.py index d74902039..205b8601f 100644 --- a/setup.py +++ b/setup.py @@ -39,8 +39,10 @@ 'delfin.storage.drivers': [ 'fake_storage fake_driver = delfin.drivers.fake_storage:FakeStorageDriver', 'dellemc vmax = delfin.drivers.dell_emc.vmax.vmax:VMAXStorageDriver', + 'hitachi vsp = delfin.drivers.hitachi.vsp.vsp_stor:HitachiVspDriver', 'hpe 3par = delfin.drivers.hpe.hpe_3par.hpe_3parstor:Hpe3parStorDriver', - 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver' + 'huawei oceanstor = delfin.drivers.huawei.oceanstor.oceanstor:OceanStorDriver', + 'ibm storwize_svc = delfin.drivers.ibm.storwize_svc.storwize_svc:StorwizeSVCDriver' ] }, )
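
Usage sketches (supplementary, not part of the patch itself). Both snippets are minimal illustrations under stated assumptions: the host address, credentials, storage id, sequence numbers, and the ctxt variable are hypothetical placeholders, the password is assumed to already be in the encoded form that delfin.cryptor.decode() expects inside SSHPool.create(), and error handling is elided.

    # Checkout/checkin cycle of the new SSHPool (an eventlet pools.Pool).
    from delfin.drivers.utils.ssh_client import SSHPool

    access_info = {
        'ssh': {
            'host': '192.168.0.10',      # hypothetical device address
            'port': 22,
            'username': 'admin',
            'password': '<encoded-password>',
        }
    }
    pool = SSHPool(**access_info)

    ssh = pool.get()                     # reuse a live client or create one
    try:
        stdin, stdout, stderr = ssh.exec_command('lssystem')
        print(stdout.read().decode())
    finally:
        pool.put(ssh)                    # return the client for reuse

The new clear-alert path follows the existing task RPC conventions: a caller goes through delfin.task_manager.rpcapi and gets back the sequence numbers that could not be cleared, e.g.

    from delfin.task_manager import rpcapi as task_rpcapi

    failed = task_rpcapi.TaskAPI().clear_storage_alerts(
        ctxt, storage_id='12345', sequence_number_list=['101', '102'])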