From 094780de8acbf5d053c4658dac58692a2ccacf58 Mon Sep 17 00:00:00 2001 From: chenxicqu Date: Mon, 27 Jun 2022 17:53:18 +0800 Subject: [PATCH] 1. Bug fix about alarm mangement. 2. Refactor some classes and constants. --- delfin/alert_manager/alert_processor.py | 2 +- delfin/alert_manager/snmp_validator.py | 37 ++++- delfin/alert_manager/trap_receiver.py | 125 +++++++++----- delfin/api/v1/storages.py | 4 +- delfin/common/constants.py | 119 ++++++------- delfin/drivers/utils/rest_client.py | 14 +- delfin/task_manager/tasks/alerts.py | 6 +- delfin/task_manager/tasks/resources.py | 157 +++++------------- delfin/tests/unit/alert_manager/fakes.py | 3 +- .../unit/alert_manager/test_snmp_validator.py | 9 +- .../unit/alert_manager/test_trap_receiver.py | 11 +- .../tests/unit/task_manager/test_resources.py | 29 +--- 12 files changed, 248 insertions(+), 268 deletions(-) diff --git a/delfin/alert_manager/alert_processor.py b/delfin/alert_manager/alert_processor.py index b9e38c53d..13ca3ee80 100644 --- a/delfin/alert_manager/alert_processor.py +++ b/delfin/alert_manager/alert_processor.py @@ -52,7 +52,7 @@ def process_alert_info(self, alert): ctxt, storage, alert_model) alert_util.fill_storage_attributes(alert_model, storage) except exception.IncompleteTrapInformation as e: - LOG.warn(e) + LOG.warning(e) threading.Thread(target=self.sync_storage_alert, args=(ctxt, alert['storage_id'])).start() except exception.AlertSourceNotFound: diff --git a/delfin/alert_manager/snmp_validator.py b/delfin/alert_manager/snmp_validator.py index f4564b238..6dd845c88 100644 --- a/delfin/alert_manager/snmp_validator.py +++ b/delfin/alert_manager/snmp_validator.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import binascii +import copy import six from oslo_config import cfg from oslo_log import log +from oslo_utils import encodeutils from pyasn1.type.univ import OctetString from pysnmp.entity.rfc3413.oneliner import cmdgen @@ -39,8 +41,21 @@ def __init__(self): def validate(self, ctxt, alert_source): engine_id = alert_source.get('engine_id') try: - alert_source = self.validate_connectivity(alert_source) - + hosts = alert_source['host'].split(',') + temp_alert_source = copy.deepcopy(alert_source) + # Sets a value to raise a SNMPConnectionFailed when multiple + # alarm sources fail to be verified + connection_times = 0 + for host in hosts: + temp_alert_source['host'] = host + try: + connection_times += 1 + alert_source = \ + self.validate_connectivity(ctxt, temp_alert_source) + break + except Exception as e: + if connection_times == len(hosts): + raise e # If protocol is snmpv3, the snmp_validator will update # engine id if engine id is empty. Therefore, engine id # should be saved in database. @@ -61,7 +76,7 @@ def validate(self, ctxt, alert_source): LOG.error("Failed to check snmp config. 
Reason: %s", msg) @staticmethod - def validate_connectivity(alert_source): + def validate_connectivity(ctxt, alert_source): # Fill optional parameters with default values if not set in input if not alert_source.get('port'): alert_source['port'] = constants.DEFAULT_SNMP_CONNECT_PORT @@ -78,6 +93,12 @@ def validate_connectivity(alert_source): if CONF.snmp_validation_enabled is False: return alert_source + storage_id = alert_source.get('storage_id') + access_info = db.access_info_get(ctxt, storage_id) + access_info = dict(access_info) + if access_info.get('model') not in constants.SNMP_SUPPORTED_MODELS: + return alert_source + cmd_gen = cmdgen.CommandGenerator() version = alert_source.get('version') @@ -103,10 +124,12 @@ def validate_connectivity(alert_source): ) auth_key = None if alert_source['auth_key']: - auth_key = cryptor.decode(alert_source['auth_key']) + auth_key = encodeutils.to_utf8( + cryptor.decode(alert_source['auth_key'])) privacy_key = None if alert_source['privacy_key']: - privacy_key = cryptor.decode(alert_source['privacy_key']) + privacy_key = encodeutils.to_utf8( + cryptor.decode(alert_source['privacy_key'])) auth_protocol = None privacy_protocol = None if alert_source['auth_protocol']: @@ -135,8 +158,8 @@ def validate_connectivity(alert_source): alert_source['engine_id'] = binascii.hexlify( engine_id.asOctets()).decode() else: - community_string = cryptor.decode( - alert_source['community_string']) + community_string = encodeutils.to_utf8( + cryptor.decode(alert_source['community_string'])) error_indication, __, __, __ = cmd_gen.getCmd( cmdgen.CommunityData( community_string, diff --git a/delfin/alert_manager/trap_receiver.py b/delfin/alert_manager/trap_receiver.py index 70fc51d1e..2f9d15fbd 100644 --- a/delfin/alert_manager/trap_receiver.py +++ b/delfin/alert_manager/trap_receiver.py @@ -15,11 +15,13 @@ import six from oslo_log import log from oslo_service import periodic_task +from oslo_utils import encodeutils from pysnmp.carrier.asyncore.dgram import udp from pysnmp.entity import engine, config from pysnmp.entity.rfc3413 import ntfrcv from pysnmp.proto.api import v2c from pysnmp.smi import builder, view +from retrying import retry from delfin import context, cryptor from delfin import db @@ -61,41 +63,53 @@ def sync_snmp_config(self, ctxt, snmp_config_to_del=None, self._add_snmp_config(ctxt, snmp_config_to_add) def _add_snmp_config(self, ctxt, new_config): - LOG.info("Start to add snmp trap config.") storage_id = new_config.get("storage_id") - version_int = self._get_snmp_version_int(ctxt, - new_config.get("version")) - if version_int == constants.SNMP_V2_INT or \ - version_int == constants.SNMP_V1_INT: - community_string = cryptor.decode( - new_config.get("community_string")) - community_index = self._get_community_index(storage_id) - config.addV1System(self.snmp_engine, community_index, - community_string, contextName=community_string) - else: - username = new_config.get("username") - engine_id = new_config.get("engine_id") - if engine_id: - engine_id = v2c.OctetString(hexValue=engine_id) - - auth_key = new_config.get("auth_key") - auth_protocol = new_config.get("auth_protocol") - privacy_key = new_config.get("privacy_key") - privacy_protocol = new_config.get("privacy_protocol") - if auth_key: - auth_key = cryptor.decode(auth_key) - if privacy_key: - privacy_key = cryptor.decode(privacy_key) - config.addV3User( - self.snmp_engine, - userName=username, - authKey=auth_key, - privKey=privacy_key, - authProtocol=self._get_usm_auth_protocol(ctxt, - auth_protocol), - 
privProtocol=self._get_usm_priv_protocol(ctxt, - privacy_protocol), - securityEngineId=engine_id) + LOG.info("Start to add snmp trap config for storage: %s", + storage_id) + try: + version_int = self._get_snmp_version_int(ctxt, + new_config.get("version")) + if version_int == constants.SNMP_V2_INT or \ + version_int == constants.SNMP_V1_INT: + community_string = cryptor.decode( + new_config.get("community_string")) + community_string = encodeutils.to_utf8(community_string) + community_index = self._get_community_index(storage_id) + config.addV1System(self.snmp_engine, community_index, + community_string, + contextName=community_string) + else: + username = new_config.get("username") + engine_id = new_config.get("engine_id") + if engine_id: + engine_id = v2c.OctetString(hexValue=engine_id) + + auth_key = new_config.get("auth_key") + auth_protocol = new_config.get("auth_protocol") + privacy_key = new_config.get("privacy_key") + privacy_protocol = new_config.get("privacy_protocol") + if auth_key: + auth_key = encodeutils.to_utf8(cryptor.decode(auth_key)) + if privacy_key: + privacy_key = encodeutils.to_utf8( + cryptor.decode(privacy_key)) + config.addV3User( + self.snmp_engine, + userName=username, + authKey=auth_key, + privKey=privacy_key, + authProtocol=self._get_usm_auth_protocol(ctxt, + auth_protocol), + privProtocol=self._get_usm_priv_protocol(ctxt, + privacy_protocol), + securityEngineId=engine_id) + LOG.info("Add snmp trap config for storage: %s successfully.", + storage_id) + except Exception as e: + msg = six.text_type(e) + LOG.error("Failed to add snmp trap config for storage: %s. " + "Reason: %s", storage_id, msg) + raise e def _delete_snmp_config(self, ctxt, snmp_config): LOG.info("Start to remove snmp trap config.") @@ -168,22 +182,49 @@ def _add_transport(self): udp.UdpTransport().openServerMode( (self.trap_receiver_address, int(self.trap_receiver_port))) ) - except Exception: - raise ValueError("Port binding failed: Port is in use.") + except Exception as e: + LOG.error('Failed to add transport, error is %s' + % six.text_type(e)) + raise exception.DelfinException(message=six.text_type(e)) @staticmethod def _get_alert_source_by_host(source_ip): """Gets alert source for given source ip address.""" - filters = {'host': source_ip} + filters = {'host~': source_ip} ctxt = context.RequestContext() # Using the known filter and db exceptions are handled by api - alert_source = db.alert_source_get_all(ctxt, filters=filters) - if not alert_source: + alert_sources = db.alert_source_get_all(ctxt, filters=filters) + if not alert_sources: raise exception.AlertSourceNotFoundWithHost(source_ip) - # Return first configured source that can handle the trap - return alert_source[0] + # This is to make sure unique host is configured each alert source + unique_alert_source = None + if len(alert_sources) > 1: + # Clear invalid alert_source + for alert_source in alert_sources: + try: + db.storage_get(ctxt, alert_source['storage_id']) + except exception.StorageNotFound: + LOG.warning('Found redundancy alert source for storage %s' + % alert_source['storage_id']) + try: + db.alert_source_delete( + ctxt, alert_source['storage_id']) + except Exception as e: + LOG.warning('Delete the invalid alert source failed, ' + 'reason is %s' % six.text_type(e)) + else: + unique_alert_source = alert_source + else: + unique_alert_source = alert_sources[0] + + if unique_alert_source is None: + msg = (_("Failed to get unique alert source with host %s.") + % source_ip) + raise exception.InvalidResults(msg) + + return 
unique_alert_source def _cb_fun(self, state_reference, context_engine_id, context_name, var_binds, cb_ctx): @@ -256,6 +297,8 @@ def _load_snmp_config(self): if len(alert_sources) < limit: finished = True + @retry(stop_max_attempt_number=180, wait_random_min=4000, + wait_random_max=6000) def start(self): """Starts the snmp trap receiver with necessary prerequisites.""" snmp_engine = engine.SnmpEngine() diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py index 19ab666eb..21025c30d 100755 --- a/delfin/api/v1/storages.py +++ b/delfin/api/v1/storages.py @@ -156,8 +156,8 @@ def sync_all(self, req): try: _set_synced_if_ok(ctxt, storage['id'], resource_count) except exception.InvalidInput as e: - LOG.warn('Can not start new sync task for %s, reason is %s' - % (storage['id'], e.msg)) + LOG.warning('Can not start new sync task for %s, reason is %s' + % (storage['id'], e.msg)) continue else: for subclass in \ diff --git a/delfin/common/constants.py b/delfin/common/constants.py index 329bce5ea..55f9bf91c 100644 --- a/delfin/common/constants.py +++ b/delfin/common/constants.py @@ -51,16 +51,6 @@ class VolumeStatus(object): ALL = (AVAILABLE, ERROR) -class ControllerStatus(object): - NORMAL = 'normal' - OFFLINE = 'offline' - FAULT = 'fault' - DEGRADED = 'degraded' - UNKNOWN = 'unknown' - - ALL = (NORMAL, OFFLINE, FAULT, DEGRADED, UNKNOWN) - - class StorageType(object): BLOCK = 'block' FILE = 'file' @@ -242,54 +232,6 @@ class ShareProtocol(object): ALL = (CIFS, NFS, FTP, HDFS) -class HostStatus(object): - NORMAL = 'normal' - OFFLINE = 'offline' - ABNORMAL = 'abnormal' - DEGRADED = 'degraded' - - ALL = (NORMAL, OFFLINE, ABNORMAL, DEGRADED) - - -class HostOSTypes(object): - LINUX = 'Linux' - WINDOWS = 'Windows' - SOLARIS = 'Solaris' - HP_UX = 'HP-UX' - AIX = 'AIX' - XEN_SERVER = 'XenServer' - VMWARE_ESX = 'VMware ESX' - LINUX_VIS = 'LINUX_VIS' - WINDOWS_SERVER_2012 = 'Windows Server 2012' - ORACLE_VM = 'Oracle VM' - OPEN_VMS = 'Open VMS' - MAC_OS = 'Mac OS' - UNKNOWN = 'Unknown' - - ALL = (LINUX, WINDOWS, SOLARIS, HP_UX, AIX, XEN_SERVER, VMWARE_ESX, - LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, MAC_OS, - UNKNOWN) - - -class InitiatorStatus(object): - ONLINE = 'online' - OFFLINE = 'offline' - UNKNOWN = 'unknown' - - ALL = (ONLINE, OFFLINE, UNKNOWN) - - -class InitiatorType(object): - FC = 'fc' - ISCSI = 'iscsi' - NVME_OVER_ROCE = 'roce' - SAS = 'sas' - NVME_OVER_FABRIC = 'nvme-of' - UNKNOWN = 'unknown' - - ALL = (FC, ISCSI, NVME_OVER_ROCE, SAS, NVME_OVER_FABRIC, UNKNOWN) - - # Enumerations for alert severity class Severity(object): FATAL = 'Fatal' @@ -315,6 +257,27 @@ class ClearType(object): MANUAL = 'Manual' +class ControllerStatus(object): + NORMAL = 'normal' + OFFLINE = 'offline' + FAULT = 'fault' + DEGRADED = 'degraded' + UNKNOWN = 'unknown' + + ALL = (NORMAL, OFFLINE, FAULT, DEGRADED, UNKNOWN) + + +class InitiatorType(object): + FC = 'fc' + ISCSI = 'iscsi' + NVME_OVER_ROCE = 'roce' + SAS = 'sas' + NVME_OVER_FABRIC = 'nvme-of' + UNKNOWN = 'unknown' + + ALL = (FC, ISCSI, NVME_OVER_ROCE, SAS, NVME_OVER_FABRIC, UNKNOWN) + + # Enumerations for alert type based on X.733 Specification class EventType(object): COMMUNICATIONS_ALARM = 'CommunicationsAlarm' @@ -693,3 +656,43 @@ class FileSystemMetric: unit='KB', description='The average size of write IO requests ' 'in KB') + + +SNMP_SUPPORTED_MODELS = ('vsp', '3par', 'cmode', 'msa', 'hnas') + + +class HostStatus(object): + NORMAL = 'normal' + OFFLINE = 'offline' + ABNORMAL = 'abnormal' + DEGRADED = 'degraded' + + ALL = (NORMAL, OFFLINE, 
ABNORMAL, DEGRADED) + + +class HostOSTypes(object): + LINUX = 'Linux' + WINDOWS = 'Windows' + SOLARIS = 'Solaris' + HP_UX = 'HP-UX' + AIX = 'AIX' + XEN_SERVER = 'XenServer' + VMWARE_ESX = 'VMware ESX' + LINUX_VIS = 'LINUX_VIS' + WINDOWS_SERVER_2012 = 'Windows Server 2012' + ORACLE_VM = 'Oracle VM' + OPEN_VMS = 'Open VMS' + MAC_OS = 'Mac OS' + UNKNOWN = 'Unknown' + + ALL = (LINUX, WINDOWS, SOLARIS, HP_UX, AIX, XEN_SERVER, VMWARE_ESX, + LINUX_VIS, WINDOWS_SERVER_2012, ORACLE_VM, OPEN_VMS, MAC_OS, + UNKNOWN) + + +class InitiatorStatus(object): + ONLINE = 'online' + OFFLINE = 'offline' + UNKNOWN = 'unknown' + + ALL = (ONLINE, OFFLINE, UNKNOWN) diff --git a/delfin/drivers/utils/rest_client.py b/delfin/drivers/utils/rest_client.py index 91c71cefc..e72868eb6 100644 --- a/delfin/drivers/utils/rest_client.py +++ b/delfin/drivers/utils/rest_client.py @@ -85,25 +85,27 @@ def do_call(self, url, data, method, try: res = func(url, **kwargs) except requests.exceptions.ConnectTimeout as ct: - LOG.error('Connect Timeout err: {}'.format(ct)) + LOG.error('Connect Timeout error for url([{}]{}): {}'.format( + method, url, ct)) raise exception.InvalidIpOrPort() except requests.exceptions.ReadTimeout as rt: - LOG.error('Read timed out err: {}'.format(rt)) + LOG.error('Read timed out error for url([{}]{}): {}'.format( + method, url, rt)) raise exception.StorageBackendException(six.text_type(rt)) except requests.exceptions.SSLError as e: - LOG.error('SSLError for %s %s' % (method, url)) err_str = six.text_type(e) + LOG.error('SSLError for url([{}]{}): {}'.format( + method, url, err_str)) if 'certificate verify failed' in err_str: raise exception.SSLCertificateFailed() else: raise exception.SSLHandshakeFailed() except Exception as err: - LOG.exception('Bad response from server: %(url)s.' 
- ' Error: %(err)s', {'url': url, 'err': err}) + LOG.error('Bad response from server for url([{}]{}): {}'.format( + method, url, err)) if 'WSAETIMEDOUT' in str(err): raise exception.ConnectTimeout() elif 'Failed to establish a new connection' in str(err): - LOG.error('Failed to establish: {}'.format(err)) raise exception.InvalidIpOrPort() elif 'Read timed out' in str(err): raise exception.StorageBackendException(six.text_type(err)) diff --git a/delfin/task_manager/tasks/alerts.py b/delfin/task_manager/tasks/alerts.py index cc38783a4..70e976059 100644 --- a/delfin/task_manager/tasks/alerts.py +++ b/delfin/task_manager/tasks/alerts.py @@ -33,15 +33,15 @@ def __init__(self): def sync_alerts(self, ctx, storage_id, query_para): """ Syncs all alerts from storage side to exporter """ - - LOG.info('Syncing alerts for storage id:{0}'.format(storage_id)) + LOG.info('Syncing alerts for storage id:{0}, query_para: {1}'.format( + storage_id, query_para)) try: storage = db.storage_get(ctx, storage_id) current_alert_list = self.driver_manager.list_alerts(ctx, storage_id, query_para) - if not len(current_alert_list): + if not current_alert_list: # No alerts to sync LOG.info('No alerts to sync from storage device for ' 'storage id:{0}'.format(storage_id)) diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index 725efe104..05f11bd96 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -43,8 +43,8 @@ def _set_synced_after(func, *args, **kwargs): try: storage = db.storage_get(self.context, self.storage_id) except exception.StorageNotFound: - LOG.warn('Storage %s not found when set synced' - % self.storage_id) + LOG.warning('Storage %s not found when set synced' + % self.storage_id) else: # One sync task done, sync status minus 1 # When sync status get to 0 @@ -434,35 +434,21 @@ def sync(self): # Collect the storage host initiator list from driver and database storage_host_initiators = self.driver_api \ .list_storage_host_initiators(self.context, self.storage_id) - db_storage_host_initiators = db.storage_host_initiators_get_all( - self.context, filters={"storage_id": self.storage_id}) - - add_list, update_list, delete_id_list = self._classify_resources( - storage_host_initiators, db_storage_host_initiators, - 'native_storage_host_initiator_id') - - LOG.debug('###StorageHostInitiatorTask for {0}:add={1},delete={2},' - 'update={3}'.format(self.storage_id, - len(add_list), - len(delete_id_list), - len(update_list))) - if delete_id_list: - db.storage_host_initiators_delete(self.context, delete_id_list) - - if update_list: - db.storage_host_initiators_update(self.context, update_list) - - if add_list: - db.storage_host_initiators_create(self.context, add_list) - + if storage_host_initiators: + db.storage_host_initiators_delete_by_storage( + self.context, self.storage_id) + db.storage_host_initiators_create( + self.context, storage_host_initiators) + LOG.info('Building storage host initiator successful for ' + 'storage id:{0}'.format(self.storage_id)) except AttributeError as e: LOG.error(e) except NotImplementedError: # Ignore this exception because driver may not support it. 
pass except Exception as e: - msg = _('Failed to sync storage host initiators entry in DB: {0}' - .format(e)) + msg = _('Failed to sync storage host initiators entry ' + 'in DB: {0}'.format(e)) LOG.error(msg) else: LOG.info("Syncing storage host initiators successful!!!") @@ -475,58 +461,28 @@ def remove(self): class StorageHostTask(StorageResourceTask): - def __init__(self, context, storage_id): - super(StorageHostTask, self).__init__(context, storage_id) - - @check_deleted() - @set_synced_after() - def sync(self): - """ - :return: - """ - LOG.info('Syncing storage hosts for storage id:{0}' - .format(self.storage_id)) - try: - # Collect the storage hosts list from driver and database - storage_hosts = self.driver_api.list_storage_hosts( - self.context, self.storage_id) - db_storage_hosts = db.storage_hosts_get_all( - self.context, filters={"storage_id": self.storage_id}) + NATIVE_RESOURCE_ID = 'native_storage_host_id' - add_list, update_list, delete_id_list = self._classify_resources( - storage_hosts, db_storage_hosts, 'native_storage_host_id' - ) + def driver_list_resources(self): + return self.driver_api.list_storage_hosts(self.context, + self.storage_id) - LOG.debug('###StorageHostTask for {0}:add={1},delete={2},' - 'update={3}'.format(self.storage_id, - len(add_list), - len(delete_id_list), - len(update_list))) - if delete_id_list: - db.storage_hosts_delete(self.context, delete_id_list) + def db_resource_get_all(self, filters): + return db.storage_hosts_get_all(self.context, + filters=filters) - if update_list: - db.storage_hosts_update(self.context, update_list) + def db_resources_delete(self, delete_id_list): + return db.storage_hosts_delete(self.context, delete_id_list) - if add_list: - db.storage_hosts_create(self.context, add_list) + def db_resources_update(self, update_list): + return db.storage_hosts_update(self.context, update_list) - except AttributeError as e: - LOG.error(e) - except NotImplementedError: - # Ignore this exception because driver may not support it. 
- pass - except Exception as e: - msg = _('Failed to sync storage hosts entry in DB: {0}' - .format(e)) - LOG.error(msg) - else: - LOG.info("Syncing storage hosts successful!!!") + def db_resources_create(self, add_list): + return db.storage_hosts_create(self.context, add_list) - def remove(self): - LOG.info('Remove storage hosts for storage id:{0}' - .format(self.storage_id)) - db.storage_hosts_delete_by_storage(self.context, self.storage_id) + def db_resource_delete_by_storage(self): + return db.storage_hosts_delete_by_storage(self.context, + self.storage_id) class StorageHostGroupTask(StorageResourceTask): @@ -728,53 +684,24 @@ def remove(self): class MaskingViewTask(StorageResourceTask): - def __init__(self, context, storage_id): - super(MaskingViewTask, self).__init__(context, storage_id) + NATIVE_RESOURCE_ID = 'native_masking_view_id' - @check_deleted() - @set_synced_after() - def sync(self): - """ - :return: - """ - LOG.info('Syncing masking view for storage id:{0}' - .format(self.storage_id)) - try: - # Collect the masking views from driver and database - masking_views = self.driver_api \ - .list_masking_views(self.context, self.storage_id) - db_masking_views = db.masking_views_get_all( - self.context, filters={"storage_id": self.storage_id}) - - add_list, update_list, delete_id_list = self._classify_resources( - masking_views, db_masking_views, 'native_masking_view_id') + def driver_list_resources(self): + return self.driver_api.list_masking_views(self.context, + self.storage_id) - LOG.debug('###MaskingViewTask for {0}:add={1},delete={2},' - 'update={3}'.format(self.storage_id, - len(add_list), - len(delete_id_list), - len(update_list))) - if delete_id_list: - db.masking_views_delete(self.context, delete_id_list) + def db_resource_get_all(self, filters): + return db.masking_views_get_all(self.context, filters=filters) - if update_list: - db.masking_views_update(self.context, update_list) + def db_resources_delete(self, delete_id_list): + return db.masking_views_delete(self.context, delete_id_list) - if add_list: - db.masking_views_create(self.context, add_list) + def db_resources_update(self, update_list): + return db.masking_views_update(self.context, update_list) - except AttributeError as e: - LOG.error(e) - except NotImplementedError: - # Ignore this exception because driver may not support it. 
- pass - except Exception as e: - msg = _('Failed to sync masking views entry in DB: {0}'.format(e)) - LOG.error(msg) - else: - LOG.info("Syncing masking views successful!!!") + def db_resources_create(self, add_list): + return db.masking_views_create(self.context, add_list) - def remove(self): - LOG.info('Remove masking views for storage id:{0}' - .format(self.storage_id)) - db.masking_views_delete_by_storage(self.context, self.storage_id) + def db_resource_delete_by_storage(self): + return db.masking_views_delete_by_storage(self.context, + self.storage_id) diff --git a/delfin/tests/unit/alert_manager/fakes.py b/delfin/tests/unit/alert_manager/fakes.py index 25321c7b7..693eff3e6 100644 --- a/delfin/tests/unit/alert_manager/fakes.py +++ b/delfin/tests/unit/alert_manager/fakes.py @@ -51,7 +51,8 @@ def fake_v3_alert_source(): 'auth_key': 'YWJjZDEyMzQ1Njc=', 'auth_protocol': 'HMACMD5', 'privacy_key': 'YWJjZDEyMzQ1Njc=', - 'privacy_protocol': 'DES' + 'privacy_protocol': 'DES', + 'host': '127.0.0.1' } diff --git a/delfin/tests/unit/alert_manager/test_snmp_validator.py b/delfin/tests/unit/alert_manager/test_snmp_validator.py index c10554c46..17c19c7a2 100644 --- a/delfin/tests/unit/alert_manager/test_snmp_validator.py +++ b/delfin/tests/unit/alert_manager/test_snmp_validator.py @@ -50,11 +50,13 @@ def test_validate(self, mock_validate_connectivity): mock.Mock()) @mock.patch.object(cmdgen.CommandGenerator, 'getCmd', fakes.mock_cmdgen_get_cmd) + @mock.patch('delfin.db.access_info_get') @mock.patch('pysnmp.entity.observer.MetaObserver.registerObserver') @mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' '.closeDispatcher') def test_validate_connectivity(self, mock_close_dispatcher, - mock_register_observer): + mock_register_observer, + mock_access_info_get): # Get a random host a = random.randint(0, 255) b = random.randint(0, 255) @@ -67,8 +69,9 @@ def test_validate_connectivity(self, mock_close_dispatcher, v3_alert_source = fakes.fake_v3_alert_source() v3_alert_source['host'] = host v3_alert_source['port'] = port + mock_access_info_get.return_value = {'model': 'vsp'} snmp_validator.SNMPValidator.validate_connectivity( - v3_alert_source) + context.RequestContext(), v3_alert_source) self.assertEqual(mock_close_dispatcher.call_count, 1) self.assertEqual(mock_register_observer.call_count, 1) # snmpv2c @@ -76,7 +79,7 @@ def test_validate_connectivity(self, mock_close_dispatcher, v2_alert_source['host'] = host v2_alert_source['port'] = port snmp_validator.SNMPValidator.validate_connectivity( - v2_alert_source) + context.RequestContext(), v2_alert_source) self.assertEqual(mock_close_dispatcher.call_count, 2) self.assertEqual(mock_register_observer.call_count, 1) diff --git a/delfin/tests/unit/alert_manager/test_trap_receiver.py b/delfin/tests/unit/alert_manager/test_trap_receiver.py index 8b91f4447..e7d5a091c 100644 --- a/delfin/tests/unit/alert_manager/test_trap_receiver.py +++ b/delfin/tests/unit/alert_manager/test_trap_receiver.py @@ -87,10 +87,11 @@ def test_add_transport_successful(self): def test_add_transport_exception(self): trap_receiver_inst = self._get_trap_receiver() - + exception_msg = r"int\(\) argument must be a string, " \ + "a bytes-like object or a number, not 'NoneType'" # Mock exception by not initialising snmp engine - self.assertRaisesRegex(ValueError, - "Port binding failed: Port is in use", + self.assertRaisesRegex(exception.DelfinException, + exception_msg, trap_receiver_inst._add_transport) 
@mock.patch('pysnmp.carrier.asyncore.dispatch.AbstractTransportDispatcher' @@ -133,7 +134,7 @@ def test_sync_snmp_config_add_v2_version(self, mock_add_config, ctxt = {} alert_config = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv2c', - 'community_string': 'public'} + 'community_string': b'public'} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() trap_receiver_inst.sync_snmp_config(ctxt, @@ -169,7 +170,7 @@ def test_sync_snmp_config_add_invalid_version(self): ctxt = {} alert_source_config = {'storage_id': 'abcd-1234-5678', 'version': 'snmpv4', - 'community_string': 'public'} + 'community_string': b'public'} trap_receiver_inst = self._get_trap_receiver() trap_receiver_inst.snmp_engine = engine.SnmpEngine() self.assertRaisesRegex(exception.InvalidSNMPConfig, "Invalid snmp " diff --git a/delfin/tests/unit/task_manager/test_resources.py b/delfin/tests/unit/task_manager/test_resources.py index ca8fea81c..b61122ff9 100644 --- a/delfin/tests/unit/task_manager/test_resources.py +++ b/delfin/tests/unit/task_manager/test_resources.py @@ -750,21 +750,13 @@ def test_remove(self, mock_share_del): class TestStorageHostInitiatorTask(test.TestCase): @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') @mock.patch('delfin.drivers.api.API.list_storage_host_initiators') - @mock.patch('delfin.db.storage_host_initiators_get_all') - @mock.patch('delfin.db.storage_host_initiators_delete') - @mock.patch('delfin.db.storage_host_initiators_update') + @mock.patch('delfin.db.storage_host_initiators_delete_by_storage') @mock.patch('delfin.db.storage_host_initiators_create') def test_sync_successful(self, mock_storage_host_initiator_create, - mock_storage_host_initiator_update, - mock_storage_host_initiator_del, - mock_storage_host_initiators_get_all, + mock_storage_host_initiator_delete_by_storage, mock_list_storage_host_initiators, get_lock): storage_host_initiator_obj = resources.StorageHostInitiatorTask( context, 'c5c91c98-91aa-40e6-85ac-37a1d3b32bda') - storage_host_initiator_obj.sync() - self.assertTrue(mock_list_storage_host_initiators.called) - self.assertTrue(mock_storage_host_initiators_get_all.called) - self.assertTrue(get_lock.called) # Collect the storage host initiators from fake_storage fake_storage_obj = fake_storage.FakeStorageDriver() @@ -772,25 +764,10 @@ def test_sync_successful(self, mock_storage_host_initiator_create, # Add the storage host initiators to DB mock_list_storage_host_initiators.return_value \ = fake_storage_obj.list_storage_host_initiators(context) - mock_storage_host_initiators_get_all.return_value = list() storage_host_initiator_obj.sync() + self.assertTrue(mock_storage_host_initiator_delete_by_storage.called) self.assertTrue(mock_storage_host_initiator_create.called) - # Update the storage host initiators to DB - mock_list_storage_host_initiators.return_value \ - = storage_host_initiators_list - mock_storage_host_initiators_get_all.return_value \ - = storage_host_initiators_list - storage_host_initiator_obj.sync() - self.assertTrue(mock_storage_host_initiator_update.called) - - # Delete the storage host initiators to DB - mock_list_storage_host_initiators.return_value = list() - mock_storage_host_initiators_get_all.return_value \ - = storage_host_initiators_list - storage_host_initiator_obj.sync() - self.assertTrue(mock_storage_host_initiator_del.called) - @mock.patch('delfin.db.storage_host_initiators_delete_by_storage') def test_remove(self, mock_storage_host_initiators_del): storage_host_initiator_obj = 
resources.StorageHostInitiatorTask(
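
The refactor in delfin/task_manager/tasks/resources.py turns StorageHostTask and MaskingViewTask into thin declarations of NATIVE_RESOURCE_ID plus driver/db hooks (driver_list_resources, db_resource_get_all, db_resources_delete, db_resources_update, db_resources_create, db_resource_delete_by_storage), relying on a generic sync()/remove() template in the StorageResourceTask base class that this patch does not show. Below is a minimal sketch of what that base-class template presumably looks like, assuming the helpers already defined in that module (LOG, _, check_deleted, set_synced_after, the existing __init__ and _classify_resources); it is an illustration of the pattern, not the project's actual implementation.

class StorageResourceTask(object):
    # Subclasses override this with the driver-side unique id field,
    # e.g. 'native_storage_host_id' or 'native_masking_view_id'.
    NATIVE_RESOURCE_ID = None

    @check_deleted()
    @set_synced_after()
    def sync(self):
        LOG.info('Syncing resources for storage id:{0}'
                 .format(self.storage_id))
        try:
            # Collect the resource list from driver and database
            driver_resources = self.driver_list_resources()
            db_resources = self.db_resource_get_all(
                filters={"storage_id": self.storage_id})

            add_list, update_list, delete_id_list = self._classify_resources(
                driver_resources, db_resources, self.NATIVE_RESOURCE_ID)

            if delete_id_list:
                self.db_resources_delete(delete_id_list)
            if update_list:
                self.db_resources_update(update_list)
            if add_list:
                self.db_resources_create(add_list)
        except AttributeError as e:
            LOG.error(e)
        except NotImplementedError:
            # Ignore this exception because driver may not support it.
            pass
        except Exception as e:
            msg = _('Failed to sync resources entry in DB: {0}'.format(e))
            LOG.error(msg)
        else:
            LOG.info("Syncing resources successful!!!")

    def remove(self):
        LOG.info('Remove resources for storage id:{0}'
                 .format(self.storage_id))
        self.db_resource_delete_by_storage()

With a template of this shape in place, adding a new resource type only requires declaring the id constant and the six hooks, which is exactly what the rewritten StorageHostTask and MaskingViewTask above do.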