From 6e200b450bf8a16feeec12b417348f84009b427e Mon Sep 17 00:00:00 2001
From: Amit Roushan
Date: Fri, 12 Mar 2021 16:22:15 +0000
Subject: [PATCH 1/4] removed older performance monitoring

---
 delfin/api/v1/performance.py   | 157 ---------------------------------
 delfin/api/v1/router.py        |   7 --
 delfin/api/v1/storages.py      |  46 +---------
 delfin/service.py              |   1 -
 delfin/task_manager/manager.py |  70 +--------------
 delfin/task_manager/rpcapi.py  |  10 ---
 etc/delfin/delfin.conf         |   3 -
 etc/scheduler_config.json      |   3 -
 installer/install_delfin.py    |   5 --
 9 files changed, 2 insertions(+), 300 deletions(-)
 delete mode 100644 delfin/api/v1/performance.py
 delete mode 100644 etc/scheduler_config.json

diff --git a/delfin/api/v1/performance.py b/delfin/api/v1/performance.py
deleted file mode 100644
index 5dfe5ad41..000000000
--- a/delfin/api/v1/performance.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2020 The SODA Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import json
-
-from oslo_config import cfg
-from oslo_log import log
-from delfin import db
-from delfin import context, exception
-from delfin.api.common import wsgi
-from delfin.common import constants, config
-from delfin.task_manager import rpcapi as task_rpcapi
-from delfin.task_manager.tasks import resources
-from delfin.api import validation
-from delfin.api.schemas import perf_collection
-from datetime import datetime
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-scheduler_opts = [
-    cfg.StrOpt('config_path', default='scheduler',
-               help='The config path for scheduler'),
-]
-
-CONF.register_opts(scheduler_opts, "scheduler")
-
-
-class PerformanceController(wsgi.Controller):
-    def __init__(self):
-        super().__init__()
-        self.task_rpcapi = task_rpcapi.TaskAPI()
-
-    @validation.schema(perf_collection.update)
-    def metrics_config(self, req, body, id):
-        """
-        :param req:
-        :param body:
-        :param id:
-        :return:
-        """
-        ctxt = req.environ['delfin.context']
-
-        # check storage is registered
-        db.storage_get(ctxt, id)
-
-        metrics_config_dict = body
-        metrics_config_dict.update(body)
-
-        # get scheduler object
-        schedule = config.Scheduler.getInstance()
-
-        # The path of scheduler config file
-        config_file = CONF.scheduler.config_path
-
-        try:
-            # Load the scheduler configuration file
-            data = config.load_json_file(config_file)
-            storage_found = False
-            for storage in data.get("storages"):
-                config_storage_id = storage.get('id')
-                if config_storage_id == id:
-                    for resource in metrics_config_dict.keys():
-                        storage_dict = storage.get(resource)
-                        metric_dict = metrics_config_dict.get(resource)
-                        storage_dict.update(metric_dict)
-
-                        interval = storage_dict.get('interval')
-                        is_historic = storage_dict.get('is_historic')
-
-                        job_id = id + resource
-
-                        if schedule.get_job(job_id):
-                            schedule.reschedule_job(
-                                job_id=job_id, trigger='interval',
-                                seconds=interval)
-                        else:
-                            schedule.add_job(
-                                self.perf_collect, 'interval', args=[
-                                    id, interval, is_historic, resource],
-                                seconds=interval,
-                                next_run_time=datetime.now(), id=job_id)
-
-                    storage_found = True
-
-            if not storage_found:
-                temp_dict = {'id': id}
-                temp_dict.update(metrics_config_dict)
-                data.get("storages").append(temp_dict)
-
-                for resource in metrics_config_dict.keys():
-                    resource_dict = metrics_config_dict.get(resource)
-                    interval = resource_dict.get('interval')
-                    is_historic = resource_dict.get('is_historic')
-
-                    job_id = id + resource
-
-                    schedule.add_job(
-                        self.perf_collect, 'interval', args=[
-                            id, interval, is_historic, resource],
-                        seconds=interval, next_run_time=datetime.now(),
-                        id=job_id)
-
-            with open(config_file, "w") as jsonFile:
-                json.dump(data, jsonFile)
-                jsonFile.close()
-
-        except TypeError as e:
-            LOG.error("Error occurred during parsing of config file")
-            raise exception.InvalidContentType(e)
-        except json.decoder.JSONDecodeError as e:
-            msg = ("Not able to open the config file: {0}"
-                   .format(config_file))
-            LOG.error(msg)
-            raise exception.InvalidInput(e.msg)
-        else:
-            return metrics_config_dict
-        finally:
-            try:
-                schedule.start()
-            except Exception as e:
-                LOG.debug("Scheduler is already running.{0}".format(e))
-
-    def perf_collect(self, storage_id, interval, is_historic, resource):
-        """
-        This function received the request from scheduler to create tasks
-        and push those tasks to rabbitmq.
-        :param storage_id: The registered storage_id
-        :param interval: collection interval period
-        :param is_historic: to enable historic collection
-        :param resource: resource type, ex: array, pool, volume etc.
-        :return:
-        """
-        ctxt = context.RequestContext()
-
-        LOG.debug("Request received to create perf_collect task for storage_"
-                  "id :{0} and resource_type:{1}".format(storage_id, resource)
-                  )
-
-        self.task_rpcapi.performance_metrics_collection(
-            ctxt, storage_id, interval, is_historic,
-            resources.PerformanceCollectionTask.__module__ +
-            '.' + constants.RESOURCE_CLASS_TYPE.get(resource))
-
-
-def create_resource():
-    return wsgi.Resource(PerformanceController())
diff --git a/delfin/api/v1/router.py b/delfin/api/v1/router.py
index ad76c9c77..e8e86953c 100644
--- a/delfin/api/v1/router.py
+++ b/delfin/api/v1/router.py
@@ -20,7 +20,6 @@
 from delfin.api.v1 import controllers
 from delfin.api.v1 import disks
 from delfin.api.v1 import filesystems
-from delfin.api.v1 import performance
 from delfin.api.v1 import ports
 from delfin.api.v1 import qtrees
 from delfin.api.v1 import quotas
@@ -52,12 +51,6 @@ def _setup_routes(self, mapper):
                        action="get_capabilities",
                        conditions={"method": ["GET"]})
 
-        self.resources['performance'] = performance.create_resource()
-        mapper.connect("storages", "/storages/{id}/metrics-config",
-                       controller=self.resources['performance'],
-                       action="metrics_config",
-                       conditions={"method": ["PUT"]})
-
         self.resources['access_info'] = access_info.create_resource()
         mapper.connect("storages", "/storages/{id}/access-info",
                        controller=self.resources['access_info'],
diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index d08b971d5..01d18271e 100755
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -13,9 +13,7 @@
 #    limitations under the License.
 
 import copy
-import json
 
-import six
 from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import timeutils
@@ -28,7 +26,7 @@
 from delfin.api.common import wsgi
 from delfin.api.schemas import storages as schema_storages
 from delfin.api.views import storages as storage_view
-from delfin.common import constants, config
+from delfin.common import constants
 from delfin.drivers import api as driverapi
 from delfin.i18n import _
 from delfin.task_manager import rpcapi as task_rpcapi
@@ -116,7 +114,6 @@ def delete(self, req, id):
                 storage['id'],
                 subclass.__module__ + '.' + subclass.__name__)
         self.task_rpcapi.remove_storage_in_cache(ctxt, storage['id'])
-        self._unregister_perf_collection(storage['id'])
 
     @wsgi.response(202)
     def sync_all(self, req):
@@ -195,47 +192,6 @@ def _storage_exist(self, context, access_info):
 
         return False
 
-    def _unregister_perf_collection(self, storage_id):
-
-        schedule = config.Scheduler.getInstance()
-
-        # The path of scheduler config file
-        config_file = CONF.scheduler.config_path
-
-        try:
-            # Load the scheduler configuration file
-            data = config.load_json_file(config_file)
-            for storage in data.get("storages"):
-                config_storage_id = storage.get('id')
-                if config_storage_id == storage_id:
-                    for resource in storage.keys():
-                        # Skip storage id attribute and
-                        # check for all metric collection jobs
-                        if resource == 'id':
-                            continue
-                        job_id = storage_id + resource
-
-                        if schedule.get_job(job_id):
-                            schedule.remove_job(job_id)
-
-                    # Remove the entry for storage being deleted and
-                    # update schedular config file
-                    data['storages'].remove(storage)
-                    with open(config_file, "w") as jsonFile:
-                        json.dump(data, jsonFile)
-                        jsonFile.close()
-                    break
-        except TypeError:
-            LOG.error("Failed to unregister performance collection. Error "
-                      "occurred during parsing of config file")
-        except json.decoder.JSONDecodeError:
-            msg = ("Failed to unregister performance collection. Not able to "
-                   "open the config file: {0} ".format(config_file))
-            LOG.error(msg)
-        except Exception as e:
-            msg = _('Failed to unregister performance collection. Reason: {0}'
-                    .format(six.text_type(e)))
-            LOG.error(msg)
 
     @wsgi.response(200)
     def get_capabilities(self, req, id):
diff --git a/delfin/service.py b/delfin/service.py
index cb7896672..ca04d2c0a 100644
--- a/delfin/service.py
+++ b/delfin/service.py
@@ -268,7 +268,6 @@ def create(cls, host=None, binary=None, topic=None,
 
     def start(self):
         super(TaskService, self).start()
-        self.manager.periodic_performance_collect()
 
 
 class WSGIService(service.ServiceBase):
diff --git a/delfin/task_manager/manager.py b/delfin/task_manager/manager.py
index 2fada9090..68004f82b 100644
--- a/delfin/task_manager/manager.py
+++ b/delfin/task_manager/manager.py
@@ -16,25 +16,13 @@
 **periodical task manager**
 """
 
-from datetime import datetime
-from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import importutils
-from delfin.common import constants, config
-from delfin import manager, exception
-from delfin.api.v1.performance import PerformanceController
+from delfin import manager
 from delfin.drivers import manager as driver_manager
 from delfin.task_manager.tasks import alerts
 
 LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-scheduler_opts = [
-    cfg.StrOpt('config_path', default='scheduler',
-               help='The config path for scheduler'),
-]
-
-CONF.register_opts(scheduler_opts, "scheduler")
 
 
 class TaskManager(manager.Manager):
@@ -46,54 +34,6 @@ def __init__(self, service_name=None, *args, **kwargs):
         self.alert_task = alerts.AlertSyncTask()
         super(TaskManager, self).__init__(*args, **kwargs)
 
-    def periodic_performance_collect(self):
-        """
-        """
-        try:
-            # Load the scheduler configuration file
-            data = config.load_json_file(CONF.scheduler.config_path)
-
-            # create the object of periodic scheduler
-            schedule = config.Scheduler.getInstance()
-
-            # create the objet to StorageController class, so that
-            # methods of that class can be called by scheduler
-            storage_cls = PerformanceController()
-
-            # parse the scheduler configuration file and start the task
-            # for each storage
-            for storage in data.get("storages"):
-                storage_id = storage.get('id')
-
-                for resource in storage.keys():
-                    if resource == 'id':
-                        continue
-
-                    resource_type = storage.get(resource)
-                    if (resource_type.get('perf_collection') and
-                            resource_type.get('interval') > constants.
-                            SCHEDULING_MIN_INTERVAL):
-                        is_historic = resource_type.get('is_historic')
-                        interval = resource_type.get('interval')
-
-                        # add the task to scheduler(basically, it calls the
-                        # perf_collect method from PerformanceController class
-                        # ) and execute the task immediate and after every
-                        # interval
-                        job_id = storage_id + resource
-                        schedule.add_job(
-                            storage_cls.perf_collect, 'interval', args=[
-                                storage_id, interval, is_historic, resource],
-                            seconds=interval, next_run_time=datetime.now(),
-                            id=job_id)
-
-        except TypeError as e:
-            LOG.error("Error occurred during parsing of config file")
-            raise exception.InvalidContentType(e)
-        else:
-            # start the scheduler
-            schedule.start()
-
     def sync_storage_resource(self, context, storage_id, resource_task):
         LOG.debug("Received the sync_storage task: {0} request for storage"
                   " id:{1}".format(resource_task, storage_id))
@@ -117,14 +57,6 @@ def sync_storage_alerts(self, context, storage_id, query_para):
                  .format(storage_id))
         self.alert_task.sync_alerts(context, storage_id, query_para)
 
-    def performance_metrics_collection(self, context, storage_id, interval,
-                                       is_historic, resource_task):
-        LOG.debug("Received the performance collection task: {0} request"
-                  "for storage_id:{1}".format(resource_task, storage_id))
-        cls = importutils.import_class(resource_task)
-        device_obj = cls(context, storage_id, interval, is_historic)
-        device_obj.collect()
-
     def clear_storage_alerts(self, context, storage_id, sequence_number_list):
         LOG.info('Clear alerts called for storage id: {0}'
                  .format(storage_id))
diff --git a/delfin/task_manager/rpcapi.py b/delfin/task_manager/rpcapi.py
index 7cbd92917..dc9231f29 100644
--- a/delfin/task_manager/rpcapi.py
+++ b/delfin/task_manager/rpcapi.py
@@ -67,16 +67,6 @@ def sync_storage_alerts(self, context, storage_id, query_para):
                                  storage_id=storage_id,
                                  query_para=query_para)
 
-    def performance_metrics_collection(self, context, storage_id, interval,
-                                       is_historic, resource_task):
-        call_context = self.client.prepare(version='1.0')
-        return call_context.cast(context,
-                                 'performance_metrics_collection',
-                                 storage_id=storage_id,
-                                 interval=interval,
-                                 is_historic=is_historic,
-                                 resource_task=resource_task)
-
     def clear_storage_alerts(self, context, storage_id, sequence_number_list):
         call_context = self.client.prepare(version='1.0')
         return call_context.call(context,
diff --git a/etc/delfin/delfin.conf b/etc/delfin/delfin.conf
index d50ff7cef..cdc7f5bf1 100644
--- a/etc/delfin/delfin.conf
+++ b/etc/delfin/delfin.conf
@@ -10,9 +10,6 @@ api_max_limit = 1000
 connection = sqlite:////var/lib/delfin/delfin.sqlite
 db_backend = sqlalchemy
 
-[scheduler]
-config_path = /etc/delfin/scheduler_config.json
-
 [KAFKA_EXPORTER]
 kafka_topic_name = "delfin-kafka"
 kafka_ip = 'localhost'
diff --git a/etc/scheduler_config.json b/etc/scheduler_config.json
deleted file mode 100644
index 700c1d5fc..000000000
--- a/etc/scheduler_config.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "storages": []
-}
diff --git a/installer/install_delfin.py b/installer/install_delfin.py
index d4070aa19..821497f02 100644
--- a/installer/install_delfin.py
+++ b/installer/install_delfin.py
@@ -139,11 +139,6 @@ def main():
                                  'delfin', 'delfin.conf')
     copy_files(conf_file_src, conf_file)
 
-    # Copy the scheduler_config.json file
-    conf_file_src = os.path.join(delfin_source_path, 'etc',
-                                 'scheduler_config.json')
-    copy_files(conf_file_src, delfin_etc_dir)
-
     # install
     install_delfin()
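For reference, the metrics-config interface removed above accepted a request
body of the following shape. The values are illustrative, taken from the
perf_collection schema and the test fixtures that are deleted later in this
series:

    body = {
        'array_polling': {
            'perf_collection': True,
            'interval': 12,
            'is_historic': True
        }
    }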
From d0b65d9406893074e1a72a580e8cb9476a63f67b Mon Sep 17 00:00:00 2001
From: Amit Roushan
Date: Fri, 12 Mar 2021 16:24:04 +0000
Subject: [PATCH 2/4] added performance monitoring trigger

---
 delfin/api/v1/storages.py            |  33 ++
 delfin/common/constants.py           |   8 +
 delfin/drivers/driver_spec_schema.py | 514 +++++++++++++++++++++++++++
 delfin/drivers/helper.py             |   7 +
 delfin/drivers/manager.py            |  55 +++
 5 files changed, 617 insertions(+)
 create mode 100644 delfin/drivers/driver_spec_schema.py

diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index 01d18271e..8f0572013 100755
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -101,6 +101,21 @@ def create(self, req, body):
             msg = _('Failed to sync resources for storage: %(storage)s. '
                     'Error: %(err)s') % {'storage': storage['id'], 'err': e}
             LOG.error(msg)
+
+        try:
+            # Trigger Performance monitoring
+            capabilities = self.driver_api.get_capabilities(
+                context=ctxt, storage_id=storage['id'])
+            # ignore if capabilities is empty
+            if capabilities:
+                _create_performance_monitoring_task(ctxt, storage['id'],
+                                                    capabilities)
+        except Exception as e:
+            # Unexpected error occurred, while performance monitoring.
+            msg = _('Failed to trigger performance monitoring for storage: '
+                    '%(storage)s. Error: %(err)s') % {'storage': storage['id'],
+                                                      'err': e}
+            LOG.error(msg)
         return storage_view.build_storage(storage)
 
     @wsgi.response(202)
@@ -239,3 +254,21 @@ def _set_synced_if_ok(context, storage_id, resource_count):
     storage['sync_status'] = resource_count * constants.ResourceSync.START
     storage['updated_at'] = current_time
     db.storage_update(context, storage['id'], storage)
+
+
+def _create_performance_monitoring_task(context, storage_id, capabilities):
+    # check resource_metric attribute availability and
+    # check if resource_metric is empty
+    if 'resource_metrics' not in capabilities \
+            or not bool(capabilities.get('resource_metrics')):
+        msg = _('Skipping performance monitoring as resource metric is empty '
+                'for storage: %(storage)s.') % {'storage': storage_id}
+        LOG.warning(msg)
+
+    task = dict()
+    task.update(storage_id=storage_id)
+    task.update(args=capabilities.get('resource_metrics'))
+    task.update(interval=constants.Task.DEFAULT_TASK_INTERVAL)
+    task.update(method=constants.Task.PERFORMANCE_TASK_METHOD)
+    # push task in DB
+    db.task_create(context=context, values=task)
diff --git a/delfin/common/constants.py b/delfin/common/constants.py
index b756db2d1..eb2567fe2 100644
--- a/delfin/common/constants.py
+++ b/delfin/common/constants.py
@@ -303,3 +303,11 @@ class ResourceSync(object):
     START = 100
     SUCCEED = 100
     FAILED = 101
+
+
+class Task(object):
+    DEFAULT_TASK_INTERVAL = 30
+    """Default task interval"""
+    PERFORMANCE_TASK_METHOD = "delfin.task_manager.tasks." \
+                              "performance_monitoring"
+    """Performance monitoring task name"""
diff --git a/delfin/drivers/driver_spec_schema.py b/delfin/drivers/driver_spec_schema.py
new file mode 100644
index 000000000..3056c994b
--- /dev/null
+++ b/delfin/drivers/driver_spec_schema.py
@@ -0,0 +1,514 @@
+# Copyright 2020 The SODA Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DRIVER_SPECIFICATION_SCHEMA = {
+    'type': 'object',
+    'properties': {
+        'is_historic': {'type': 'boolean'},
+        'resource_metrics': {
+            'type': 'object',
+            'properties': {
+                'storage': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'memoryUsage': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["%"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+                'storagePool': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+                'volume': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+                'controller': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'cpuUsage': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["%"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'memoryUsage': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["%"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+                'port': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeResponseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeThroughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+                'disk': {
+                    'type': 'object',
+                    'properties': {
+                        'throughput': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["MB/s"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'responseTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'requests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'serviceTime': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["ms"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'readRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                        'writeRequests': {
+                            'type': 'object',
+                            'properties': {
+                                'unit': {'type': 'string', 'enum': ["IOPS"]},
+                                'description': {'type': 'string',
+                                                'minLength': 1,
+                                                'maxLength': 255}
+                            },
+                        },
+                    },
+                    'additionalProperties': False
+                },
+            },
+            'additionalProperties': False
+        },
+    },
+    'additionalProperties': False,
+    'required': ['is_historic']
+}
diff --git a/delfin/drivers/helper.py b/delfin/drivers/helper.py
index daa0ade00..1ce4b7828 100644
--- a/delfin/drivers/helper.py
+++ b/delfin/drivers/helper.py
@@ -72,3 +72,10 @@ def check_storage_consistency(context, storage_id, storage_new):
                (storage_new['serial_number'],
                 storage_present['serial_number']))
         raise exception.StorageSerialNumberMismatch(msg)
+
+
+def empty_driver_capabilities(context):
+    return {
+        'is_historic': False,
+        'resource_metrics': {}
+    }
diff --git a/delfin/drivers/manager.py b/delfin/drivers/manager.py
index be665ce52..918b19f3b 100644
--- a/delfin/drivers/manager.py
+++ b/delfin/drivers/manager.py
@@ -16,6 +16,7 @@
 import six
 import stevedore
 import threading
+import jsonschema
 
 from oslo_log import log
 
@@ -23,6 +24,9 @@
 from delfin import exception
 from delfin import utils
 from delfin import ssl_utils
+from delfin import context
+from delfin.drivers.helper import empty_driver_capabilities
+from delfin.drivers.driver_spec_schema import DRIVER_SPECIFICATION_SCHEMA
 
 LOG = log.getLogger(__name__)
 
@@ -38,6 +42,52 @@ def __init__(self):
         # each of storage systems so that the session between driver
         # and storage system is effectively used.
         self.driver_factory = dict()
+        self._validate_driver_spec()
+
+    def _validate_driver_spec(self):
+        for name in self.names():
+            driver_cls = self[name].plugin
+            spec = driver_cls.get_capabilities(
+                context=context.RequestContext(is_admin=True))
+
+            # in case get_capabilities not implemented
+            if spec is None:
+                # update list_resource_metrics to return empty
+                self._add_default_capabilities(driver_cls)
+                self.alert_driver_error(
+                    driver_cls, "Driver's capability list is empty "
+                                "for %s" % name)
+            try:
+                jsonschema.validate(spec, DRIVER_SPECIFICATION_SCHEMA)
+            except jsonschema.ValidationError as ex:
+                if isinstance(ex.cause, exception.InvalidName):
+                    detail = "An invalid 'name' value was provided " \
+                             "in capability list"
+                elif len(ex.path) > 0:
+                    detail = "Invalid input for capability list " \
+                             "configured in field/attribute %(path)s." \
+                             " %(message)s" % {'path': ex.path.pop(),
+                                               'message': ex.message}
+                else:
+                    detail = ex.message
+                # update list_resource_metrics to return empty
+                self._add_default_capabilities(driver_cls)
+                self.alert_driver_error(driver_cls, detail)
+            except TypeError as ex:
+                # update list_resource_metrics to return empty
+                self._add_default_capabilities(driver_cls)
+
+                # NOTE: If passing non string value to patternProperties
+                # parameter, TypeError happens. Here is for catching
+                # the TypeError.
+                detail = six.text_type(ex)
+                self.alert_driver_error(driver_cls, detail)
+
+    def alert_driver_error(self, driver_cls, msg):
+        LOG.warning(msg)
+        # FIXME (Amit): Add Alert for driver's error
+        # Also enable feature flag to make Northbound API aware
+        return
 
     def get_driver(self, context, invoke_on_load=True,
                    cache_on_load=True, **kwargs):
@@ -114,3 +164,8 @@ def _get_driver_cls(self, **kwargs):
         msg = "Storage driver '%s' could not be found." % name
         LOG.error(msg)
         raise exception.StorageDriverNotFound(name)
+
+    @staticmethod
+    def _add_default_capabilities(driver_cls):
+        driver_cls.get_capabilities = \
+            staticmethod(empty_driver_capabilities)
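For reference, a get_capabilities() return value that validates against the
DRIVER_SPECIFICATION_SCHEMA added above would look like the following sketch;
the metric selection here is hypothetical, not taken from any shipped driver:

    import jsonschema

    from delfin.drivers.driver_spec_schema import DRIVER_SPECIFICATION_SCHEMA

    # Hypothetical driver capabilities: one storage-level metric, no history.
    capabilities = {
        'is_historic': False,
        'resource_metrics': {
            'storage': {
                'throughput': {
                    'unit': 'MB/s',
                    'description': 'Amount of data successfully transferred'
                }
            }
        }
    }

    # Raises jsonschema.ValidationError if the capability list is malformed.
    jsonschema.validate(capabilities, DRIVER_SPECIFICATION_SCHEMA)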
From d422a2a86ed0b45f4c10db543373fe418bc79b2b Mon Sep 17 00:00:00 2001
From: Amit Roushan
Date: Sat, 13 Mar 2021 07:46:10 +0000
Subject: [PATCH 3/4] fixed review comment

---
 delfin/api/schemas/perf_collection.py     |  30 --
 delfin/api/v1/storages.py                 |  19 +-
 delfin/common/config.py                   |  37 +-
 delfin/common/constants.py                |   7 +-
 delfin/drivers/driver_spec_schema.py      | 514 ----------------------
 delfin/drivers/helper.py                  |   7 -
 delfin/drivers/manager.py                 |  55 ---
 delfin/exception.py                       |   5 +
 delfin/task_manager/tasks/resources.py    |  74 +---
 delfin/tests/unit/api/v1/test_storages.py | 203 +++++++--
 10 files changed, 201 insertions(+), 750 deletions(-)
 delete mode 100644 delfin/api/schemas/perf_collection.py
 delete mode 100644 delfin/drivers/driver_spec_schema.py

diff --git a/delfin/api/schemas/perf_collection.py b/delfin/api/schemas/perf_collection.py
deleted file mode 100644
index 803403032..000000000
--- a/delfin/api/schemas/perf_collection.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2020 The SODA Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-update = {
-    'type': 'object',
-    'properties': {
-        'array_polling': {
-            'type': 'object',
-            'properties': {
-                'perf_collection': {'type': 'boolean'},
-                'interval': {'type': 'integer'},
-                'is_historic': {'type': 'boolean'}
-            },
-            'required': ['perf_collection', 'interval', 'is_historic'],
-            'additionalProperties': False
-        }},
-    'required': ['array_polling'],
-    'additionalProperties': False
-}
diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index 8f0572013..2cfbf419e 100755
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -13,6 +13,7 @@
 #    limitations under the License.
 
 import copy
+import six
 
 from oslo_config import cfg
 from oslo_log import log
@@ -106,15 +107,16 @@ def create(self, req, body):
             # Trigger Performance monitoring
             capabilities = self.driver_api.get_capabilities(
                 context=ctxt, storage_id=storage['id'])
-            # ignore if capabilities is empty
-            if capabilities:
-                _create_performance_monitoring_task(ctxt, storage['id'],
-                                                    capabilities)
+            # validate capabilities
+            validation.validate_capabilities(capabilities)
+            # trigger performance monitoring
+            _create_performance_monitoring_task(ctxt, storage['id'],
+                                                capabilities)
         except Exception as e:
             # Unexpected error occurred, while performance monitoring.
-            msg = _('Failed to trigger performance monitoring for storage: '
+            msg = _('Failed to create performance monitoring task for storage:'
                     '%(storage)s. Error: %(err)s') % {'storage': storage['id'],
-                                                      'err': e}
+                                                      'err': six.text_type(e)}
             LOG.error(msg)
         return storage_view.build_storage(storage)
 
@@ -207,7 +209,6 @@ def _storage_exist(self, context, access_info):
 
         return False
 
-
     @wsgi.response(200)
     def get_capabilities(self, req, id):
         """
@@ -261,9 +262,7 @@ def _create_performance_monitoring_task(context, storage_id, capabilities):
     # check if resource_metric is empty
     if 'resource_metrics' not in capabilities \
             or not bool(capabilities.get('resource_metrics')):
-        msg = _('Skipping performance monitoring as resource metric is empty '
-                'for storage: %(storage)s.') % {'storage': storage_id}
-        LOG.warning(msg)
+        raise exception.EmptyResourceMetrics()
 
     task = dict()
     task.update(storage_id=storage_id)
diff --git a/delfin/common/config.py b/delfin/common/config.py
index 363903233..7443d7867 100644
--- a/delfin/common/config.py
+++ b/delfin/common/config.py
@@ -24,14 +24,12 @@
 stepping stone.
 """
 
-import json
 import socket
-from delfin import exception
+
 from oslo_config import cfg
 from oslo_log import log
 from oslo_middleware import cors
 from oslo_utils import netutils
-from apscheduler.schedulers.background import BackgroundScheduler
 
 LOG = log.getLogger(__name__)
 
@@ -130,36 +128,3 @@ def set_middleware_defaults():
                       'DELETE',
                       'PATCH']
     )
-
-
-def load_json_file(config_file):
-    try:
-        with open(config_file) as f:
-            data = json.load(f)
-            return data
-    except json.decoder.JSONDecodeError as e:
-        msg = ("{0} file is not correct. Please check the configuration file"
-               .format(config_file))
-        LOG.error(msg)
-        raise exception.InvalidInput(e.msg)
-    except FileNotFoundError as e:
-        LOG.error(e)
-        raise exception.ConfigNotFound(e)
-
-
-class Scheduler:
-    __instance = None
-
-    @staticmethod
-    def getInstance():
-        """ Get instance of scheduler class """
-        if Scheduler.__instance is None:
-            Scheduler.__instance = BackgroundScheduler()
-        return Scheduler.__instance
-
-    def __init__(self):
-        if Scheduler.__instance is not None:
-            raise Exception("The instance of scheduler class is already"
-                            "running.")
-        else:
-            Scheduler.__instance = self
diff --git a/delfin/common/constants.py b/delfin/common/constants.py
index eb2567fe2..ecf2fa2b8 100644
--- a/delfin/common/constants.py
+++ b/delfin/common/constants.py
@@ -23,9 +23,6 @@
 # Valid access type supported currently.
 ACCESS_TYPE = ['rest', 'ssh', 'cli', 'smis']
 
-RESOURCE_CLASS_TYPE = {'array_polling': 'ArrayPerformanceCollection'}
-SCHEDULING_MIN_INTERVAL = 5
-
 
 # Custom fields for Delfin objects
 class StorageStatus(object):
@@ -308,6 +305,6 @@ class ResourceSync(object):
 class Task(object):
     DEFAULT_TASK_INTERVAL = 30
     """Default task interval"""
-    PERFORMANCE_TASK_METHOD = "delfin.task_manager.tasks." \
-                              "performance_monitoring"
+    PERFORMANCE_TASK_METHOD = "delfin.task_manager.tasks.telemetry." \
+                              "PerformanceCollectionTask"
     """Performance monitoring task name"""
diff --git a/delfin/drivers/driver_spec_schema.py b/delfin/drivers/driver_spec_schema.py
deleted file mode 100644
index 3056c994b..000000000
--- a/delfin/drivers/driver_spec_schema.py
+++ /dev/null
@@ -1,514 +0,0 @@
-# Copyright 2020 The SODA Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DRIVER_SPECIFICATION_SCHEMA = {
-    'type': 'object',
-    'properties': {
-        'is_historic': {'type': 'boolean'},
-        'resource_metrics': {
-            'type': 'object',
-            'properties': {
-                'storage': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'memoryUsage': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["%"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-                'storagePool': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-                'volume': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-                'controller': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'cpuUsage': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["%"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'memoryUsage': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["%"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-                'port': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeResponseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeThroughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-                'disk': {
-                    'type': 'object',
-                    'properties': {
-                        'throughput': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["MB/s"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'responseTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'requests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'serviceTime': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["ms"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'readRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                        'writeRequests': {
-                            'type': 'object',
-                            'properties': {
-                                'unit': {'type': 'string', 'enum': ["IOPS"]},
-                                'description': {'type': 'string',
-                                                'minLength': 1,
-                                                'maxLength': 255}
-                            },
-                        },
-                    },
-                    'additionalProperties': False
-                },
-            },
-            'additionalProperties': False
-        },
-    },
-    'additionalProperties': False,
-    'required': ['is_historic']
-}
diff --git a/delfin/drivers/helper.py b/delfin/drivers/helper.py
index 1ce4b7828..daa0ade00 100644
--- a/delfin/drivers/helper.py
+++ b/delfin/drivers/helper.py
@@ -72,10 +72,3 @@ def check_storage_consistency(context, storage_id, storage_new):
                (storage_new['serial_number'],
                 storage_present['serial_number']))
         raise exception.StorageSerialNumberMismatch(msg)
-
-
-def empty_driver_capabilities(context):
-    return {
-        'is_historic': False,
-        'resource_metrics': {}
-    }
diff --git a/delfin/drivers/manager.py b/delfin/drivers/manager.py
index 918b19f3b..be665ce52 100644
--- a/delfin/drivers/manager.py
+++ b/delfin/drivers/manager.py
@@ -16,7 +16,6 @@
 import six
 import stevedore
 import threading
-import jsonschema
 
 from oslo_log import log
 
@@ -23,9 +23,6 @@
 from delfin import exception
 from delfin import utils
 from delfin import ssl_utils
-from delfin import context
-from delfin.drivers.helper import empty_driver_capabilities
-from delfin.drivers.driver_spec_schema import DRIVER_SPECIFICATION_SCHEMA
 
 LOG = log.getLogger(__name__)
 
@@ -42,52 +38,6 @@ def __init__(self):
         # each of storage systems so that the session between driver
         # and storage system is effectively used.
         self.driver_factory = dict()
-        self._validate_driver_spec()
-
-    def _validate_driver_spec(self):
-        for name in self.names():
-            driver_cls = self[name].plugin
-            spec = driver_cls.get_capabilities(
-                context=context.RequestContext(is_admin=True))
-
-            # in case get_capabilities not implemented
-            if spec is None:
-                # update list_resource_metrics to return empty
-                self._add_default_capabilities(driver_cls)
-                self.alert_driver_error(
-                    driver_cls, "Driver's capability list is empty "
-                                "for %s" % name)
-            try:
-                jsonschema.validate(spec, DRIVER_SPECIFICATION_SCHEMA)
-            except jsonschema.ValidationError as ex:
-                if isinstance(ex.cause, exception.InvalidName):
-                    detail = "An invalid 'name' value was provided " \
-                             "in capability list"
-                elif len(ex.path) > 0:
-                    detail = "Invalid input for capability list " \
-                             "configured in field/attribute %(path)s." \
-                             " %(message)s" % {'path': ex.path.pop(),
-                                               'message': ex.message}
-                else:
-                    detail = ex.message
-                # update list_resource_metrics to return empty
-                self._add_default_capabilities(driver_cls)
-                self.alert_driver_error(driver_cls, detail)
-            except TypeError as ex:
-                # update list_resource_metrics to return empty
-                self._add_default_capabilities(driver_cls)
-
-                # NOTE: If passing non string value to patternProperties
-                # parameter, TypeError happens. Here is for catching
-                # the TypeError.
-                detail = six.text_type(ex)
-                self.alert_driver_error(driver_cls, detail)
-
-    def alert_driver_error(self, driver_cls, msg):
-        LOG.warning(msg)
-        # FIXME (Amit): Add Alert for driver's error
-        # Also enable feature flag to make Northbound API aware
-        return
 
     def get_driver(self, context, invoke_on_load=True,
                    cache_on_load=True, **kwargs):
@@ -164,8 +114,3 @@ def _get_driver_cls(self, **kwargs):
% name LOG.error(msg) raise exception.StorageDriverNotFound(name) - - @staticmethod - def _add_default_capabilities(driver_cls): - driver_cls.get_capabilities = \ - staticmethod(empty_driver_capabilities) diff --git a/delfin/exception.py b/delfin/exception.py index b13494074..6380c26ac 100644 --- a/delfin/exception.py +++ b/delfin/exception.py @@ -323,3 +323,8 @@ class InvalidStorageCapability(Invalid): class StorageCapabilityNotSupported(Invalid): msg_fmt = _("Capability feature not supported by storage") code = 501 + + +class EmptyResourceMetrics(DelfinException): + msg_fmt = _("Empty resource metric in capabilities") + code = 501 diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py index 6becf8b8c..a9e32517b 100644 --- a/delfin/task_manager/tasks/resources.py +++ b/delfin/task_manager/tasks/resources.py @@ -21,7 +21,6 @@ from delfin import db from delfin import exception from delfin.common import constants -from delfin.exporter import base_exporter from delfin.drivers import api as driverapi from delfin.i18n import _ @@ -167,9 +166,8 @@ def sync(self): # collect the storage pools list from driver and database storage_pools = self.driver_api.list_storage_pools(self.context, self.storage_id) - db_pools = db.storage_pool_get_all(self.context, - filters={"storage_id": - self.storage_id}) + db_pools = db.storage_pool_get_all( + self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_pools, db_pools, 'native_storage_pool_id' @@ -212,9 +210,8 @@ def sync(self): # collect the volumes list from driver and database storage_volumes = self.driver_api.list_volumes(self.context, self.storage_id) - db_volumes = db.volume_get_all(self.context, - filters={"storage_id": - self.storage_id}) + db_volumes = db.volume_get_all( + self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_volumes, db_volumes, 'native_volume_id' @@ -261,9 +258,8 @@ def sync(self): # collect the controllers list from driver and database storage_controllers = self.driver_api.list_controllers( self.context, self.storage_id) - db_controllers = db.controller_get_all(self.context, - filters={"storage_id": - self.storage_id}) + db_controllers = db.controller_get_all( + self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_controllers, db_controllers, 'native_controller_id' @@ -312,9 +308,8 @@ def sync(self): # collect the ports list from driver and database storage_ports = self.driver_api.list_ports(self.context, self.storage_id) - db_ports = db.port_get_all(self.context, - filters={"storage_id": - self.storage_id}) + db_ports = db.port_get_all( + self.context, filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_ports, db_ports, 'native_port_id' @@ -363,8 +358,7 @@ def sync(self): storage_disks = self.driver_api.list_disks(self.context, self.storage_id) db_disks = db.disk_get_all(self.context, - filters={"storage_id": - self.storage_id}) + filters={"storage_id": self.storage_id}) add_list, update_list, delete_id_list = self._classify_resources( storage_disks, db_disks, 'native_disk_id' @@ -609,53 +603,3 @@ def remove(self): LOG.info('Remove shares for storage id:{0}' .format(self.storage_id)) db.share_delete_by_storage(self.context, self.storage_id) - - -class PerformanceCollectionTask(object): - - def __init__(self): - 
self.driver_api = driverapi.API() - self.perf_exporter = base_exporter.PerformanceExporterManager() - - -class ArrayPerformanceCollection(PerformanceCollectionTask): - def __init__(self, context, storage_id, interval, is_historic): - super(ArrayPerformanceCollection, self).__init__() - self.context = context - self.storage_id = storage_id - self.interval = interval - self.is_historic = is_historic - - def collect(self): - """ - :return: - """ - LOG.info('Collecting array performance metrics for storage id:{0}' - .format(self.storage_id)) - try: - # collect the performance metrics from driver and push to - # prometheus exporter api - array_metrics = self.driver_api.collect_array_metrics( - self.context, self.storage_id, self.interval, - self.is_historic) - # fill extra labels to metric by fetching metadata from resource DB - try: - array_details = db.storage_get(self.context, storage_id=self - .storage_id) - for m in array_metrics: - m.labels["name"] = array_details.name - m.labels["serial_number"] = array_details.serial_number - - except Exception as e: - msg = _('Failed to add extra labels to array performance ' - 'metrics: {0}'.format(e)) - LOG.error(msg) - - self.perf_exporter.dispatch(self.context, array_metrics) - - except Exception as e: - msg = _('Failed to collect array performance metrics from ' - 'driver: {0}'.format(e)) - LOG.error(msg) - else: - LOG.info("Array performance metrics collection done!!!") diff --git a/delfin/tests/unit/api/v1/test_storages.py b/delfin/tests/unit/api/v1/test_storages.py index 4e56117ca..44d7f80fc 100644 --- a/delfin/tests/unit/api/v1/test_storages.py +++ b/delfin/tests/unit/api/v1/test_storages.py @@ -14,23 +14,13 @@ from unittest import mock -from delfin.common import constants - from delfin import db from delfin import exception from delfin import test from delfin.api.v1.storages import StorageController +from delfin.common import constants from delfin.tests.unit.api import fakes -fake_schedular_config = { - "storages": [{"id": "fake_id", - "array_polling": {"perf_collection": True, "interval": 12, - "is_historic": True}}]} - -invalid_schedular_config = '"storages": [{"id": "fake_id","array_polling": {' \ - '"perf_collection": True, "interval": 12,' \ - '"is_historic": True}}]} ' - class TestStorageController(test.TestCase): @@ -44,24 +34,8 @@ def setUp(self): @mock.patch.object(db, 'storage_get', mock.Mock(return_value={'id': 'fake_id'})) - @mock.patch('delfin.common.config.load_json_file') - def test_delete(self, mock_load_json_file): + def test_delete(self): req = fakes.HTTPRequest.blank('/storages/fake_id') - mock_load_json_file.return_value = fake_schedular_config - self.controller.delete(req, 'fake_id') - ctxt = req.environ['delfin.context'] - db.storage_get.assert_called_once_with(ctxt, 'fake_id') - self.task_rpcapi.remove_storage_resource.assert_called_with( - ctxt, 'fake_id', mock.ANY) - self.task_rpcapi.remove_storage_in_cache.assert_called_once_with( - ctxt, 'fake_id') - - @mock.patch.object(db, 'storage_get', - mock.Mock(return_value={'id': 'fake_id'})) - @mock.patch('delfin.common.config.load_json_file') - def test_delete_invalid_schedular_conf(self, mock_load_json_file): - req = fakes.HTTPRequest.blank('/storages/fake_id') - mock_load_json_file.return_value = invalid_schedular_config self.controller.delete(req, 'fake_id') ctxt = req.environ['delfin.context'] db.storage_get.assert_called_once_with(ctxt, 'fake_id') @@ -378,3 +352,176 @@ def test_get_capabilities_with_invalid_capabilities(self): 
         self.assertRaises(exception.InvalidStorageCapability,
                           self.controller.get_capabilities,
                           req, storage_id)
+
+    def test_create_with_performance_monitoring(self):
+        self.mock_object(
+            self.controller.driver_api, 'discover_storage',
+            mock.Mock(return_value={
+                "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6",
+                'name': 'fake_driver',
+                'description': 'it is a fake driver.',
+                'vendor': 'fake_vendor',
+                'model': 'fake_model',
+                'status': 'normal',
+                'serial_number': '2102453JPN12KA000011',
+                'firmware_version': '1.0.0',
+                'location': 'HK',
+                'total_capacity': 1024 * 1024,
+                'used_capacity': 3126,
+                'free_capacity': 1045449,
+                "sync_status": constants.SyncStatus.SYNCED,
+                'raw_capacity': 1610612736000,
+                'subscribed_capacity': 219902325555200
+            }))
+        self.mock_object(
+            db, 'access_info_get_all',
+            fakes.fake_access_info_get_all)
+        self.mock_object(
+            db, 'storage_get',
+            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))
+        self.mock_object(
+            self.controller, 'sync',
+            fakes.fake_sync)
+        body = {
+            'model': 'fake_driver',
+            'vendor': 'fake_storage',
+            'rest': {
+                'username': 'admin',
+                'password': 'abcd',
+                'host': '10.0.0.76',
+                'port': 1234
+            },
+            'extra_attributes': {'array_id': '0001234567891'}
+        }
+        req = fakes.HTTPRequest.blank(
+            '/storages')
+
+        resource_metrics = {
+            "storage": {
+                "throughput": {
+                    "unit": "MB/s",
+                    "description": "Represents how much data is "
+                                   "successfully transferred in MB/s"
+                },
+            }
+        }
+
+        self.mock_object(
+            self.controller.driver_api, 'get_capabilities',
+            mock.Mock(return_value={
+                'is_historic': False,
+                'resource_metrics': resource_metrics
+            }))
+
+        def test_task_create(context, values):
+            self.assertEqual(values['resource_metrics'], resource_metrics)
+
+        db.task_create = test_task_create
+
+        res_dict = self.controller.create(req, body=body)
+        expctd_dict = {
+            "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6",
+            'name': 'fake_driver',
+            'description': 'it is a fake driver.',
+            'vendor': 'fake_vendor',
+            'model': 'fake_model',
+            'status': 'normal',
+            'serial_number': '2102453JPN12KA000011',
+            'firmware_version': '1.0.0',
+            'location': 'HK',
+            'total_capacity': 1024 * 1024,
+            'used_capacity': 3126,
+            'free_capacity': 1045449,
+            "sync_status": "SYNCED",
+            'raw_capacity': 1610612736000,
+            'subscribed_capacity': 219902325555200
+        }
+
+        self.assertDictEqual(expctd_dict, res_dict)
+
+    def test_create_with_performance_monitoring_with_empty_metric(self):
+        self.mock_object(
+            self.controller.driver_api, 'discover_storage',
+            mock.Mock(return_value={
+                "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6",
+                'name': 'fake_driver',
+                'description': 'it is a fake driver.',
+                'vendor': 'fake_vendor',
+                'model': 'fake_model',
+                'status': 'normal',
+                'serial_number': '2102453JPN12KA000011',
+                'firmware_version': '1.0.0',
+                'location': 'HK',
+                'total_capacity': 1024 * 1024,
+                'used_capacity': 3126,
+                'free_capacity': 1045449,
+                "sync_status": constants.SyncStatus.SYNCED,
+                'raw_capacity': 1610612736000,
+                'subscribed_capacity': 219902325555200
+            }))
+        self.mock_object(
+            db, 'access_info_get_all',
+            fakes.fake_access_info_get_all)
+        self.mock_object(
+            db, 'storage_get',
+            mock.Mock(side_effect=exception.StorageNotFound('fake_id')))
+
+        self.mock_object(self.controller, 'sync', fakes.fake_sync)
+        body = {
+            'model': 'fake_driver',
+            'vendor': 'fake_storage',
+            'rest': {
+                'username': 'admin',
+                'password': 'abcd',
+                'host': '10.0.0.76',
+                'port': 1234
+            },
+            'extra_attributes': {'array_id': '0001234567891'}
+        }
+
+        req = fakes.HTTPRequest.blank(
+            '/storages')
+
+        resource_metrics = {}
+
+        self.mock_object(
+            self.controller.driver_api, 'get_capabilities',
+            mock.Mock(return_value={
+                'is_historic': False,
+                'resource_metrics': resource_metrics
+            }))
+
+        res_dict = self.controller.create(req, body=body)
+        expctd_dict = {
+            "id": "12c2d52f-01bc-41f5-b73f-7abf6f38a2a6",
+            'name': 'fake_driver',
+            'description': 'it is a fake driver.',
+            'vendor': 'fake_vendor',
+            'model': 'fake_model',
+            'status': 'normal',
+            'serial_number': '2102453JPN12KA000011',
+            'firmware_version': '1.0.0',
+            'location': 'HK',
+            'total_capacity': 1024 * 1024,
+            'used_capacity': 3126,
+            'free_capacity': 1045449,
+            "sync_status": "SYNCED",
+            'raw_capacity': 1610612736000,
+            'subscribed_capacity': 219902325555200
+        }
+
+        self.assertDictEqual(expctd_dict, res_dict)

From 2fcb93857e61f2febe8ee74a7d7ad440ec8ec650 Mon Sep 17 00:00:00 2001
From: sushanthakumar
Date: Fri, 19 Mar 2021 11:23:10 +0530
Subject: [PATCH 4/4] Updated review comments

---
 delfin/api/v1/storages.py              | 11 ++++++-----
 delfin/task_manager/tasks/resources.py | 23 ++++++++++++++---------
 2 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/delfin/api/v1/storages.py b/delfin/api/v1/storages.py
index 2cfbf419e..ef729a9ae 100755
--- a/delfin/api/v1/storages.py
+++ b/delfin/api/v1/storages.py
@@ -107,16 +107,18 @@ def create(self, req, body):
             # Trigger Performance monitoring
             capabilities = self.driver_api.get_capabilities(
                 context=ctxt, storage_id=storage['id'])
-            # validate capabilities
             validation.validate_capabilities(capabilities)
-            # trigger performance monitoring
             _create_performance_monitoring_task(ctxt, storage['id'],
                                                 capabilities)
+        except exception.EmptyResourceMetrics:
+            msg = _("Resource metric provided by capabilities is empty for "
+                    "storage: %s") % storage['id']
+            LOG.info(msg)
         except Exception as e:
-            # Unexpected error occurred, while performance monitoring.
             msg = _('Failed to create performance monitoring task for storage:'
                    '%(storage)s. Error: %(err)s') % {'storage': storage['id'],
                                                      'err': six.text_type(e)}
+            LOG.error(msg)
 
         return storage_view.build_storage(storage)
 
@@ -258,7 +260,7 @@ def _set_synced_if_ok(context, storage_id, resource_count):
 
 
 def _create_performance_monitoring_task(context, storage_id, capabilities):
-    # check resource_metric attribute availability and
+    # Check resource_metric attribute availability and
     # check if resource_metric is empty
     if 'resource_metrics' not in capabilities \
             or not bool(capabilities.get('resource_metrics')):
@@ -269,5 +271,4 @@ def _create_performance_monitoring_task(context, storage_id, capabilities):
     task.update(args=capabilities.get('resource_metrics'))
     task.update(interval=constants.Task.DEFAULT_TASK_INTERVAL)
     task.update(method=constants.Task.PERFORMANCE_TASK_METHOD)
-    # push task in DB
     db.task_create(context=context, values=task)
diff --git a/delfin/task_manager/tasks/resources.py b/delfin/task_manager/tasks/resources.py
index a9e32517b..527556e93 100644
--- a/delfin/task_manager/tasks/resources.py
+++ b/delfin/task_manager/tasks/resources.py
@@ -166,8 +166,9 @@ def sync(self):
         # collect the storage pools list from driver and database
         storage_pools = self.driver_api.list_storage_pools(self.context,
                                                            self.storage_id)
-        db_pools = db.storage_pool_get_all(
-            self.context, filters={"storage_id": self.storage_id})
+        db_pools = db.storage_pool_get_all(self.context,
+                                           filters={"storage_id":
+                                                    self.storage_id})
 
         add_list, update_list, delete_id_list = self._classify_resources(
             storage_pools, db_pools, 'native_storage_pool_id'
@@ -210,8 +211,9 @@ def sync(self):
         # collect the volumes list from driver and database
         storage_volumes = self.driver_api.list_volumes(self.context,
                                                        self.storage_id)
-        db_volumes = db.volume_get_all(
-            self.context, filters={"storage_id": self.storage_id})
+        db_volumes = db.volume_get_all(self.context,
+                                       filters={"storage_id":
+                                                self.storage_id})
 
         add_list, update_list, delete_id_list = self._classify_resources(
             storage_volumes, db_volumes, 'native_volume_id'
@@ -258,8 +260,9 @@ def sync(self):
         # collect the controllers list from driver and database
         storage_controllers = self.driver_api.list_controllers(
             self.context, self.storage_id)
-        db_controllers = db.controller_get_all(
-            self.context, filters={"storage_id": self.storage_id})
+        db_controllers = db.controller_get_all(self.context,
+                                               filters={"storage_id":
+                                                        self.storage_id})
 
         add_list, update_list, delete_id_list = self._classify_resources(
             storage_controllers, db_controllers, 'native_controller_id'
@@ -308,8 +311,9 @@ def sync(self):
         # collect the ports list from driver and database
         storage_ports = self.driver_api.list_ports(self.context,
                                                    self.storage_id)
-        db_ports = db.port_get_all(
-            self.context, filters={"storage_id": self.storage_id})
+        db_ports = db.port_get_all(self.context,
+                                   filters={"storage_id":
+                                            self.storage_id})
 
         add_list, update_list, delete_id_list = self._classify_resources(
             storage_ports, db_ports, 'native_port_id'
@@ -358,7 +362,8 @@ def sync(self):
         storage_disks = self.driver_api.list_disks(self.context,
                                                    self.storage_id)
         db_disks = db.disk_get_all(self.context,
-                                   filters={"storage_id": self.storage_id})
+                                   filters={"storage_id":
+                                            self.storage_id})
 
         add_list, update_list, delete_id_list = self._classify_resources(
             storage_disks, db_disks, 'native_disk_id'
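
The end state of this series: storage registration drives performance monitoring entirely through task records. create() asks the driver for its capabilities, validates them, and _create_performance_monitoring_task() persists a task whose args are the driver's resource_metrics; an empty resource_metrics dict raises EmptyResourceMetrics, which create() logs at INFO and otherwise ignores. Below is a minimal, self-contained sketch of that decision logic, not part of the patches themselves: EmptyResourceMetrics here is a local stand-in for delfin.exception.EmptyResourceMetrics, and the interval and method values are assumed placeholders for constants.Task.DEFAULT_TASK_INTERVAL and constants.Task.PERFORMANCE_TASK_METHOD.

class EmptyResourceMetrics(Exception):
    """Local stand-in: raised when capabilities carry no resource metrics."""


def build_performance_task(storage_id, capabilities):
    # Mirror of the guard in _create_performance_monitoring_task(): the
    # 'resource_metrics' key must exist and must be a non-empty dict.
    if 'resource_metrics' not in capabilities \
            or not capabilities.get('resource_metrics'):
        raise EmptyResourceMetrics()
    # The real code persists these values via db.task_create(); here they
    # are simply returned for inspection.
    return {
        'storage_id': storage_id,
        'args': capabilities['resource_metrics'],
        'interval': 900,  # assumed stand-in for Task.DEFAULT_TASK_INTERVAL
        'method': 'performance_metrics_collection',  # assumed method name
    }


if __name__ == '__main__':
    capabilities = {
        'is_historic': False,
        'resource_metrics': {
            'storage': {'throughput': {'unit': 'MB/s'}},
        },
    }
    # Non-empty metrics: a task record would be created for this storage.
    print(build_performance_task('fake_id', capabilities))

    try:
        build_performance_task('fake_id', {'resource_metrics': {}})
    except EmptyResourceMetrics:
        # create() catches this case and logs it instead of failing the
        # registration, matching the new except branch in the last patch.
        print('empty resource metrics rejected')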