
Performance collection trigger after storage registration #506

Merged: 6 commits, Mar 19, 2021
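Net effect of the change set below: the standalone metrics-config API (delfin/api/v1/performance.py and its schema) is removed, and performance collection is instead enabled automatically when a storage is registered, by persisting a task record built from the driver's reported capabilities. A minimal illustrative sketch of that record, using the field names visible in the storages.py diff below; this is not the verbatim delfin code, and the method string stands in for constants.Task.PERFORMANCE_TASK_METHOD.

# Illustrative sketch only: the task record created right after a storage
# is registered. Field names follow the storages.py diff below; the method
# string is an assumed stand-in for constants.Task.PERFORMANCE_TASK_METHOD.
def build_performance_task(storage_id, capabilities, interval):
    resource_metrics = capabilities.get('resource_metrics')
    if not resource_metrics:
        # The real code path raises exception.EmptyResourceMetrics here.
        raise ValueError('capabilities reported no resource_metrics')
    return {
        'storage_id': storage_id,
        'args': resource_metrics,   # which metrics to collect per resource
        'interval': interval,       # seconds between collection runs
        'method': 'performance_metrics_collection',
    }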
30 changes: 0 additions & 30 deletions delfin/api/schemas/perf_collection.py

This file was deleted.

157 changes: 0 additions & 157 deletions delfin/api/v1/performance.py

This file was deleted.

7 changes: 0 additions & 7 deletions delfin/api/v1/router.py
@@ -20,7 +20,6 @@
from delfin.api.v1 import controllers
from delfin.api.v1 import disks
from delfin.api.v1 import filesystems
from delfin.api.v1 import performance
from delfin.api.v1 import ports
from delfin.api.v1 import qtrees
from delfin.api.v1 import quotas
@@ -52,12 +51,6 @@ def _setup_routes(self, mapper):
action="get_capabilities",
conditions={"method": ["GET"]})

self.resources['performance'] = performance.create_resource()
mapper.connect("storages", "/storages/{id}/metrics-config",
controller=self.resources['performance'],
action="metrics_config",
conditions={"method": ["PUT"]})

self.resources['access_info'] = access_info.create_resource()
mapper.connect("storages", "/storages/{id}/access-info",
controller=self.resources['access_info'],
81 changes: 35 additions & 46 deletions delfin/api/v1/storages.py
@@ -13,9 +13,8 @@
# limitations under the License.

import copy
import json

import six

from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
@@ -28,7 +27,7 @@
from delfin.api.common import wsgi
from delfin.api.schemas import storages as schema_storages
from delfin.api.views import storages as storage_view
from delfin.common import constants, config
from delfin.common import constants
from delfin.drivers import api as driverapi
from delfin.i18n import _
from delfin.task_manager import rpcapi as task_rpcapi
@@ -103,6 +102,24 @@ def create(self, req, body):
msg = _('Failed to sync resources for storage: %(storage)s. '
'Error: %(err)s') % {'storage': storage['id'], 'err': e}
LOG.error(msg)

try:
# Trigger Performance monitoring
capabilities = self.driver_api.get_capabilities(
context=ctxt, storage_id=storage['id'])
validation.validate_capabilities(capabilities)
_create_performance_monitoring_task(ctxt, storage['id'],
capabilities)
except exception.EmptyResourceMetrics:
msg = _("Resource metric provided by capabilities is empty for "
"storage: %s") % storage['id']
LOG.info(msg)
except Exception as e:
msg = _('Failed to create performance monitoring task for storage:'
'%(storage)s. Error: %(err)s') % {'storage': storage['id'],
'err': six.text_type(e)}

LOG.error(msg)
return storage_view.build_storage(storage)

@wsgi.response(202)
@@ -116,7 +133,6 @@ def delete(self, req, id):
storage['id'],
subclass.__module__ + '.' + subclass.__name__)
self.task_rpcapi.remove_storage_in_cache(ctxt, storage['id'])
self._unregister_perf_collection(storage['id'])

@wsgi.response(202)
def sync_all(self, req):
@@ -195,48 +211,6 @@ def _storage_exist(self, context, access_info):

return False

def _unregister_perf_collection(self, storage_id):

schedule = config.Scheduler.getInstance()

# The path of scheduler config file
config_file = CONF.scheduler.config_path

try:
# Load the scheduler configuration file
data = config.load_json_file(config_file)
for storage in data.get("storages"):
config_storage_id = storage.get('id')
if config_storage_id == storage_id:
for resource in storage.keys():
# Skip storage id attribute and
# check for all metric collection jobs
if resource == 'id':
continue
job_id = storage_id + resource

if schedule.get_job(job_id):
schedule.remove_job(job_id)

# Remove the entry for storage being deleted and
# update schedular config file
data['storages'].remove(storage)
with open(config_file, "w") as jsonFile:
json.dump(data, jsonFile)
jsonFile.close()
break
except TypeError:
LOG.error("Failed to unregister performance collection. Error "
"occurred during parsing of config file")
except json.decoder.JSONDecodeError:
msg = ("Failed to unregister performance collection. Not able to "
"open the config file: {0} ".format(config_file))
LOG.error(msg)
except Exception as e:
msg = _('Failed to unregister performance collection. Reason: {0}'
.format(six.text_type(e)))
LOG.error(msg)

@wsgi.response(200)
def get_capabilities(self, req, id):
"""
@@ -283,3 +257,18 @@ def _set_synced_if_ok(context, storage_id, resource_count):
storage['sync_status'] = resource_count * constants.ResourceSync.START
storage['updated_at'] = current_time
db.storage_update(context, storage['id'], storage)


def _create_performance_monitoring_task(context, storage_id, capabilities):
# Check resource_metric attribute availability and
# check if resource_metric is empty
if 'resource_metrics' not in capabilities \
or not bool(capabilities.get('resource_metrics')):
raise exception.EmptyResourceMetrics()

task = dict()
task.update(storage_id=storage_id)
task.update(args=capabilities.get('resource_metrics'))
task.update(interval=constants.Task.DEFAULT_TASK_INTERVAL)
Review comment (Member): This has to be in sync with the scheduler; better to take it from the conf file.

Reply: Yes, agreed. That's why it was added as a constant. It will be fixed in the scheduler PR.

task.update(method=constants.Task.PERFORMANCE_TASK_METHOD)
db.task_create(context=context, values=task)
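Picking up the review thread above about keeping the interval in sync with the scheduler: one hedged way to take it from the conf file instead of constants.Task.DEFAULT_TASK_INTERVAL is a regular oslo.config option, sketched below. The option name, the "telemetry" group, and the 900-second default are illustrative assumptions, not part of this PR.

# Sketch only: reading the collection interval from delfin.conf so the API
# and the scheduler share one value. The group name, option name, and the
# 900-second default are assumptions for illustration.
from oslo_config import cfg

telemetry_opts = [
    cfg.IntOpt('performance_collection_interval',
               default=900,
               min=30,
               help='Seconds between performance metric collection runs '
                    'for a registered storage.'),
]

CONF = cfg.CONF
CONF.register_opts(telemetry_opts, group='telemetry')


def collection_interval():
    # Falls back to the registered default when delfin.conf does not set it.
    return CONF.telemetry.performance_collection_interval

The task creation above could then use task.update(interval=collection_interval()), and the scheduler could read the same option, which is the synchronization the reviewer asked for.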
37 changes: 1 addition & 36 deletions delfin/common/config.py
@@ -24,14 +24,12 @@
stepping stone.

"""
import json
import socket
from delfin import exception

from oslo_config import cfg
from oslo_log import log
from oslo_middleware import cors
from oslo_utils import netutils
from apscheduler.schedulers.background import BackgroundScheduler

LOG = log.getLogger(__name__)

@@ -130,36 +128,3 @@ def set_middleware_defaults():
'DELETE',
'PATCH']
)


def load_json_file(config_file):
try:
with open(config_file) as f:
data = json.load(f)
return data
except json.decoder.JSONDecodeError as e:
msg = ("{0} file is not correct. Please check the configuration file"
.format(config_file))
LOG.error(msg)
raise exception.InvalidInput(e.msg)
except FileNotFoundError as e:
LOG.error(e)
raise exception.ConfigNotFound(e)


class Scheduler:
__instance = None

@staticmethod
def getInstance():
""" Get instance of scheduler class """
if Scheduler.__instance is None:
Scheduler.__instance = BackgroundScheduler()
return Scheduler.__instance

def __init__(self):
if Scheduler.__instance is not None:
raise Exception("The instance of scheduler class is already"
"running.")
else:
Scheduler.__instance = self