From 839e31bca043566d945294fff8dc2ff93cfe3ca8 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Thu, 3 Jun 2021 15:20:37 +0530 Subject: [PATCH 01/10] updating common metric spec --- .../schemas/storage_capabilities_schema.py | 150 ++++++------- delfin/tests/unit/api/fakes.py | 211 +++++++++++++++++- 2 files changed, 274 insertions(+), 87 deletions(-) diff --git a/delfin/api/schemas/storage_capabilities_schema.py b/delfin/api/schemas/storage_capabilities_schema.py index a6f817a67..0cbd314de 100644 --- a/delfin/api/schemas/storage_capabilities_schema.py +++ b/delfin/api/schemas/storage_capabilities_schema.py @@ -40,7 +40,7 @@ 'maxLength': 255} }, }, - 'requests': { + 'iops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -67,7 +67,7 @@ 'maxLength': 255} }, }, - 'readRequests': { + 'readIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -76,7 +76,7 @@ 'maxLength': 255} }, }, - 'writeRequests': { + 'writeIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -85,15 +85,7 @@ 'maxLength': 255} }, }, - 'memoryUsage': { - 'type': 'object', - 'properties': { - 'unit': {'type': 'string', 'enum': ["%"]}, - 'description': {'type': 'string', - 'minLength': 1, - 'maxLength': 255} - }, - }, + }, 'additionalProperties': False }, @@ -118,7 +110,7 @@ 'maxLength': 255} }, }, - 'requests': { + 'iops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -145,7 +137,7 @@ 'maxLength': 255} }, }, - 'readRequests': { + 'readIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -154,7 +146,7 @@ 'maxLength': 255} }, }, - 'writeRequests': { + 'writeIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -187,7 +179,7 @@ 'maxLength': 255} }, }, - 'requests': { + 'iops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -196,160 +188,161 @@ 'maxLength': 255} }, }, - 'readResponseTime': { + 'readThroughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeResponseTime': { + 'writeThroughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readThroughput': { + 'readIops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeThroughput': { + 'writeIops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readRequests': { + 'cacheHitRatio': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["%"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeRequests': { + 'readCacheHitRatio': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["%"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - }, - 'additionalProperties': False - }, - 'controller': { 
- 'type': 'object', - 'properties': { - 'throughput': { + 'writeCacheHitRatio': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'unit': {'type': 'string', 'enum': ["%"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'responseTime': { + 'ioSize': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["KB"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readResponseTime': { + 'readIoSize': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["KB"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeResponseTime': { + 'writeIoSize': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["KB"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'requests': { + + }, + 'additionalProperties': False + }, + 'controller': { + 'type': 'object', + 'properties': { + 'throughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readThroughput': { + 'responseTime': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'unit': {'type': 'string', 'enum': ["ms"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeThroughput': { + 'iops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readRequests': { + 'readThroughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'writeRequests': { + 'writeThroughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'cpuUsage': { + 'readIops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["%"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'memoryUsage': { + 'writeIops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["%"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} @@ -379,25 +372,7 @@ 'maxLength': 255} }, }, - 'readResponseTime': { - 'type': 'object', - 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, - 'description': {'type': 'string', - 'minLength': 1, - 'maxLength': 255} - }, - }, - 'writeResponseTime': { - 'type': 'object', - 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, - 'description': {'type': 'string', - 'minLength': 1, - 'maxLength': 255} - }, - }, - 'requests': { + 'iops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -424,7 +399,7 @@ 'maxLength': 255} }, }, - 'readRequests': { + 'readIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -433,7 +408,7 @@ 'maxLength': 255} }, }, - 'writeRequests': { + 
'writeIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -466,7 +441,7 @@ 'maxLength': 255} }, }, - 'requests': { + 'iops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -475,16 +450,16 @@ 'maxLength': 255} }, }, - 'serviceTime': { + 'readIops': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["ms"]}, + 'unit': {'type': 'string', 'enum': ["IOPS"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} }, }, - 'readRequests': { + 'writeIops': { 'type': 'object', 'properties': { 'unit': {'type': 'string', 'enum': ["IOPS"]}, @@ -493,10 +468,19 @@ 'maxLength': 255} }, }, - 'writeRequests': { + 'readThroughput': { 'type': 'object', 'properties': { - 'unit': {'type': 'string', 'enum': ["IOPS"]}, + 'unit': {'type': 'string', 'enum': ["MB/s"]}, + 'description': {'type': 'string', + 'minLength': 1, + 'maxLength': 255} + }, + }, + 'writeThroughput': { + 'type': 'object', + 'properties': { + 'unit': {'type': 'string', 'enum': ["MB/s"]}, 'description': {'type': 'string', 'minLength': 1, 'maxLength': 255} diff --git a/delfin/tests/unit/api/fakes.py b/delfin/tests/unit/api/fakes.py index adfd6f0cd..0d2db60ae 100644 --- a/delfin/tests/unit/api/fakes.py +++ b/delfin/tests/unit/api/fakes.py @@ -439,7 +439,7 @@ def fake_get_capabilities(context, storage_id): "description": "Average time taken for an IO " "operation in ms" }, - "requests": { + "iops": { "unit": "IOPS", "description": "Input/output operations per second" }, @@ -453,15 +453,218 @@ def fake_get_capabilities(context, storage_id): "description": "Represents how much data write is " "successfully transferred in MB/s" }, - "readRequests": { + "readIops": { "unit": "IOPS", "description": "Read requests per second" }, - "writeRequests": { + "writeIops": { "unit": "IOPS", "description": "Write requests per second" }, - } + }, + "storagePool": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per" + " second" + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "volume": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per" + " second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + "cacheHitRatio": { + "unit": "%", + "description": "Percentage of io that are cache " + "hits" + }, + "readCacheHitRatio": { + "unit": "%", + "description": "Percentage of read ops that are cache" + " hits" + }, + 
"writeCacheHitRatio": { + "unit": "%", + "description": "Percentage of write ops that are cache" + " hits" + }, + "ioSize": { + "unit": "KB", + "description": "The average size of IO requests in KB" + }, + "readIoSize": { + "unit": "KB", + "description": "The average size of read IO requests " + "in KB." + }, + "writeIoSize": { + "unit": "KB", + "description": "The average size of read IO requests" + " in KB." + }, + }, + "controller": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per " + "second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per " + "second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "port": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per " + "second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per " + "second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "disk": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per" + " second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per" + " second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + } } From 6062f77fae3086060fd3e4905fecc9fe5a6e51d4 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Fri, 4 Jun 2021 17:55:31 +0530 Subject: [PATCH 02/10] Adding implementaion for resource leval metric collection for fake storage --- delfin/drivers/fake_storage/__init__.py | 280 +++++++++++++++++++++--- 1 file changed, 255 insertions(+), 25 deletions(-) diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py index e5165e3cb..c1c1beaca 100644 --- a/delfin/drivers/fake_storage/__init__.py +++ b/delfin/drivers/fake_storage/__init__.py @@ -13,7 +13,6 @@ # limitations under the License. 
import random -import datetime import decorator import math import six @@ -55,11 +54,21 @@ MIN_CONTROLLERS, MAX_CONTROLLERS = 1, 5 PAGE_LIMIT = 500 MIN_STORAGE, MAX_STORAGE = 1, 10 -MIN_PERF_VALUES, MAX_PERF_VALUES = 1, 4 MIN_QUOTA, MAX_QUOTA = 1, 100 MIN_FS, MAX_FS = 1, 10 MIN_QTREE, MAX_QTREE = 1, 100 MIN_SHARE, MAX_SHARE = 1, 100 +# Minimum sampling interval +MINIMUM_SAMPLE_DURATION_IN_MS = 5 * 1000 +# count of instances for each resource type +RESOURCE_COUNT_DICT = { + "storage": 1, + "storagePool": 10, + "volume": 1000, + "port": 10, + "controller": 4, + "disk": 10, +} def get_range_val(range_str, t): @@ -468,41 +477,58 @@ def _get_volume_range(self, start, end): volume_list.append(v) return volume_list - def _get_random_performance(self): + def _get_random_performance(self, metric_list, start_time, end_time): def get_random_timestamp_value(): rtv = {} - for i in range(MIN_PERF_VALUES, MAX_PERF_VALUES): - timestamp = int(float(datetime.datetime.now().timestamp() - ) * 1000) + timestamp = start_time + while timestamp < end_time: rtv[timestamp] = random.uniform(1, 100) + timestamp += MINIMUM_SAMPLE_DURATION_IN_MS + return rtv # The sample performance_params after filling looks like, # performance_params = {timestamp1: value1, timestamp2: value2} performance_params = {} - for key in constants.DELFIN_ARRAY_METRICS: + for key in metric_list.keys(): performance_params[key] = get_random_timestamp_value() return performance_params + @wait_random(MIN_WAIT, MAX_WAIT) + def get_resource_perf_metrics(self, storage_id, start_time, end_time, + resource_type, metric_list): + LOG.info("###########collecting metrics for resource %s: from" + " storage %s" % (resource_type, self.storage_id)) + resource_metrics = [] + resource_count = RESOURCE_COUNT_DICT[resource_type] + + for i in range(resource_count): + labels = {'storage_id': storage_id, + 'resource_type': resource_type, + 'resource_id': resource_type + str(i), + 'type': 'RAW'} + fake_metrics = self._get_random_performance(metric_list, + start_time, end_time) + for key in metric_list.keys(): + labels['unit'] = metric_list[key]['unit'] + m = constants.metric_struct(name=key, labels=labels, + values=fake_metrics[key]) + resource_metrics.append(m) + return resource_metrics + @wait_random(MIN_WAIT, MAX_WAIT) def collect_perf_metrics(self, context, storage_id, resource_metrics, start_time, end_time): """Collects performance metric for the given interval""" - rd_array_count = random.randint(MIN_STORAGE, MAX_STORAGE) - LOG.debug("Fake_perf_metrics number for %s: %d" % ( - storage_id, rd_array_count)) - array_metrics = [] - labels = {'storage_id': storage_id, 'resource_type': 'array'} - fake_metrics = self._get_random_performance() - - for _ in range(rd_array_count): - for key in constants.DELFIN_ARRAY_METRICS: - m = constants.metric_struct(name=key, labels=labels, - values=fake_metrics[key]) - array_metrics.append(m) - - return array_metrics + merged_metrics = [] + for key in resource_metrics.keys(): + m = self.get_resource_perf_metrics(storage_id, + start_time, + end_time, key, + resource_metrics[key]) + merged_metrics += m + return merged_metrics @staticmethod def get_capabilities(context): @@ -521,7 +547,7 @@ def get_capabilities(context): "description": "Average time taken for an IO " "operation in ms" }, - "requests": { + "iops": { "unit": "IOPS", "description": "Input/output operations per second" }, @@ -535,14 +561,218 @@ def get_capabilities(context): "description": "Represents how much data write is " "successfully transferred in MB/s" }, - 
"readRequests": { + "readIops": { "unit": "IOPS", "description": "Read requests per second" }, - "writeRequests": { + "writeIops": { "unit": "IOPS", "description": "Write requests per second" }, - } + }, + "storagePool": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per" + " second" + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "volume": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per" + " second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + "cacheHitRatio": { + "unit": "%", + "description": "Percentage of io that are cache " + "hits" + }, + "readCacheHitRatio": { + "unit": "%", + "description": "Percentage of read ops that are cache" + " hits" + }, + "writeCacheHitRatio": { + "unit": "%", + "description": "Percentage of write ops that are cache" + " hits" + }, + "ioSize": { + "unit": "KB", + "description": "The average size of IO requests in KB" + }, + "readIoSize": { + "unit": "KB", + "description": "The average size of read IO requests " + "in KB." + }, + "writeIoSize": { + "unit": "KB", + "description": "The average size of read IO requests" + " in KB." 
+ }, + }, + "controller": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per " + "second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per " + "second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "port": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per " + "second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per " + "second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per " + "second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + "disk": { + "throughput": { + "unit": "MB/s", + "description": "Total data transferred per second " + }, + "responseTime": { + "unit": "ms", + "description": "Average time taken for an IO " + "operation" + }, + "iops": { + "unit": "IOPS", + "description": "Read and write operations per" + " second" + }, + "readThroughput": { + "unit": "MB/s", + "description": "Total read data transferred per" + " second " + }, + "writeThroughput": { + "unit": "MB/s", + "description": "Total write data transferred per" + " second " + }, + "readIops": { + "unit": "IOPS", + "description": "Read operations per second" + }, + "writeIops": { + "unit": "IOPS", + "description": "Write operations per second" + }, + + }, + } + } From b3f9fd39e9fd1f77ea38b66cf50a68aaff287ca0 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Tue, 15 Jun 2021 20:14:39 +0530 Subject: [PATCH 03/10] Enhancements to prometheus exporter to support new metrics --- delfin/exporter/prometheus/exporter_server.py | 30 ++++-- delfin/exporter/prometheus/prometheus.py | 91 +++++++++++++------ 2 files changed, 88 insertions(+), 33 deletions(-) diff --git a/delfin/exporter/prometheus/exporter_server.py b/delfin/exporter/prometheus/exporter_server.py index ecbd9a95d..7bfae0a9e 100644 --- a/delfin/exporter/prometheus/exporter_server.py +++ b/delfin/exporter/prometheus/exporter_server.py @@ -11,11 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
+import glob
+import os
 from flask import Flask
 from oslo_config import cfg
 import sys
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
 
 app = Flask(__name__)
 
 grp = cfg.OptGroup('PROMETHEUS_EXPORTER')
@@ -26,9 +30,9 @@
                help='The exporter server host ip'),
     cfg.IntOpt('metric_server_port', default=8195,
                help='The exporter server port'),
-    cfg.StrOpt('metrics_cache_file', default='/var/lib/delfin/delfin_exporter'
-                                             '.txt',
-               help='The temp cache file used for persisting metrics'),
+    cfg.StrOpt('metrics_dir', default='/var/lib/delfin/metrics',
+
+               help='The temp directory to keep incoming metrics'),
 ]
 cfg.CONF.register_opts(prometheus_opts, group=grp)
 cfg.CONF(sys.argv[1:])
@@ -36,9 +40,21 @@
 @app.route("/metrics", methods=['GET'])
 def getfile():
-    with open(cfg.CONF.PROMETHEUS_EXPORTER.metrics_cache_file, "r+") as f:
-        data = f.read()
-        f.truncate(0)
+    try:
+        os.chdir(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir)
+    except OSError as e:
+        LOG.error('Error opening metrics folder')
+        raise Exception(e)
+    file_list = []
+    for file in glob.glob("*.prom"):
+        file_list.append(file)
+    data = ''
+    for file in file_list:
+        file_name = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir + '/' + file
+        with open(file_name, "r") as f:
+            data += f.read()
+        # Remove a metric file after reading it
+        os.remove(file_name)
     return data
diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py
index fce5d518b..1b152002a 100644
--- a/delfin/exporter/prometheus/prometheus.py
+++ b/delfin/exporter/prometheus/prometheus.py
@@ -11,20 +11,29 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import datetime
+import os
+import pytz
 from oslo_config import cfg
+from oslo_log import log
+from tzlocal import get_localzone
+
+LOG = log.getLogger(__name__)
 
 grp = cfg.OptGroup('PROMETHEUS_EXPORTER')
 
 prometheus_opts = [
-    cfg.StrOpt('metrics_cache_file', default='/var/lib/delfin/delfin_exporter'
-                                             '.txt',
-               help='The temp cache file used for persisting metrics'),
+    cfg.StrOpt('metrics_dir', default='/var/lib/delfin/metrics',
+
+               help='The temp directory to keep incoming metrics'),
+    cfg.StrOpt('timezone',
+               default='local',
+               help='time zone of prometheus server '
+               ),
 ]
 cfg.CONF.register_opts(prometheus_opts, group=grp)
-
 """
 The metrics received from the driver should be in this format
 storage_metrics = [Metric(name='response_time',
                           labels={'storage_id': '12345',
                                   'resource_type': 'array'},
                           values={1606015074038: 74.10422968341392}),
                    Metric(name='throughput',
                           labels={'storage_id': '12345',
                                   'resource_type': 'array'},
                           values={1600998817585: 20.264160223426305})]
 """
 
-unit_of_metric = {'response_time': 'ms', 'throughput': 'IOPS',
-                  'read_throughput': 'IOPS', 'write_throughput': 'IOPS',
-                  'bandwidth': 'MBps', 'read_bandwidth': 'MBps',
-                  'write_bandwidth': 'MBps'
-                  }
-
 
 class PrometheusExporter(object):
 
+    def __init__(self):
+        self.timestamp_offset_ms = self.set_timestamp_offset_from_utc_ms()
+        self.metrics_dir = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir
+
+    def set_timestamp_offset_from_utc_ms(self):
+        """Set timestamp offset from utc required for all metrics"""
+        try:
+            timez = get_localzone()
+            if cfg.CONF.PROMETHEUS_EXPORTER.timezone != 'local':
+                timez = pytz.timezone(cfg.CONF.PROMETHEUS_EXPORTER.timezone)
+            timez.utcoffset(datetime.datetime.now())
+            return int(timez.utcoffset(
+                datetime.datetime.now()).total_seconds() * 1000)
+        except Exception:
+            LOG.error('Error while setting timestamp'
+                      ' offset for prometheus exporter')
+            # return no offset in case of an error
+            return 0
+
     # Write metrics in Prometheus format.
-    def _write_to_prometheus_format(self, f, metric, labels, values):
-        f.write("# HELP storage_%s storage metric for %s\n" % (metric, metric))
-        f.write("# TYPE storage_%s gauge\n" % metric)
+    def _write_to_prometheus_format(self, f, metric,
+                                    labels, prom_labels, values):
+        f.write("# HELP %s metric for resource %s and instance %s\n"
+                % (metric, labels.get('resource_type'),
+                   labels.get('resource_id')))
+        f.write("# TYPE %s gauge\n" % metric)
         for timestamp, value in values.items():
-            f.write("storage_%s{%s} %f %d\n" % (metric, labels,
-                                                value, timestamp))
+            timestamp += self.timestamp_offset_ms
+            f.write("%s{%s} %f %d\n" % (metric, prom_labels,
+                                        value, timestamp))
 
     def push_to_prometheus(self, storage_metrics):
-        with open(cfg.CONF.PROMETHEUS_EXPORTER.metrics_cache_file, "a+") as f:
+        time_stamp = str(datetime.datetime.now().timestamp())
+        temp_file_name = self.metrics_dir + '/' + time_stamp + ".prom.temp"
+        actual_file_name = self.metrics_dir + '/' + time_stamp + ".prom"
+        # make a temp file with current timestamp
+        with open(temp_file_name, "w") as f:
             for metric in storage_metrics:
                 name = metric.name
                 labels = metric.labels
@@ -69,14 +99,23 @@ def push_to_prometheus(self, storage_metrics):
                 storage_name = labels.get('name')
                 storage_sn = labels.get('serial_number')
                 resource_type = labels.get('resource_type')
-                unit = unit_of_metric.get(name)
+                resource_id = labels.get('resource_id')
+                unit = labels.get('unit')
                 value_type = labels.get('value_type', 'gauge')
-                storage_labels = (
-                    "storage_id=\"%s\",storage_name=\"%s\",storage_sn=\"%s\","
-                    "resource_type=\"%s\", "
-                    "type=\"%s\",unit=\"%s\",value_type=\"%s\"" %
-                    (storage_id, storage_name, storage_sn, resource_type,
-                     'RAW', unit, value_type))
-
-                self._write_to_prometheus_format(f, name, storage_labels,
+                prom_labels = (
+                    "storage_id=\"%s\",storage_name=\"%s\","
+                    "storage_sn=\"%s\","
+                    "resource_type=\"%s\",resource_id=\"%s\""
+                    "type=\"%s\",unit=\"%s\",value_type=\"%s\"" %
+                    (storage_id, storage_name, storage_sn, resource_type,
+                     resource_id,
+                     'RAW', unit, value_type))
+                name = labels.get('resource_type') + '_' + name
+                self._write_to_prometheus_format(f, name, labels, prom_labels,
                                                  values)
+        # This is done so that the exporter server never sees an incomplete file
+        try:
+            f.close()
+            os.renames(temp_file_name, actual_file_name)
+        except Exception:
+            LOG.error('Error while renaming the temporary metric file')

From 5bae68150d11fd303c11979c7a70f28a52272601 Mon Sep 17 00:00:00 2001
From: Najmudheen
Date: Wed, 16 Jun 2021 11:56:50 +0530
Subject: [PATCH 04/10] modify indentation

---
 delfin/exporter/prometheus/prometheus.py | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py
index 1b152002a..2c7ba89de 100644
--- a/delfin/exporter/prometheus/prometheus.py
+++ b/delfin/exporter/prometheus/prometheus.py
@@ -103,13 +103,17 @@ def push_to_prometheus(self, storage_metrics):
                 unit = labels.get('unit')
                 value_type = labels.get('value_type', 'gauge')
                 prom_labels = (
-                    "storage_id=\"%s\",storage_name=\"%s\","
-                    "storage_sn=\"%s\","
-                    "resource_type=\"%s\",resource_id=\"%s\""
-                    "type=\"%s\",unit=\"%s\",value_type=\"%s\"" %
-                    (storage_id, storage_name, storage_sn, resource_type,
-                     resource_id,
-                     'RAW', unit, value_type))
+                    "storage_id=\"%s\","
+                    "storage_name=\"%s\","
+                    "storage_sn=\"%s\","
+                    "resource_type=\"%s\","
+                    "resource_id=\"%s\","
+                    "type=\"%s\","
+                    "unit=\"%s\","
+                    "value_type=\"%s\"" %
+                    (storage_id, storage_name, storage_sn, resource_type,
+                     resource_id,
+                     'RAW', unit, value_type))
                 name = labels.get('resource_type') + '_' + name
                 self._write_to_prometheus_format(f, name, labels, prom_labels,
                                                  values)

From 97dd69e0480d7edf1d40dbe1119601aac073e4e9 Mon Sep 17 00:00:00 2001
From: Najmudheen
Date: Wed, 16 Jun 2021 14:47:32 +0530
Subject: [PATCH 05/10] correcting labels overwrite problem

---
 delfin/drivers/fake_storage/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/delfin/drivers/fake_storage/__init__.py b/delfin/drivers/fake_storage/__init__.py
index c1c1beaca..716d19611 100644
--- a/delfin/drivers/fake_storage/__init__.py
+++ b/delfin/drivers/fake_storage/__init__.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import copy
 import random
 import decorator
 import math
@@ -513,7 +513,7 @@ def get_resource_perf_metrics(self, storage_id, start_time, end_time,
                 labels['unit'] = metric_list[key]['unit']
                 m = constants.metric_struct(name=key, labels=labels,
                                             values=fake_metrics[key])
-                resource_metrics.append(m)
+                resource_metrics.append(copy.deepcopy(m))
         return resource_metrics

From 8ebb86baaa2f1d6d2c9a2154aa1532c93b6e4219 Mon Sep 17 00:00:00 2001
From: Najmudheen
Date: Thu, 17 Jun 2021 12:30:11 +0530
Subject: [PATCH 06/10] Addressing review comments

---
 delfin/exporter/prometheus/exporter_server.py |  6 ++----
 delfin/exporter/prometheus/prometheus.py      |  3 ++-
 delfin/tests/unit/drivers/test_api.py         | 16 ++++++++++++++--
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/delfin/exporter/prometheus/exporter_server.py b/delfin/exporter/prometheus/exporter_server.py
index 7bfae0a9e..4b47f473a 100644
--- a/delfin/exporter/prometheus/exporter_server.py
+++ b/delfin/exporter/prometheus/exporter_server.py
@@ -45,16 +45,14 @@ def getfile():
     except OSError as e:
         LOG.error('Error opening metrics folder')
         raise Exception(e)
-    file_list = []
-    for file in glob.glob("*.prom"):
-        file_list.append(file)
     data = ''
-    for file in file_list:
+    for file in glob.glob("*.prom"):
         file_name = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir + '/' + file
         with open(file_name, "r") as f:
             data += f.read()
         # Remove a metric file after reading it
         os.remove(file_name)
+
     return data

diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py
index 2c7ba89de..a88942a2a 100644
--- a/delfin/exporter/prometheus/prometheus.py
+++ b/delfin/exporter/prometheus/prometheus.py
@@ -54,7 +54,7 @@ class PrometheusExporter(object):
 
     def __init__(self):
-        self.timestamp_offset_ms = self.set_timestamp_offset_from_utc_ms()
+        self.timestamp_offset_ms = 0
         self.metrics_dir = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir
 
@@ -86,6 +86,7 @@ def _write_to_prometheus_format(self, f, metric,
                                         value, timestamp))
 
     def push_to_prometheus(self, storage_metrics):
+        self.timestamp_offset_ms = self.set_timestamp_offset_from_utc_ms()
         time_stamp = str(datetime.datetime.now().timestamp())

diff --git a/delfin/tests/unit/drivers/test_api.py b/delfin/tests/unit/drivers/test_api.py
index ec1ada643..f591e8cbd 100644
--- a/delfin/tests/unit/drivers/test_api.py
+++ b/delfin/tests/unit/drivers/test_api.py
@@ -20,7 +20,7 @@
 from delfin import context
 from delfin import exception
-from delfin.common import config  # noqa
+from delfin.common import config, constants  # noqa
 from delfin.drivers.api import API
 from delfin.drivers.fake_storage import FakeStorageDriver
 
@@ -74,7 +74,6 @@ def test_init(self):
     @mock.patch('delfin.db.storage_get_all')
     def test_discover_storage(self, mock_storage, mock_access_info,
                               mock_storage_create):
-
         # Case: Positive scenario for fake driver discovery
         storage = copy.deepcopy(STORAGE)
         storage['id'] = '12345'
@@ -350,3 +349,16 @@ def test_get_capabilities(self, driver_manager):
         self.assertTrue('resource_metrics' in capabilities)
 
         driver_manager.assert_called_once()
+
+    @mock.patch('delfin.drivers.manager.DriverManager.get_driver')
+    def test_collect_perf_metrics(self, driver_manager):
+        driver_manager.return_value = FakeStorageDriver()
+        storage_id = '12345'
+        capabilities = API().get_capabilities(context, storage_id)
+
+        metrics = API().collect_perf_metrics(context, storage_id,
+                                             capabilities['resource_metrics'],
+                                             1622808000000, 1622808000001)
+        self.assertTrue('resource_metrics' in capabilities)
+        self.assertTrue(isinstance(metrics[0], constants.metric_struct))
+        self.assertEqual(driver_manager.call_count, 2)

From d68f2e970ced32714d581ca644bdfa5f3c5a881b Mon Sep 17 00:00:00 2001
From: Najmudheen
Date: Thu, 17 Jun 2021 18:44:01 +0530
Subject: [PATCH 07/10] adding UTs for prometheus exporter

---
 delfin/exporter/prometheus/exporter_server.py |  4 +--
 delfin/exporter/prometheus/prometheus.py      |  5 +--
 .../unit/exporter/prometheus/__init__.py      |  0
 .../exporter/prometheus/test_prometheus.py    | 35 +++++++++++++++++++
 4 files changed, 40 insertions(+), 4 deletions(-)
 create mode 100644 delfin/tests/unit/exporter/prometheus/__init__.py
 create mode 100644 delfin/tests/unit/exporter/prometheus/test_prometheus.py

diff --git a/delfin/exporter/prometheus/exporter_server.py b/delfin/exporter/prometheus/exporter_server.py
index 4b47f473a..8294caf62 100644
--- a/delfin/exporter/prometheus/exporter_server.py
+++ b/delfin/exporter/prometheus/exporter_server.py
@@ -24,13 +24,13 @@
 app = Flask(__name__)
 
 grp = cfg.OptGroup('PROMETHEUS_EXPORTER')
-
+METRICS_CACHE_DIR = '/var/lib/delfin/metrics'
 prometheus_opts = [
     cfg.StrOpt('metric_server_ip', default='0.0.0.0',
                help='The exporter server host ip'),
     cfg.IntOpt('metric_server_port', default=8195,
                help='The exporter server port'),
-    cfg.StrOpt('metrics_dir', default='/var/lib/delfin/metrics',
+    cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR,
 
                help='The temp directory to keep incoming metrics'),
 ]
diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py
index a88942a2a..7f47c44b5 100644
--- a/delfin/exporter/prometheus/prometheus.py
+++ b/delfin/exporter/prometheus/prometheus.py
@@ -22,9 +22,9 @@
 LOG = log.getLogger(__name__)
 
 grp = cfg.OptGroup('PROMETHEUS_EXPORTER')
-
+METRICS_CACHE_DIR = '/var/lib/delfin/metrics'
 prometheus_opts = [
-    cfg.StrOpt('metrics_dir', default='/var/lib/delfin/metrics',
+    cfg.StrOpt('metrics_dir', default=METRICS_CACHE_DIR,
 
                help='The temp directory to keep incoming metrics'),
     cfg.StrOpt('timezone',
@@ -34,6 +34,7 @@
 ]
 cfg.CONF.register_opts(prometheus_opts, group=grp)
+
 """
 The metrics received from the driver should be in this format
diff --git a/delfin/tests/unit/exporter/prometheus/__init__.py b/delfin/tests/unit/exporter/prometheus/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/delfin/tests/unit/exporter/prometheus/test_prometheus.py
b/delfin/tests/unit/exporter/prometheus/test_prometheus.py new file mode 100644 index 000000000..176fdf3f2 --- /dev/null +++ b/delfin/tests/unit/exporter/prometheus/test_prometheus.py @@ -0,0 +1,35 @@ +# Copyright 2021 The SODA Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import glob +import os +from unittest import TestCase + +from delfin.exporter.prometheus import prometheus +from delfin.common.constants import metric_struct + +fake_metrics = [metric_struct(name='throughput', + labels={'storage_id': '12345', + 'resource_type': 'storage', + 'resource_id': 'storage0', + 'type': 'RAW', 'unit': 'MB/s'}, + values={1622808000000: 61.9388895680357})] + + +class TestPrometheusExporter(TestCase): + + def test_push_to_prometheus(self): + prometheus_obj = prometheus.PrometheusExporter() + prometheus_obj.metrics_dir = os.getcwd() + prometheus_obj.push_to_prometheus(fake_metrics) + self.assertTrue(glob.glob(prometheus_obj.metrics_dir + '/' + '*.prom')) From 3e00bf75e33c232ff21c85342989e418502049c1 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Fri, 18 Jun 2021 14:12:03 +0530 Subject: [PATCH 08/10] using os.join for metrics file path --- delfin/exporter/prometheus/prometheus.py | 30 +++++++++++++----------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py index 7f47c44b5..93e0a6480 100644 --- a/delfin/exporter/prometheus/prometheus.py +++ b/delfin/exporter/prometheus/prometheus.py @@ -34,7 +34,6 @@ ] cfg.CONF.register_opts(prometheus_opts, group=grp) - """" The metrics received from driver is should be in this format storage_metrics = [Metric(name='response_time', @@ -89,8 +88,10 @@ def _write_to_prometheus_format(self, f, metric, def push_to_prometheus(self, storage_metrics): self.timestamp_offset_ms = self.set_timestamp_offset_from_utc_ms() time_stamp = str(datetime.datetime.now().timestamp()) - temp_file_name = self.metrics_dir + '/' + time_stamp + ".prom.temp" - actual_file_name = self.metrics_dir + '/' + time_stamp + ".prom" + temp_file_name = os.path.join(self.metrics_dir, + time_stamp + ".prom.temp") + actual_file_name = os.path.join(self.metrics_dir, + time_stamp + ".prom") # make a temp file with current timestamp with open(temp_file_name, "w") as f: for metric in storage_metrics: @@ -103,19 +104,20 @@ def push_to_prometheus(self, storage_metrics): resource_type = labels.get('resource_type') resource_id = labels.get('resource_id') unit = labels.get('unit') + type = labels.get('type', 'RAW') value_type = labels.get('value_type', 'gauge') prom_labels = ( - "storage_id=\"%s\"," - "storage_name=\"%s\"," - "storage_sn=\"%s\"," - "resource_type=\"%s\"," - "resource_id=\"%s\"," - "type=\"%s\"," - "unit=\"%s\"," - "value_type=\"%s\"" % - (storage_id, storage_name, storage_sn, resource_type, - resource_id, - 'RAW', unit, value_type)) + "storage_id=\"%s\"," + "storage_name=\"%s\"," + "storage_sn=\"%s\"," + "resource_type=\"%s\"," + "resource_id=\"%s\"," + "type=\"%s\"," + "unit=\"%s\"," + 
"value_type=\"%s\"" % + (storage_id, storage_name, storage_sn, resource_type, + resource_id, + type, unit, value_type)) name = labels.get('resource_type') + '_' + name self._write_to_prometheus_format(f, name, labels, prom_labels, values) From 5db0b485e37e66ee14d9aaa209773e549fc7cb25 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Fri, 18 Jun 2021 18:26:42 +0530 Subject: [PATCH 09/10] Adding code to check metrics dir exists --- delfin/exporter/prometheus/exporter_server.py | 3 +++ delfin/exporter/prometheus/prometheus.py | 11 +++++++++++ 2 files changed, 14 insertions(+) diff --git a/delfin/exporter/prometheus/exporter_server.py b/delfin/exporter/prometheus/exporter_server.py index 8294caf62..99cb4db4f 100644 --- a/delfin/exporter/prometheus/exporter_server.py +++ b/delfin/exporter/prometheus/exporter_server.py @@ -41,6 +41,9 @@ @app.route("/metrics", methods=['GET']) def getfile(): try: + if not os.path.exists(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir): + LOG.error('No metrics cache folder exists') + return '' os.chdir(cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir) except OSError as e: LOG.error('Error opening metrics folder') diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py index 93e0a6480..1b155d2a8 100644 --- a/delfin/exporter/prometheus/prometheus.py +++ b/delfin/exporter/prometheus/prometheus.py @@ -57,6 +57,15 @@ def __init__(self): self.timestamp_offset_ms = 0 self.metrics_dir = cfg.CONF.PROMETHEUS_EXPORTER.metrics_dir + def check_metrics_dir_exists(self, directory): + try: + if not os.path.exists(directory): + os.makedirs(directory) + return True + except Exception as e: + LOG.error('Error while creating metrics directory') + return False + def set_timestamp_offset_from_utc_ms(self): """Set timestamp offset from utc required for all metrics""" try: @@ -87,6 +96,8 @@ def _write_to_prometheus_format(self, f, metric, def push_to_prometheus(self, storage_metrics): self.timestamp_offset_ms = self.set_timestamp_offset_from_utc_ms() + if not self.check_metrics_dir_exists(self.metrics_dir): + return time_stamp = str(datetime.datetime.now().timestamp()) temp_file_name = os.path.join(self.metrics_dir, time_stamp + ".prom.temp") From 6a9bbbab04f3e118c546e925b183c685a766e483 Mon Sep 17 00:00:00 2001 From: Najmudheen Date: Fri, 18 Jun 2021 18:43:25 +0530 Subject: [PATCH 10/10] Adding code to check metrics dir exists --- delfin/exporter/prometheus/prometheus.py | 29 +++++++++++++----------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/delfin/exporter/prometheus/prometheus.py b/delfin/exporter/prometheus/prometheus.py index 1b155d2a8..95ca7406a 100644 --- a/delfin/exporter/prometheus/prometheus.py +++ b/delfin/exporter/prometheus/prometheus.py @@ -14,6 +14,7 @@ import datetime import os import pytz +import six from oslo_config import cfg from oslo_log import log @@ -63,7 +64,9 @@ def check_metrics_dir_exists(self, directory): os.makedirs(directory) return True except Exception as e: - LOG.error('Error while creating metrics directory') + msg = six.text_type(e) + LOG.error("Error while creating metrics directory. 
Reason: %s", + msg) return False def set_timestamp_offset_from_utc_ms(self): @@ -115,20 +118,20 @@ def push_to_prometheus(self, storage_metrics): resource_type = labels.get('resource_type') resource_id = labels.get('resource_id') unit = labels.get('unit') - type = labels.get('type', 'RAW') + m_type = labels.get('type', 'RAW') value_type = labels.get('value_type', 'gauge') prom_labels = ( - "storage_id=\"%s\"," - "storage_name=\"%s\"," - "storage_sn=\"%s\"," - "resource_type=\"%s\"," - "resource_id=\"%s\"," - "type=\"%s\"," - "unit=\"%s\"," - "value_type=\"%s\"" % - (storage_id, storage_name, storage_sn, resource_type, - resource_id, - type, unit, value_type)) + "storage_id=\"%s\"," + "storage_name=\"%s\"," + "storage_sn=\"%s\"," + "resource_type=\"%s\"," + "resource_id=\"%s\"," + "type=\"%s\"," + "unit=\"%s\"," + "value_type=\"%s\"" % + (storage_id, storage_name, storage_sn, resource_type, + resource_id, + m_type, unit, value_type)) name = labels.get('resource_type') + '_' + name self._write_to_prometheus_format(f, name, labels, prom_labels, values)