From 9b5b47028489d2504264ac9f2e6ba5d051a54d4f Mon Sep 17 00:00:00 2001
From: Diego Hurtado
Date: Wed, 24 May 2023 16:00:07 -0600
Subject: [PATCH] Add max_scale option

Fixes #3130
---
 CHANGELOG.md                                   |  3 ++
 .../sdk/metrics/_internal/aggregation.py       | 15 +++++++-
 ...xponential_bucket_histogram_aggregation.py  | 37 +++++++++++++++++++
 3 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ce6fc4251..0f10920db7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,7 +6,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## Unreleased
+
+- Add max_scale option to Exponential Bucket Histogram Aggregation ([#3323](https://github.com/open-telemetry/opentelemetry-python/pull/3323))
 - Use BoundedAttributes instead of raw dict to extract attributes from LogRecord and Support dropped_attributes_count in LogRecord ([#3310](https://github.com/open-telemetry/opentelemetry-python/pull/3310))
+
 ## Version 1.18.0/0.39b0 (2023-05-04)
 
 - Select histogram aggregation with an environment variable

diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
index 7312df1aa7..ae21db907d 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py
@@ -386,6 +386,7 @@ def __init__(
         # See the derivation here:
         # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exponential-bucket-histogram-aggregation)
         max_size: int = 160,
+        max_scale: int = 20,
     ):
         super().__init__(attributes)
         # max_size is the maximum capacity of the positive and negative
@@ -403,6 +404,7 @@ def __init__(
             )
 
         self._max_size = max_size
+        self._max_scale = max_scale
 
         # _sum is the sum of all the values aggregated by this aggregator.
         self._sum = 0
@@ -428,7 +430,14 @@ def __init__(
 
         # _mapping corresponds to the current scale, is shared by both the
         # positive and negative buckets.
-        self._mapping = LogarithmMapping(LogarithmMapping._max_scale)
+
+        if self._max_scale > 20:
+            _logger.warning(
+                "max_scale is set to %s which is "
+                "larger than the recommended value of 20",
+                self._max_scale,
+            )
+        self._mapping = LogarithmMapping(self._max_scale)
 
         self._instrument_temporality = AggregationTemporality.DELTA
         self._start_time_unix_nano = start_time_unix_nano
@@ -941,9 +950,10 @@ class ExponentialBucketHistogramAggregation(Aggregation):
     def __init__(
         self,
         max_size: int = 160,
+        max_scale: int = 20,
     ):
-
         self._max_size = max_size
+        self._max_scale = max_scale
 
     def _create_aggregation(
         self,
@@ -955,6 +965,7 @@ def _create_aggregation(
             attributes,
             start_time_unix_nano,
             max_size=self._max_size,
+            max_scale=self._max_scale,
         )
 
 
diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
index 65a437bc6d..9bea75e426 100644
--- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
+++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from itertools import permutations
+from logging import WARNING
 from math import ldexp
 from sys import float_info
 from types import MethodType
@@ -37,6 +38,9 @@
     LogarithmMapping,
 )
 from opentelemetry.sdk.metrics._internal.measurement import Measurement
+from opentelemetry.sdk.metrics.view import (
+    ExponentialBucketHistogramAggregation,
+)
 
 
 def get_counts(buckets: Buckets) -> int:
@@ -77,6 +81,39 @@ def swap(
 
 
 class TestExponentialBucketHistogramAggregation(TestCase):
+    @patch("opentelemetry.sdk.metrics._internal.aggregation.LogarithmMapping")
+    def test_create_aggregation(self, mock_logarithm_mapping):
+        exponential_bucket_histogram_aggregation = (
+            ExponentialBucketHistogramAggregation()
+        )._create_aggregation(Mock(), Mock(), Mock())
+
+        self.assertEqual(
+            exponential_bucket_histogram_aggregation._max_scale, 20
+        )
+
+        mock_logarithm_mapping.assert_called_with(20)
+
+        exponential_bucket_histogram_aggregation = (
+            ExponentialBucketHistogramAggregation(max_scale=10)
+        )._create_aggregation(Mock(), Mock(), Mock())
+
+        self.assertEqual(
+            exponential_bucket_histogram_aggregation._max_scale, 10
+        )
+
+        mock_logarithm_mapping.assert_called_with(10)
+
+        with self.assertLogs(level=WARNING):
+            exponential_bucket_histogram_aggregation = (
+                ExponentialBucketHistogramAggregation(max_scale=100)
+            )._create_aggregation(Mock(), Mock(), Mock())
+
+        self.assertEqual(
+            exponential_bucket_histogram_aggregation._max_scale, 100
+        )
+
+        mock_logarithm_mapping.assert_called_with(100)
+
     def assertInEpsilon(self, first, second, epsilon):
         self.assertLessEqual(first, (second * (1 + epsilon)))
         self.assertGreaterEqual(first, (second * (1 - epsilon)))
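
Usage note (not part of the diff above): a minimal sketch of how the new max_scale option could be configured through a View, assuming the standard SDK MeterProvider and in-memory reader setup; the meter and instrument names below are illustrative only.

# Usage sketch only (not part of the patch). Assumes the standard SDK
# setup; "example-meter" and "request.duration" are illustrative names.
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import (
    ExponentialBucketHistogramAggregation,
    View,
)

reader = InMemoryMetricReader()
provider = MeterProvider(
    metric_readers=[reader],
    views=[
        View(
            instrument_name="request.duration",
            # max_scale bounds the resolution of the exponential mapping;
            # values above 20 trigger the warning added by this patch.
            aggregation=ExponentialBucketHistogramAggregation(max_scale=10),
        )
    ],
)

histogram = provider.get_meter("example-meter").create_histogram(
    "request.duration"
)
histogram.record(7.5)
print(reader.get_metrics_data())

Keeping max_scale at the default of 20 matches the previous behavior, since the aggregation formerly used LogarithmMapping._max_scale unconditionally.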