diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index aec90285161..b007633b68f 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -86,6 +86,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
 
 *Metricbeat*
 
+- Fix Azure Monitor support for multiple aggregation types {issue}39192[39192] {pull}39204[39204]
 
 *Osquerybeat*
 
diff --git a/x-pack/metricbeat/module/azure/azure_test.go b/x-pack/metricbeat/module/azure/azure_test.go
new file mode 100644
index 00000000000..c3d67525ddb
--- /dev/null
+++ b/x-pack/metricbeat/module/azure/azure_test.go
@@ -0,0 +1,39 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package azure
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGroupMetricsDefinitionsByResourceId(t *testing.T) {
+
+	t.Run("Group metrics definitions by resource ID", func(t *testing.T) {
+		metrics := []Metric{
+			{
+				ResourceId: "resource-1",
+				Namespace:  "namespace-1",
+				Names:      []string{"metric-1"},
+			},
+			{
+				ResourceId: "resource-1",
+				Namespace:  "namespace-1",
+				Names:      []string{"metric-2"},
+			},
+			{
+				ResourceId: "resource-1",
+				Namespace:  "namespace-1",
+				Names:      []string{"metric-3"},
+			},
+		}
+
+		metricsByResourceId := groupMetricsDefinitionsByResourceId(metrics)
+
+		assert.Equal(t, 1, len(metricsByResourceId))
+		assert.Equal(t, 3, len(metricsByResourceId["resource-1"]))
+	})
+}
diff --git a/x-pack/metricbeat/module/azure/client_test.go b/x-pack/metricbeat/module/azure/client_test.go
index 79b1742ded0..c23326ac82b 100644
--- a/x-pack/metricbeat/module/azure/client_test.go
+++ b/x-pack/metricbeat/module/azure/client_test.go
@@ -9,10 +9,12 @@ import (
 	"testing"
 	"time"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor"
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -35,6 +37,7 @@ var (
 			},
 		}}},
 	}
+	countUnit = armmonitor.MetricUnit("Count")
 )
 
 func mockMapResourceMetrics(client *Client, resources []*armresources.GenericResourceExpanded, resourceConfig ResourceConfig) ([]Metric, error) {
@@ -112,4 +115,157 @@ func TestGetMetricValues(t *testing.T) {
 		assert.Equal(t, len(client.ResourceConfigurations.Metrics[0].Values), 0)
 		m.AssertExpectations(t)
 	})
+
+	t.Run("multiple aggregation types", func(t *testing.T) {
+		client := NewMockClient()
+		referenceTime := time.Now().UTC()
+		client.ResourceConfigurations = ResourceConfiguration{
+			Metrics: []Metric{
+				{
+					Namespace:    "Microsoft.EventHub/Namespaces",
+					Names:        []string{"ActiveConnections"},
+					Aggregations: "Maximum,Minimum,Average",
+					TimeGrain:    "PT1M",
+				},
+			},
+		}
+
+		m := &MockService{}
+		m.On(
+			"GetMetricValues",
+			mock.Anything,
+			mock.Anything,
+			mock.Anything,
+			mock.Anything,
+			mock.Anything,
+			mock.Anything,
+			mock.Anything,
+		).Return(
+			[]armmonitor.Metric{{
+				ID: to.Ptr("test"),
+				Name: &armmonitor.LocalizableString{
+					Value:          to.Ptr("ActiveConnections"),
+					LocalizedValue: to.Ptr("ActiveConnections"),
+				},
+				Timeseries: []*armmonitor.TimeSeriesElement{{
+					Data: []*armmonitor.MetricValue{{
+						Average:   to.Ptr(1.0),
+						Maximum:   to.Ptr(2.0),
+						Minimum:   to.Ptr(3.0),
+						TimeStamp: to.Ptr(time.Now()),
+					}},
+				}},
to.Ptr("Microsoft.Insights/metrics"), + Unit: &countUnit, + DisplayDescription: to.Ptr("Total Active Connections for Microsoft.EventHub."), + ErrorCode: to.Ptr("Success"), + }}, + "PT1M", + nil, + ) + + client.AzureMonitorService = m + mr := MockReporterV2{} + + metricValues := client.GetMetricValues(referenceTime, client.ResourceConfigurations.Metrics, &mr) + + require.Equal(t, len(metricValues), 1) + require.Equal(t, len(metricValues[0].Values), 1) + + assert.Equal(t, *metricValues[0].Values[0].avg, 1.0) + assert.Equal(t, *metricValues[0].Values[0].max, 2.0) + assert.Equal(t, *metricValues[0].Values[0].min, 3.0) + + require.Equal(t, len(client.ResourceConfigurations.Metrics[0].Values), 1) + + m.AssertExpectations(t) + }) + + t.Run("single aggregation types", func(t *testing.T) { + client := NewMockClient() + referenceTime := time.Now().UTC() + timestamp := time.Now().UTC() + client.ResourceConfigurations = ResourceConfiguration{ + Metrics: []Metric{ + { + Namespace: "Microsoft.EventHub/Namespaces", + Names: []string{"ActiveConnections"}, + Aggregations: "Maximum", + TimeGrain: "PT1M", + }, { + Namespace: "Microsoft.EventHub/Namespaces", + Names: []string{"ActiveConnections"}, + Aggregations: "Minimum", + TimeGrain: "PT1M", + }, { + Namespace: "Microsoft.EventHub/Namespaces", + Names: []string{"ActiveConnections"}, + Aggregations: "Average", + TimeGrain: "PT1M", + }, + }, + } + + m := &MockService{} + + x := []struct { + aggregation string + data []*armmonitor.MetricValue + }{ + {aggregation: "Maximum", data: []*armmonitor.MetricValue{{Maximum: to.Ptr(3.0), TimeStamp: to.Ptr(timestamp)}}}, + {aggregation: "Minimum", data: []*armmonitor.MetricValue{{Minimum: to.Ptr(1.0), TimeStamp: to.Ptr(timestamp)}}}, + {aggregation: "Average", data: []*armmonitor.MetricValue{{Average: to.Ptr(2.0), TimeStamp: to.Ptr(timestamp)}}}, + } + + for _, v := range x { + m.On( + "GetMetricValues", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + v.aggregation, + mock.Anything, + ).Return( + []armmonitor.Metric{{ + ID: to.Ptr("test"), + Name: &armmonitor.LocalizableString{ + Value: to.Ptr("ActiveConnections"), + LocalizedValue: to.Ptr("ActiveConnections"), + }, + Timeseries: []*armmonitor.TimeSeriesElement{{ + Data: v.data, + }}, + Type: to.Ptr("Microsoft.Insights/metrics"), + Unit: &countUnit, + DisplayDescription: to.Ptr("Total Active Connections for Microsoft.EventHub."), + ErrorCode: to.Ptr("Success"), + }}, + "PT1M", + nil, + ).Once() + } + + client.AzureMonitorService = m + mr := MockReporterV2{} + + metricValues := client.GetMetricValues(referenceTime, client.ResourceConfigurations.Metrics, &mr) + + require.Equal(t, 3, len(metricValues)) + + require.Equal(t, 1, len(metricValues[0].Values)) + require.Equal(t, 1, len(metricValues[1].Values)) + require.Equal(t, 1, len(metricValues[2].Values)) + + require.NotNil(t, metricValues[0].Values[0].max, "max value is nil") + require.NotNil(t, metricValues[1].Values[0].min, "min value is nil") + require.NotNil(t, metricValues[2].Values[0].avg, "avg value is nil") + + assert.Equal(t, *metricValues[0].Values[0].max, 3.0) + assert.Equal(t, *metricValues[1].Values[0].min, 1.0) + assert.Equal(t, *metricValues[2].Values[0].avg, 2.0) + + m.AssertExpectations(t) + }) } diff --git a/x-pack/metricbeat/module/azure/data.go b/x-pack/metricbeat/module/azure/data.go index c46aee9da24..b2fffb40426 100644 --- a/x-pack/metricbeat/module/azure/data.go +++ b/x-pack/metricbeat/module/azure/data.go @@ -133,41 +133,8 @@ func mapToKeyValuePoints(metrics 
@@ -133,41 +133,8 @@ func mapToKeyValuePoints(metrics []Metric) []KeyValuePoint {
 	var points []KeyValuePoint
 	for _, metric := range metrics {
 		for _, value := range metric.Values {
-			point := KeyValuePoint{
-				Timestamp:  value.timestamp,
-				Dimensions: mapstr.M{},
-			}
-
 			metricName := managePropertyName(value.name)
-			switch {
-			case value.min != nil:
-				point.Key = fmt.Sprintf("%s.%s", metricName, "min")
-				point.Value = value.min
-			case value.max != nil:
-				point.Key = fmt.Sprintf("%s.%s", metricName, "max")
-				point.Value = value.max
-			case value.avg != nil:
-				point.Key = fmt.Sprintf("%s.%s", metricName, "avg")
-				point.Value = value.avg
-			case value.total != nil:
-				point.Key = fmt.Sprintf("%s.%s", metricName, "total")
-				point.Value = value.total
-			case value.count != nil:
-				point.Key = fmt.Sprintf("%s.%s", metricName, "count")
-				point.Value = value.count
-			}
-
-			point.Namespace = metric.Namespace
-			point.ResourceId = metric.ResourceId
-			point.ResourceSubId = metric.ResourceSubId
-			point.TimeGrain = metric.TimeGrain
-
-			// The number of dimensions in the metric definition and the
-			// number of dimensions in the metric values should be the same.
-			//
-			// But, since definitions and values are retrieved from different
-			// API endpoints, we need to make sure that we don't panic if the
-			// number of dimensions is different.
+			dimensions := mapstr.M{}
 			if len(metric.Dimensions) == len(value.dimensions) {
 				// Take the dimension name from the metric definition and the
 				// dimension value from the metric value.
@@ -180,11 +147,75 @@ func mapToKeyValuePoints(metrics []Metric) []KeyValuePoint {
 					// Dimensions from metric definition and metric value are
 					// not guaranteed to be in the same order, so we need to
 					// find by name the right value for each dimension.
-					_, _ = point.Dimensions.Put(dim.Name, getDimensionValue(dim.Name, value.dimensions))
+					// _, _ = point.Dimensions.Put(dim.Name, getDimensionValue(dim.Name, value.dimensions))
+					_, _ = dimensions.Put(dim.Name, getDimensionValue(dim.Name, value.dimensions))
 				}
 			}
 
-			points = append(points, point)
+			if value.min != nil {
+				points = append(points, KeyValuePoint{
+					Key:           fmt.Sprintf("%s.%s", metricName, "min"),
+					Value:         value.min,
+					Namespace:     metric.Namespace,
+					ResourceId:    metric.ResourceId,
+					ResourceSubId: metric.ResourceSubId,
+					TimeGrain:     metric.TimeGrain,
+					Dimensions:    dimensions,
+					Timestamp:     value.timestamp,
+				})
+			}
+
+			if value.max != nil {
+				points = append(points, KeyValuePoint{
+					Key:           fmt.Sprintf("%s.%s", metricName, "max"),
+					Value:         value.max,
+					Namespace:     metric.Namespace,
+					ResourceId:    metric.ResourceId,
+					ResourceSubId: metric.ResourceSubId,
+					TimeGrain:     metric.TimeGrain,
+					Dimensions:    dimensions,
+					Timestamp:     value.timestamp,
+				})
+			}
+
+			if value.avg != nil {
+				points = append(points, KeyValuePoint{
+					Key:           fmt.Sprintf("%s.%s", metricName, "avg"),
+					Value:         value.avg,
+					Namespace:     metric.Namespace,
+					ResourceId:    metric.ResourceId,
+					ResourceSubId: metric.ResourceSubId,
+					TimeGrain:     metric.TimeGrain,
+					Dimensions:    dimensions,
+					Timestamp:     value.timestamp,
+				})
+			}
+
+			if value.total != nil {
+				points = append(points, KeyValuePoint{
+					Key:           fmt.Sprintf("%s.%s", metricName, "total"),
+					Value:         value.total,
+					Namespace:     metric.Namespace,
+					ResourceId:    metric.ResourceId,
+					ResourceSubId: metric.ResourceSubId,
+					TimeGrain:     metric.TimeGrain,
+					Dimensions:    dimensions,
+					Timestamp:     value.timestamp,
+				})
+			}
+
+			if value.count != nil {
+				points = append(points, KeyValuePoint{
+					Key:           fmt.Sprintf("%s.%s", metricName, "count"),
+					Value:         value.count,
+					Namespace:     metric.Namespace,
+					ResourceId:    metric.ResourceId,
+					ResourceSubId: metric.ResourceSubId,
+					TimeGrain:     metric.TimeGrain,
+					Dimensions:    dimensions,
+					Timestamp:     value.timestamp,
+				})
+			}
 		}
 	}
 
diff --git a/x-pack/metricbeat/module/azure/data_test.go b/x-pack/metricbeat/module/azure/data_test.go
index 85b781ed64e..1519f78982d 100644
--- a/x-pack/metricbeat/module/azure/data_test.go
+++ b/x-pack/metricbeat/module/azure/data_test.go
@@ -62,7 +62,37 @@ func TestMapToKeyValuePoints(t *testing.T) {
 	resourceSubId := "test"
 	timeGrain := "PT1M"
 
-	t.Run("test aggregation types", func(t *testing.T) {
+	t.Run("test single aggregation type (single config)", func(t *testing.T) {
+
+		metrics := []Metric{{
+			Namespace:     namespace,
+			Names:         []string{"test"},
+			Aggregations:  "min",
+			Values:        []MetricValue{{name: metricName, min: &minValue, timestamp: timestamp}},
+			TimeGrain:     timeGrain,
+			ResourceId:    resourceId,
+			ResourceSubId: resourceSubId,
+		}}
+
+		actual := mapToKeyValuePoints(metrics)
+
+		expected := []KeyValuePoint{
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "min"),
+				Value:         &minValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+		}
+
+		assert.Equal(t, expected, actual)
+	})
+
+	t.Run("test single aggregation types (multiple configs)", func(t *testing.T) {
 
 		metrics := []Metric{{
 			Namespace:     namespace,
@@ -161,4 +191,79 @@
 
 		assert.Equal(t, expected, actual)
 	})
+
+	t.Run("test multiple aggregation types (multiple configs)", func(t *testing.T) {
+		metrics := []Metric{{
+			Namespace:    namespace,
+			Names:        []string{"test"},
+			Aggregations: "Minimum,Maximum,Average,Total,Count",
+			Values: []MetricValue{
+				{name: metricName, min: &minValue, timestamp: timestamp},
+				{name: metricName, max: &maxValue, timestamp: timestamp},
+				{name: metricName, avg: &avgValue, timestamp: timestamp},
+				{name: metricName, total: &totalValue, timestamp: timestamp},
+				{name: metricName, count: &countValue, timestamp: timestamp},
+			},
+			TimeGrain:     timeGrain,
+			ResourceId:    resourceId,
+			ResourceSubId: resourceSubId,
+		}}
+
+		actual := mapToKeyValuePoints(metrics)
+
+		expected := []KeyValuePoint{
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "min"),
+				Value:         &minValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "max"),
+				Value:         &maxValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "avg"),
+				Value:         &avgValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "total"),
+				Value:         &totalValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+			{
+				Key:           fmt.Sprintf("%s.%s", metricName, "count"),
+				Value:         &countValue,
+				Namespace:     namespace,
+				TimeGrain:     timeGrain,
+				Timestamp:     timestamp,
+				ResourceId:    resourceId,
+				ResourceSubId: resourceSubId,
+				Dimensions:    map[string]interface{}{},
+			},
+		}
+
+		assert.Equal(t, expected, actual)
+	})
 }
diff --git a/x-pack/metricbeat/module/azure/metric_registry.go b/x-pack/metricbeat/module/azure/metric_registry.go
index cdaa9496b5d..c127701c996 100644
--- a/x-pack/metricbeat/module/azure/metric_registry.go
+++ b/x-pack/metricbeat/module/azure/metric_registry.go
@@ -5,6 +5,7 @@
 package azure
 
 import (
+	"fmt"
 	"strings"
 	"time"
 
@@ -118,8 +119,14 @@ func (m *MetricRegistry) buildMetricKey(metric Metric) string {
 	keyComponents := []string{
 		metric.Namespace,
 		metric.ResourceId,
+		metric.Aggregations,
+		metric.TimeGrain,
+		strings.Join(metric.Names, ","),
+	}
+
+	for _, dim := range metric.Dimensions {
+		keyComponents = append(keyComponents, fmt.Sprintf("%s=%s", dim.Name, dim.Value))
 	}
-	keyComponents = append(keyComponents, metric.Names...)
 
 	return strings.Join(keyComponents, ",")
 }
diff --git a/x-pack/metricbeat/module/azure/metric_registry_test.go b/x-pack/metricbeat/module/azure/metric_registry_test.go
index a0ecdc84b85..63984aa6b59 100644
--- a/x-pack/metricbeat/module/azure/metric_registry_test.go
+++ b/x-pack/metricbeat/module/azure/metric_registry_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/elastic/elastic-agent-libs/logp"
 )
 
-func TestNewMetricRegistry(t *testing.T) {
+func TestMetricRegistry(t *testing.T) {
 	logger := logp.NewLogger("test azure monitor")
 
 	t.Run("Collect metrics with a regular 5 minutes period", func(t *testing.T) {
@@ -90,4 +90,140 @@ func TestNewMetricRegistry(t *testing.T) {
 
 		assert.True(t, needsUpdate, "metric should not need update")
 	})
+
+	t.Run("Metrics with different aggregation types", func(t *testing.T) {
+		metricRegistry := NewMetricRegistry(logger)
+
+		referenceTime := time.Now().UTC()
+		lastCollectionAt := referenceTime.Add(-time.Minute * 10)
+
+		metric1 := Metric{
+			ResourceId:   "test",
+			Namespace:    "test",
+			Aggregations: "Maximum",
+		}
+		metric2 := Metric{
+			ResourceId:   "test",
+			Namespace:    "test",
+			Aggregations: "Minimum",
+		}
+
+		metricCollectionInfo := MetricCollectionInfo{
+			timeGrain: "PT5M",
+			timestamp: lastCollectionAt,
+		}
+
+		// Update metrics collection info for previous collection
+		metricRegistry.Update(metric1, metricCollectionInfo)
+		metricRegistry.Update(metric2, metricCollectionInfo)
+
+		// Update metric info for metric1
+		metricRegistry.Update(metric1, MetricCollectionInfo{
+			timeGrain: "PT5M",
+			timestamp: referenceTime,
+		})
+
+		// Check if metrics need update
+		metric1NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric1)
+		metric2NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric2)
+
+		assert.False(t, metric1NeedsUpdate, "metric should not need update")
+		assert.True(t, metric2NeedsUpdate, "metric should need update")
+	})
+
+	t.Run("Metrics with different dimensions", func(t *testing.T) {
+		metricRegistry := NewMetricRegistry(logger)
+
+		referenceTime := time.Now().UTC()
+		lastCollectionAt := referenceTime.Add(-time.Minute * 10)
+
+		metric1 := Metric{
+			ResourceId: "resource-id-1",
+			Namespace:  "namespace-1",
+			Names:      []string{"metric-name-1"},
+			Dimensions: []Dimension{
+				{Name: "dimension-1", Value: "*"},
+			},
+			TimeGrain: "PT1M",
+		}
+		metric2 := Metric{
+			ResourceId: "resource-id-1",
+			Namespace:  "namespace-1",
+			Names:      []string{"metric-name-1"},
+			Dimensions: []Dimension{
+				{Name: "dimension-2", Value: "*"},
+			},
+			TimeGrain: "PT1M",
+		}
+
+		metricCollectionInfo := MetricCollectionInfo{
+			timeGrain: "PT1M",
+			timestamp: lastCollectionAt,
+		}
+
+		// Update metrics collection info for previous collection
+		metricRegistry.Update(metric1, metricCollectionInfo)
+		metricRegistry.Update(metric2, metricCollectionInfo)
+
+		// Update metric info for metric1
+		metricRegistry.Update(metric1, MetricCollectionInfo{
+			timeGrain: "PT1M",
+			timestamp: referenceTime,
+		})
+
+		// Check if metrics need update
+		metric1NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric1)
+		metric2NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric2)
+
+		assert.False(t, metric1NeedsUpdate, "metric should not need update")
+		assert.True(t, metric2NeedsUpdate, "metric should need update")
+	})
+
+	t.Run("Metrics with different timegrain", func(t *testing.T) {
+		metricRegistry := NewMetricRegistry(logger)
+
+		referenceTime := time.Now().UTC()
+		lastCollectionAt := referenceTime.Add(-time.Minute * 10)
+
+		metric1 := Metric{
+			ResourceId: "resource-id-1",
+			Namespace:  "namespace-1",
+			Names:      []string{"metric-name-1"},
+			Dimensions: []Dimension{
+				{Name: "dimension-1", Value: "*"},
+			},
+			TimeGrain: "PT1M",
+		}
+		metric2 := Metric{
+			ResourceId: "resource-id-1",
+			Namespace:  "namespace-1",
+			Names:      []string{"metric-name-1"},
+			Dimensions: []Dimension{
+				{Name: "dimension-1", Value: "*"},
+			},
+			TimeGrain: "PT5M",
+		}
+
+		metricCollectionInfo := MetricCollectionInfo{
+			timeGrain: "PT1M",
+			timestamp: lastCollectionAt,
+		}
+
+		// Update metrics collection info for previous collection
+		metricRegistry.Update(metric1, metricCollectionInfo)
+		metricRegistry.Update(metric2, metricCollectionInfo)
+
+		// Update metric info for metric1
+		metricRegistry.Update(metric1, MetricCollectionInfo{
+			timeGrain: "PT1M",
+			timestamp: referenceTime,
+		})
+
+		// Check if metrics need update
+		metric1NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric1)
+		metric2NeedsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric2)
+
+		assert.False(t, metric1NeedsUpdate, "metric should not need update")
+		assert.True(t, metric2NeedsUpdate, "metric should need update")
+	})
 }
diff --git a/x-pack/metricbeat/module/azure/mock_service.go b/x-pack/metricbeat/module/azure/mock_service.go
index 9626952fa6d..293adc7c9a7 100644
--- a/x-pack/metricbeat/module/azure/mock_service.go
+++ b/x-pack/metricbeat/module/azure/mock_service.go
@@ -43,7 +43,7 @@ func (client *MockService) GetMetricNamespaces(resourceId string) (armmonitor.Me
 
 // GetMetricValues is a mock function for the azure service
 func (client *MockService) GetMetricValues(resourceId string, namespace string, timegrain string, timespan string, metricNames []string, aggregations string, filter string) ([]armmonitor.Metric, string, error) {
-	args := client.Called(resourceId, namespace)
+	args := client.Called(resourceId, namespace, timegrain, timespan, metricNames, aggregations, filter)
 	return args.Get(0).([]armmonitor.Metric), args.String(1), args.Error(2)
 }
 
diff --git a/x-pack/metricbeat/module/azure/service_interface.go b/x-pack/metricbeat/module/azure/service_interface.go
index cb524c7f6ea..75ae48d3d6e 100644
--- a/x-pack/metricbeat/module/azure/service_interface.go
+++ b/x-pack/metricbeat/module/azure/service_interface.go
@@ -15,5 +15,16 @@ type Service interface {
 	GetResourceDefinitions(id []string, group []string, rType string, query string) ([]*armresources.GenericResourceExpanded, error)
 	GetMetricDefinitionsWithRetry(resourceId string, namespace string) (armmonitor.MetricDefinitionCollection, error)
 	GetMetricNamespaces(resourceId string) (armmonitor.MetricNamespaceCollection, error)
-	GetMetricValues(resourceId string, namespace string, timegrain string, timespan string, metricNames []string, aggregations string, filter string) ([]armmonitor.Metric, string, error)
+	// GetMetricValues returns the metric values for the given resource ID, namespace, timegrain, timespan, metricNames, aggregations and filter.
+	//
+	// If the timegrain is empty, the default timegrain for the metric is used and returned.
+	GetMetricValues(
+		resourceId string, // resourceId is the ID of the resource to query (e.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}")
+		namespace string, // namespace is the metric namespace to query (e.g. "Microsoft.Compute/virtualMachines")
+		timegrain string, // timegrain is the timegrain to use for the metric query (e.g. "PT1M"); if empty, returns the default timegrain for the metric.
+		timespan string, // timespan is the time interval to query (e.g. 2024-04-29T14:03:00Z/2024-04-29T14:04:00Z)
+		metricNames []string, // metricNames is the list of metric names to query (e.g. ["ServiceApiLatency", "Availability"])
+		aggregations string, // aggregations is the comma-separated list of aggregations to use for the metric query (e.g. "Average,Maximum,Minimum")
+		filter string, // filter is the filter to query for dimensions (e.g. "ActivityType eq '*' AND ActivityName eq '*' AND StatusCode eq '*' AND StatusCodeClass eq '*'")
+	) ([]armmonitor.Metric, string, error)
 }
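
For reference, the behavioral change at the heart of the data.go hunks can be sketched outside the patch: a metric value that carries several aggregations now yields one key/value point per non-nil aggregation, instead of only the first match of the former switch. The snippet below uses simplified stand-in types, not the module's real MetricValue and KeyValuePoint structs, purely to illustrate that mapping.

// sketch_multiple_aggregations.go — illustrative only; simplified stand-ins for
// the module's MetricValue and KeyValuePoint types.
package main

import "fmt"

type metricValue struct {
	name                         string
	min, max, avg, total, count *float64
}

type keyValuePoint struct {
	key   string
	value *float64
}

// toPoints mirrors the reworked mapToKeyValuePoints loop: every non-nil
// aggregation on a value produces its own point, rather than stopping at the
// first populated aggregation as the old switch statement did.
func toPoints(v metricValue) []keyValuePoint {
	var points []keyValuePoint
	add := func(suffix string, val *float64) {
		if val != nil {
			points = append(points, keyValuePoint{key: fmt.Sprintf("%s.%s", v.name, suffix), value: val})
		}
	}
	add("min", v.min)
	add("max", v.max)
	add("avg", v.avg)
	add("total", v.total)
	add("count", v.count)
	return points
}

func main() {
	minV, maxV, avgV := 3.0, 12.0, 7.5
	v := metricValue{name: "active_connections", min: &minV, max: &maxV, avg: &avgV}
	// Prints three points (min, max, avg): one per aggregation present on the value.
	for _, p := range toPoints(v) {
		fmt.Printf("%s = %v\n", p.key, *p.value)
	}
}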