From bc8c4fd655026674933e748f859848e7a59e7ea8 Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Thu, 9 Nov 2023 15:28:32 -0600 Subject: [PATCH 01/27] [processor/transform] convert_sum_to_gauge in metric context --- .../metrics/func_convert_sum_to_gauge.go | 10 +- .../func_convert_sum_to_gauge_datapoint.go | 37 +++++++ ...unc_convert_sum_to_gauge_datapoint_test.go | 99 +++++++++++++++++++ .../metrics/func_convert_sum_to_gauge_test.go | 4 +- .../internal/metrics/functions.go | 21 +++- .../internal/metrics/functions_test.go | 3 +- 6 files changed, 164 insertions(+), 10 deletions(-) create mode 100644 processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go create mode 100644 processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint_test.go diff --git a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go index 5166795b8730..f4763e65c9e5 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go +++ b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge.go @@ -9,19 +9,19 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" ) -func newConvertSumToGaugeFactory() ottl.Factory[ottldatapoint.TransformContext] { +func newConvertSumToGaugeFactory() ottl.Factory[ottlmetric.TransformContext] { return ottl.NewFactory("convert_sum_to_gauge", nil, createConvertSumToGaugeFunction) } -func createConvertSumToGaugeFunction(_ ottl.FunctionContext, _ ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { +func createConvertSumToGaugeFunction(_ ottl.FunctionContext, _ ottl.Arguments) 
(ottl.ExprFunc[ottlmetric.TransformContext], error) { return convertSumToGauge() } -func convertSumToGauge() (ottl.ExprFunc[ottldatapoint.TransformContext], error) { - return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { +func convertSumToGauge() (ottl.ExprFunc[ottlmetric.TransformContext], error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { metric := tCtx.GetMetric() if metric.Type() != pmetric.MetricTypeSum { return nil, nil diff --git a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go new file mode 100644 index 000000000000..ca2f09c8a121 --- /dev/null +++ b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +func newConvertDatapointSumToGaugeFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_sum_to_gauge", nil, createDatapointConvertSumToGaugeFunction) +} + +func createDatapointConvertSumToGaugeFunction(_ ottl.FunctionContext, _ ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + return convertDatapointSumToGauge() +} + +func convertDatapointSumToGauge() (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeSum { + return nil, nil + } + + dps := 
metric.Sum().DataPoints() + + // Setting the data type removed all the data points, so we must copy them back to the metric. + dps.CopyTo(metric.SetEmptyGauge().DataPoints()) + + return nil, nil + }, nil +} diff --git a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint_test.go b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint_test.go new file mode 100644 index 000000000000..3c6af3787f2f --- /dev/null +++ b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_datapoint_test.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +func Test_convertDatapointSumToGauge(t *testing.T) { + sumInput := pmetric.NewMetric() + + dp1 := sumInput.SetEmptySum().DataPoints().AppendEmpty() + dp1.SetIntValue(10) + + dp2 := sumInput.Sum().DataPoints().AppendEmpty() + dp2.SetDoubleValue(14.5) + + gaugeInput := pmetric.NewMetric() + gaugeInput.SetEmptyGauge() + + histogramInput := pmetric.NewMetric() + histogramInput.SetEmptyHistogram() + + expoHistogramInput := pmetric.NewMetric() + expoHistogramInput.SetEmptyExponentialHistogram() + + summaryInput := pmetric.NewMetric() + summaryInput.SetEmptySummary() + + tests := []struct { + name string + input pmetric.Metric + want func(pmetric.Metric) + }{ + { + name: "convert sum to gauge", + input: sumInput, + want: func(metric pmetric.Metric) { + sumInput.CopyTo(metric) + + dps := sumInput.Sum().DataPoints() + dps.CopyTo(metric.SetEmptyGauge().DataPoints()) + }, + }, + { + name: "noop for gauge", + input: gaugeInput, + want: func(metric pmetric.Metric) { + gaugeInput.CopyTo(metric) + }, + }, + { + name: "noop for histogram", + input: 
histogramInput, + want: func(metric pmetric.Metric) { + histogramInput.CopyTo(metric) + }, + }, + { + name: "noop for exponential histogram", + input: expoHistogramInput, + want: func(metric pmetric.Metric) { + expoHistogramInput.CopyTo(metric) + }, + }, + { + name: "noop for summary", + input: summaryInput, + want: func(metric pmetric.Metric) { + summaryInput.CopyTo(metric) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := pmetric.NewMetric() + tt.input.CopyTo(metric) + + ctx := ottldatapoint.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) + + exprFunc, _ := convertDatapointSumToGauge() + + _, err := exprFunc(nil, ctx) + assert.Nil(t, err) + + expected := pmetric.NewMetric() + tt.want(expected) + + assert.Equal(t, expected, metric) + }) + } +} diff --git a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_test.go b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_test.go index 38a597541e77..9b4c7962619f 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_test.go +++ b/processor/transformprocessor/internal/metrics/func_convert_sum_to_gauge_test.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" ) func Test_convertSumToGauge(t *testing.T) { @@ -83,7 +83,7 @@ func Test_convertSumToGauge(t *testing.T) { metric := pmetric.NewMetric() tt.input.CopyTo(metric) - ctx := ottldatapoint.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) + ctx := ottlmetric.NewTransformContext(metric, pmetric.NewMetricSlice(), 
pcommon.NewInstrumentationScope(), pcommon.NewResource()) exprFunc, _ := convertSumToGauge() diff --git a/processor/transformprocessor/internal/metrics/functions.go b/processor/transformprocessor/internal/metrics/functions.go index 708b14489d0b..b5782bb28554 100644 --- a/processor/transformprocessor/internal/metrics/functions.go +++ b/processor/transformprocessor/internal/metrics/functions.go @@ -4,22 +4,34 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" import ( + "go.opentelemetry.io/collector/featuregate" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" ) +var useConvertSumToGaugeMetricContext = featuregate.GlobalRegistry().MustRegister( + "processor.transform.ConvertSumToGaugeMetricContext", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("When enabled will use metric context for convert_sum_to_gauge"), +) + func DataPointFunctions() map[string]ottl.Factory[ottldatapoint.TransformContext] { functions := ottlfuncs.StandardFuncs[ottldatapoint.TransformContext]() datapointFunctions := ottl.CreateFactoryMap[ottldatapoint.TransformContext]( - newConvertSumToGaugeFactory(), newConvertGaugeToSumFactory(), newConvertSummarySumValToSumFactory(), newConvertSummaryCountValToSumFactory(), ) + if !useConvertSumToGaugeMetricContext.IsEnabled() { + f := newConvertDatapointSumToGaugeFactory() + datapointFunctions[f.Name()] = f + } + for k, v := range datapointFunctions { functions[k] = v } @@ -35,9 +47,14 @@ func MetricFunctions() map[string]ottl.Factory[ottlmetric.TransformContext] { newExtractCountMetricFactory(), ) + if useConvertSumToGaugeMetricContext.IsEnabled() { + f := 
newConvertSumToGaugeFactory() + metricFunctions[f.Name()] = f + } + for k, v := range metricFunctions { functions[k] = v - } + return functions } diff --git a/processor/transformprocessor/internal/metrics/functions_test.go b/processor/transformprocessor/internal/metrics/functions_test.go index fd48a31d2666..cb7dadc86f67 100644 --- a/processor/transformprocessor/internal/metrics/functions_test.go +++ b/processor/transformprocessor/internal/metrics/functions_test.go @@ -16,7 +16,7 @@ import ( func Test_DataPointFunctions(t *testing.T) { expected := ottlfuncs.StandardFuncs[ottldatapoint.TransformContext]() - expected["convert_sum_to_gauge"] = newConvertSumToGaugeFactory() + expected["convert_sum_to_gauge"] = newConvertDatapointSumToGaugeFactory() expected["convert_gauge_to_sum"] = newConvertGaugeToSumFactory() expected["convert_summary_sum_val_to_sum"] = newConvertSummarySumValToSumFactory() expected["convert_summary_count_val_to_sum"] = newConvertSummaryCountValToSumFactory() @@ -31,6 +31,7 @@ func Test_DataPointFunctions(t *testing.T) { func Test_MetricFunctions(t *testing.T) { expected := ottlfuncs.StandardFuncs[ottlmetric.TransformContext]() + expected["convert_sum_to_gauge"] = newConvertSumToGaugeFactory() expected["extract_sum_metric"] = newExtractSumMetricFactory() expected["extract_count_metric"] = newExtractCountMetricFactory() actual := MetricFunctions() From ff90a52c36e46db1097cf89fdab3e57957b7d669 Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Wed, 15 Nov 2023 15:24:11 -0500 Subject: [PATCH 02/27] [processor/transform] convert_gauge_to_sum in metric context --- .../metrics/func_convert_gauge_to_sum.go | 10 +++++----- .../metrics/func_convert_gauge_to_sum_test.go | 4 ++-- .../internal/metrics/functions.go | 17 ++++++++++++----- .../internal/metrics/functions_test.go | 3 ++- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go 
b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go index 640d26d2f063..3c3a5100dc3c 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" ) type convertGaugeToSumArguments struct { @@ -18,11 +18,11 @@ type convertGaugeToSumArguments struct { Monotonic bool } -func newConvertGaugeToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { +func newConvertGaugeToSumFactory() ottl.Factory[ottlmetric.TransformContext] { return ottl.NewFactory("convert_gauge_to_sum", &convertGaugeToSumArguments{}, createConvertGaugeToSumFunction) } -func createConvertGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { +func createConvertGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottlmetric.TransformContext], error) { args, ok := oArgs.(*convertGaugeToSumArguments) if !ok { @@ -32,7 +32,7 @@ func createConvertGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Argument return convertGaugeToSum(args.StringAggTemp, args.Monotonic) } -func convertGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { +func convertGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottlmetric.TransformContext], error) { var aggTemp pmetric.AggregationTemporality switch stringAggTemp { case "delta": @@ -43,7 +43,7 @@ func convertGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottl return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) } - 
return func(_ context.Context, tCtx ottldatapoint.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { metric := tCtx.GetMetric() if metric.Type() != pmetric.MetricTypeGauge { return nil, nil diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go index a3ab17d94ce7..feaeb18dc035 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" ) func Test_convertGaugeToSum(t *testing.T) { @@ -115,7 +115,7 @@ func Test_convertGaugeToSum(t *testing.T) { metric := pmetric.NewMetric() tt.input.CopyTo(metric) - ctx := ottldatapoint.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) + ctx := ottlmetric.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) exprFunc, _ := convertGaugeToSum(tt.stringAggTemp, tt.monotonic) diff --git a/processor/transformprocessor/internal/metrics/functions.go b/processor/transformprocessor/internal/metrics/functions.go index b5782bb28554..d6f0db73c9f2 100644 --- a/processor/transformprocessor/internal/metrics/functions.go +++ b/processor/transformprocessor/internal/metrics/functions.go @@ -22,14 +22,17 @@ func DataPointFunctions() map[string]ottl.Factory[ottldatapoint.TransformContext functions := ottlfuncs.StandardFuncs[ottldatapoint.TransformContext]() datapointFunctions := 
ottl.CreateFactoryMap[ottldatapoint.TransformContext]( - newConvertGaugeToSumFactory(), newConvertSummarySumValToSumFactory(), newConvertSummaryCountValToSumFactory(), ) if !useConvertSumToGaugeMetricContext.IsEnabled() { - f := newConvertDatapointSumToGaugeFactory() - datapointFunctions[f.Name()] = f + for _, f := range []ottl.Factory[ottldatapoint.TransformContext]{ + newConvertDatapointSumToGaugeFactory(), + newConvertDatapointGaugeToSumFactory(), + } { + datapointFunctions[f.Name()] = f + } } for k, v := range datapointFunctions { @@ -48,8 +51,12 @@ func MetricFunctions() map[string]ottl.Factory[ottlmetric.TransformContext] { ) if useConvertSumToGaugeMetricContext.IsEnabled() { - f := newConvertSumToGaugeFactory() - metricFunctions[f.Name()] = f + for _, f := range []ottl.Factory[ottlmetric.TransformContext]{ + newConvertSumToGaugeFactory(), + newConvertGaugeToSumFactory(), + } { + metricFunctions[f.Name()] = f + } } for k, v := range metricFunctions { diff --git a/processor/transformprocessor/internal/metrics/functions_test.go b/processor/transformprocessor/internal/metrics/functions_test.go index cb7dadc86f67..c35a8e4935f3 100644 --- a/processor/transformprocessor/internal/metrics/functions_test.go +++ b/processor/transformprocessor/internal/metrics/functions_test.go @@ -17,7 +17,7 @@ import ( func Test_DataPointFunctions(t *testing.T) { expected := ottlfuncs.StandardFuncs[ottldatapoint.TransformContext]() expected["convert_sum_to_gauge"] = newConvertDatapointSumToGaugeFactory() - expected["convert_gauge_to_sum"] = newConvertGaugeToSumFactory() + expected["convert_gauge_to_sum"] = newConvertDatapointGaugeToSumFactory() expected["convert_summary_sum_val_to_sum"] = newConvertSummarySumValToSumFactory() expected["convert_summary_count_val_to_sum"] = newConvertSummaryCountValToSumFactory() @@ -32,6 +32,7 @@ func Test_DataPointFunctions(t *testing.T) { func Test_MetricFunctions(t *testing.T) { expected := ottlfuncs.StandardFuncs[ottlmetric.TransformContext]() 
expected["convert_sum_to_gauge"] = newConvertSumToGaugeFactory() + expected["convert_gauge_to_sum"] = newConvertGaugeToSumFactory() expected["extract_sum_metric"] = newExtractSumMetricFactory() expected["extract_count_metric"] = newExtractCountMetricFactory() actual := MetricFunctions() From 1207dab652873050eaad250f74dde2dbefaea3da Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Wed, 15 Nov 2023 18:59:29 -0500 Subject: [PATCH 03/27] rename gate for using metric context for conversion between sum and gauge --- .../transformprocessor/internal/metrics/functions.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/processor/transformprocessor/internal/metrics/functions.go b/processor/transformprocessor/internal/metrics/functions.go index d6f0db73c9f2..5c993ff3d417 100644 --- a/processor/transformprocessor/internal/metrics/functions.go +++ b/processor/transformprocessor/internal/metrics/functions.go @@ -12,10 +12,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" ) -var useConvertSumToGaugeMetricContext = featuregate.GlobalRegistry().MustRegister( - "processor.transform.ConvertSumToGaugeMetricContext", +var useConvertBetweenSumAndGaugeMetricContext = featuregate.GlobalRegistry().MustRegister( + "processor.transform.ConvertBetweenSumAndGaugeMetricContext", featuregate.StageAlpha, - featuregate.WithRegisterDescription("When enabled will use metric context for convert_sum_to_gauge"), + featuregate.WithRegisterDescription("When enabled will use metric context for conversion between sum and gauge"), ) func DataPointFunctions() map[string]ottl.Factory[ottldatapoint.TransformContext] { @@ -26,7 +26,7 @@ func DataPointFunctions() map[string]ottl.Factory[ottldatapoint.TransformContext newConvertSummaryCountValToSumFactory(), ) - if !useConvertSumToGaugeMetricContext.IsEnabled() { + if !useConvertBetweenSumAndGaugeMetricContext.IsEnabled() { for _, f := range []ottl.Factory[ottldatapoint.TransformContext]{ 
newConvertDatapointSumToGaugeFactory(), newConvertDatapointGaugeToSumFactory(), @@ -50,7 +50,7 @@ func MetricFunctions() map[string]ottl.Factory[ottlmetric.TransformContext] { newExtractCountMetricFactory(), ) - if useConvertSumToGaugeMetricContext.IsEnabled() { + if useConvertBetweenSumAndGaugeMetricContext.IsEnabled() { for _, f := range []ottl.Factory[ottlmetric.TransformContext]{ newConvertSumToGaugeFactory(), newConvertGaugeToSumFactory(), From 7c0098b129246925d01ca34a325462ea5f50c11f Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Wed, 15 Nov 2023 19:04:55 -0500 Subject: [PATCH 04/27] add changelog entry --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c99ff75a11e3..b71363cfd081 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ If you are looking for developer-facing changes, check out [CHANGELOG-API.md](./ - `signalfxexporter`: Add an option to control the dimension client timeout (#27815) - `signalfxexporter`: Add the build version to the user agent of the SignalFx exporter (#16841) - `splunkentreceiver`: Users can now use auth settings and basicauth extension to connect to their Splunk enterprise deployments (#27026) +- `processor/transform`: Convert between sum and gauge in metric context when alpha feature gate `processor.transform.ConvertBetweenSumAndGaugeMetricContext` enabled (#29091) ### 🧰 Bug fixes 🧰 From 703b612b73089ca678b41c4ed857739526f4978e Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Wed, 15 Nov 2023 19:07:37 -0500 Subject: [PATCH 05/27] add missing files --- .../func_convert_gauge_to_sum_datapoint.go | 62 ++++++++ ...unc_convert_gauge_to_sum_datapoint_test.go | 149 ++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go create mode 100644 processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint_test.go diff --git 
a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go new file mode 100644 index 000000000000..adfe9f37f253 --- /dev/null +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/metrics" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +type convertDatapointGaugeToSumArguments struct { + StringAggTemp string + Monotonic bool +} + +func newConvertDatapointGaugeToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { + return ottl.NewFactory("convert_gauge_to_sum", &convertGaugeToSumArguments{}, createConvertDatapointGaugeToSumFunction) +} + +func createConvertDatapointGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + args, ok := oArgs.(*convertGaugeToSumArguments) + + if !ok { + return nil, fmt.Errorf("ConvertGaugeToSumFactory args must be of type *ConvertGaugeToSumArguments") + } + + return convertDatapointGaugeToSum(args.StringAggTemp, args.Monotonic) +} + +func convertDatapointGaugeToSum(stringAggTemp string, monotonic bool) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + var aggTemp pmetric.AggregationTemporality + switch stringAggTemp { + case "delta": + aggTemp = pmetric.AggregationTemporalityDelta + case "cumulative": + aggTemp = pmetric.AggregationTemporalityCumulative + default: + return nil, fmt.Errorf("unknown aggregation temporality: %s", stringAggTemp) + } + + return func(_ context.Context, 
tCtx ottldatapoint.TransformContext) (any, error) { + metric := tCtx.GetMetric() + if metric.Type() != pmetric.MetricTypeGauge { + return nil, nil + } + + dps := metric.Gauge().DataPoints() + + metric.SetEmptySum().SetAggregationTemporality(aggTemp) + metric.Sum().SetIsMonotonic(monotonic) + + // Setting the data type removed all the data points, so we must copy them back to the metric. + dps.CopyTo(metric.Sum().DataPoints()) + + return nil, nil + }, nil +} diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint_test.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint_test.go new file mode 100644 index 000000000000..e0a030a3bc8c --- /dev/null +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint_test.go @@ -0,0 +1,149 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" +) + +func Test_convertDatapointGaugeToSum(t *testing.T) { + gaugeInput := pmetric.NewMetric() + + dp1 := gaugeInput.SetEmptyGauge().DataPoints().AppendEmpty() + dp1.SetIntValue(10) + + dp2 := gaugeInput.Gauge().DataPoints().AppendEmpty() + dp2.SetDoubleValue(14.5) + + sumInput := pmetric.NewMetric() + sumInput.SetEmptySum() + + histogramInput := pmetric.NewMetric() + histogramInput.SetEmptyHistogram() + + expoHistogramInput := pmetric.NewMetric() + expoHistogramInput.SetEmptyHistogram() + + summaryInput := pmetric.NewMetric() + summaryInput.SetEmptySummary() + + tests := []struct { + name string + stringAggTemp string + monotonic bool + input pmetric.Metric + want func(pmetric.Metric) + }{ + { + name: "convert gauge to cumulative sum", + stringAggTemp: "cumulative", + monotonic: false, + 
input: gaugeInput, + want: func(metric pmetric.Metric) { + gaugeInput.CopyTo(metric) + + dps := gaugeInput.Gauge().DataPoints() + + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + metric.Sum().SetIsMonotonic(false) + + dps.CopyTo(metric.Sum().DataPoints()) + }, + }, + { + name: "convert gauge to delta sum", + stringAggTemp: "delta", + monotonic: true, + input: gaugeInput, + want: func(metric pmetric.Metric) { + gaugeInput.CopyTo(metric) + + dps := gaugeInput.Gauge().DataPoints() + + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + metric.Sum().SetIsMonotonic(true) + + dps.CopyTo(metric.Sum().DataPoints()) + }, + }, + { + name: "noop for sum", + stringAggTemp: "delta", + monotonic: true, + input: sumInput, + want: func(metric pmetric.Metric) { + sumInput.CopyTo(metric) + }, + }, + { + name: "noop for histogram", + stringAggTemp: "delta", + monotonic: true, + input: histogramInput, + want: func(metric pmetric.Metric) { + histogramInput.CopyTo(metric) + }, + }, + { + name: "noop for exponential histogram", + stringAggTemp: "delta", + monotonic: true, + input: expoHistogramInput, + want: func(metric pmetric.Metric) { + expoHistogramInput.CopyTo(metric) + }, + }, + { + name: "noop for summary", + stringAggTemp: "delta", + monotonic: true, + input: summaryInput, + want: func(metric pmetric.Metric) { + summaryInput.CopyTo(metric) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := pmetric.NewMetric() + tt.input.CopyTo(metric) + + ctx := ottldatapoint.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) + + exprFunc, _ := convertDatapointGaugeToSum(tt.stringAggTemp, tt.monotonic) + + _, err := exprFunc(nil, ctx) + assert.Nil(t, err) + + expected := pmetric.NewMetric() + tt.want(expected) + + assert.Equal(t, expected, metric) + }) + } +} + +func 
Test_convertDatapointGaugeToSum_validation(t *testing.T) { + tests := []struct { + name string + stringAggTemp string + }{ + { + name: "invalid aggregation temporality", + stringAggTemp: "not a real aggregation temporality", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := convertDatapointGaugeToSum(tt.stringAggTemp, true) + assert.Error(t, err, "unknown aggregation temporality: not a real aggregation temporality") + }) + } +} From 222a4ad914779cce69b13e17640a21e371877318 Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Fri, 17 Nov 2023 08:12:50 -0500 Subject: [PATCH 06/27] adjust test setup for metric context --- .../internal/metrics/func_convert_gauge_to_sum_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go index feaeb18dc035..61b74bee5b7e 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_test.go @@ -115,7 +115,7 @@ func Test_convertGaugeToSum(t *testing.T) { metric := pmetric.NewMetric() tt.input.CopyTo(metric) - ctx := ottlmetric.NewTransformContext(pmetric.NewNumberDataPoint(), metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) + ctx := ottlmetric.NewTransformContext(metric, pmetric.NewMetricSlice(), pcommon.NewInstrumentationScope(), pcommon.NewResource()) exprFunc, _ := convertGaugeToSum(tt.stringAggTemp, tt.monotonic) From f386c46170e566bf9e90b3c984664e2af2d32b91 Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Fri, 17 Nov 2023 08:35:24 -0500 Subject: [PATCH 07/27] toggle feature gate during test per review feedback --- processor/transformprocessor/go.mod | 1 + processor/transformprocessor/go.sum | 2 ++ .../transformprocessor/internal/metrics/functions_test.go | 3 +++ 3 
files changed, 6 insertions(+) diff --git a/processor/transformprocessor/go.mod b/processor/transformprocessor/go.mod index 0cb8dd7eeb0a..9c58dfb31f38 100644 --- a/processor/transformprocessor/go.mod +++ b/processor/transformprocessor/go.mod @@ -32,6 +32,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/processor/transformprocessor/go.sum b/processor/transformprocessor/go.sum index f297157038e5..80befca5105a 100644 --- a/processor/transformprocessor/go.sum +++ b/processor/transformprocessor/go.sum @@ -82,6 +82,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 h1:stB4V0yU6htEVWxoNOVuiIPDUetbRLlpP4m1Rcn03G8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0/go.mod h1:mrkZwYA2MKZaidETgwMffAyPzsLjOq5fEJB58TIXa0I= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= diff --git a/processor/transformprocessor/internal/metrics/functions_test.go 
b/processor/transformprocessor/internal/metrics/functions_test.go index c35a8e4935f3..95af5438e944 100644 --- a/processor/transformprocessor/internal/metrics/functions_test.go +++ b/processor/transformprocessor/internal/metrics/functions_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" @@ -35,6 +36,8 @@ func Test_MetricFunctions(t *testing.T) { expected["convert_gauge_to_sum"] = newConvertGaugeToSumFactory() expected["extract_sum_metric"] = newExtractSumMetricFactory() expected["extract_count_metric"] = newExtractCountMetricFactory() + + defer testutil.SetFeatureGateForTest(t, useConvertBetweenSumAndGaugeMetricContext, true)() actual := MetricFunctions() require.Equal(t, len(expected), len(actual)) for k := range actual { From d6de55d047c6d5ddd785e228c5c12a1ed40f158d Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Fri, 17 Nov 2023 08:40:38 -0500 Subject: [PATCH 08/27] Replace manual changelog entry with chloggen --- .chloggen/metric-conversion-context.yaml | 27 ++++++++++++++++++++++++ CHANGELOG.md | 1 - 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100755 .chloggen/metric-conversion-context.yaml diff --git a/.chloggen/metric-conversion-context.yaml b/.chloggen/metric-conversion-context.yaml new file mode 100755 index 000000000000..27772a727518 --- /dev/null +++ b/.chloggen/metric-conversion-context.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/transform + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Convert between sum and gauge in metric context when alpha feature gate `processor.transform.ConvertBetweenSumAndGaugeMetricContext` enabled + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [20773] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fd67379151d..b0b31c3fe868 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -210,7 +210,6 @@ If you are looking for developer-facing changes, check out [CHANGELOG-API.md](./ - `signalfxexporter`: Add an option to control the dimension client timeout (#27815) - `signalfxexporter`: Add the build version to the user agent of the SignalFx exporter (#16841) - `splunkentreceiver`: Users can now use auth settings and basicauth extension to connect to their Splunk enterprise deployments (#27026) -- `processor/transform`: Convert between sum and gauge in metric context when alpha feature gate `processor.transform.ConvertBetweenSumAndGaugeMetricContext` enabled (#29091) ### 🧰 Bug fixes 🧰 From 711839534a0c6f31dbf3e041cd7158a47fc35f45 Mon Sep 17 00:00:00 2001 From: Faith Chikwekwe Date: Thu, 16 Nov 2023 10:53:18 -0500 Subject: [PATCH 09/27] feat: add ability to filter and export markers (#27862) **Description:** This add logic to filter logs based on log conditions and sent desired logs as event markers to the honeycomb marker api. **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27666 **Testing:** Unit testing for log exporter and config. Added component testing to `otelcontribcol`. 
**Documentation:** README describing component usage Screenshot of exported markers showing up in Honeycomb Screenshot 2023-11-14 at 1 27 49 PM --------- Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> --- cmd/otelcontribcol/builder-config.yaml | 2 + cmd/otelcontribcol/components.go | 2 + cmd/otelcontribcol/exporters_test.go | 13 + cmd/otelcontribcol/go.mod | 3 + exporter/honeycombmarkerexporter/README.md | 43 ++- exporter/honeycombmarkerexporter/config.go | 50 ++-- .../honeycombmarkerexporter/config_test.go | 44 +-- exporter/honeycombmarkerexporter/factory.go | 11 +- exporter/honeycombmarkerexporter/go.mod | 14 + exporter/honeycombmarkerexporter/go.sum | 27 ++ .../honeycombmarkerexporter/logs_exporter.go | 121 +++++++- .../logs_exporter_test.go | 272 ++++++++++++++++++ .../testdata/config.yaml | 78 +++-- 13 files changed, 551 insertions(+), 129 deletions(-) create mode 100644 exporter/honeycombmarkerexporter/logs_exporter_test.go diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index 7d6f19ad8c01..9349963ff931 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -56,6 +56,7 @@ exporters: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.89.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudpubsubexporter v0.89.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter v0.89.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter v0.89.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter v0.89.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter v0.89.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.89.0 @@ -389,6 +390,7 @@ replaces: - 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver => ../../receiver/googlecloudpubsubreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter => ../../exporter/sumologicexporter - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter => ../../exporter/instanaexporter + - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter => ../../exporter/honeycombmarkerexporter - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver => ../../receiver/otlpjsonfilereceiver - github.com/open-telemetry/opentelemetry-collector-contrib/processor/redactionprocessor => ../../processor/redactionprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling => ../../extension/jaegerremotesampling diff --git a/cmd/otelcontribcol/components.go b/cmd/otelcontribcol/components.go index 449cd50ad7e3..619077fc7b12 100644 --- a/cmd/otelcontribcol/components.go +++ b/cmd/otelcontribcol/components.go @@ -47,6 +47,7 @@ import ( googlecloudexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter" googlecloudpubsubexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudpubsubexporter" googlemanagedprometheusexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter" + honeycombmarkerexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter" influxdbexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter" instanaexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter" kafkaexporter "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" @@ -348,6 +349,7 @@ func components() (otelcol.Factories, error) { 
googlecloudexporter.NewFactory(), googlecloudpubsubexporter.NewFactory(), googlemanagedprometheusexporter.NewFactory(), + honeycombmarkerexporter.NewFactory(), influxdbexporter.NewFactory(), instanaexporter.NewFactory(), kafkaexporter.NewFactory(), diff --git a/cmd/otelcontribcol/exporters_test.go b/cmd/otelcontribcol/exporters_test.go index ea2443a4456f..c1db9d66ee6c 100644 --- a/cmd/otelcontribcol/exporters_test.go +++ b/cmd/otelcontribcol/exporters_test.go @@ -40,6 +40,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/f5cloudexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" @@ -421,6 +422,18 @@ func TestDefaultExporters(t *testing.T) { exporter: "googlecloudpubsub", skipLifecycle: true, }, + { + exporter: "honeycombmarker", + getConfigFn: func() component.Config { + cfg := expFactories["honeycombmarker"].CreateDefaultConfig().(*honeycombmarkerexporter.Config) + cfg.Endpoint = "http://" + endpoint + // disable queue to validate passing the test data synchronously + cfg.QueueSettings.Enabled = false + cfg.RetrySettings.Enabled = false + return cfg + }, + expectConsumeErr: true, + }, { exporter: "influxdb", getConfigFn: func() component.Config { diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 12a8f49aa01a..d50dd3469d5a 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -32,6 +32,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.89.0 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudpubsubexporter v0.89.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter v0.89.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter v0.89.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter v0.89.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter v0.89.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.89.0 @@ -1082,6 +1083,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumol replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter => ../../exporter/instanaexporter +replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter => ../../exporter/honeycombmarkerexporter + replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver => ../../receiver/otlpjsonfilereceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/redactionprocessor => ../../processor/redactionprocessor diff --git a/exporter/honeycombmarkerexporter/README.md b/exporter/honeycombmarkerexporter/README.md index c11819579877..e6a4cfb04c6a 100644 --- a/exporter/honeycombmarkerexporter/README.md +++ b/exporter/honeycombmarkerexporter/README.md @@ -5,38 +5,27 @@ This exporter allows creating markers, via the Honeycomb Markers API, based on t The following configuration options are supported: -* `api_key` (Required): This is the API key (also called Write Key) for your Honeycomb account. -* `api_url` (Required): You can set the hostname to send marker data to. -* `markers` (Required): This specifies the exact configuration to create an event marker. - * `type`: Specifies the marker type. Markers with the same type will appear in the same color in Honeycomb. 
MarkerType or MarkerColor should be set. - * `color`: Specifies the marker color. Will only be used if MarkerType is not set. - * `messagefield`: This is the attribute that will be used as the message. If necessary the value will be converted to a string. - * `urlfield`: This is the attribute that will be used as the url. If necessary the value will be converted to a string. - * `rules`: This is a list of OTTL rules that determine when to create an event marker. - * `resourceconditions`: A list of ottlresource conditions that determine a match - * `logconditions`: A list of ottllog conditions that determine a match +* `api_key` (Required): This is the API key for your Honeycomb account. +* `api_url` (Required): This sets the hostname to send marker data to. +* `markers` (Required): This is a list of configurations to create an event marker. + * `type` (Required): Specifies the marker type. + * `message_key`: This attribute will be used as the message. It describes the event marker. If necessary the value will be converted to a string. + * `url_key`: This attribute will be used as the url. It can be accessed through the event marker in Honeycomb. If necessary the value will be converted to a string. + * `rules` (Required): This is a list of OTTL rules that determine when to create an event marker. 
+ * `log_conditions` (Required): A list of ottllog conditions that determine a match Example: ```yaml exporters: - honeycomb: - api_key: "my-api-key" - api_url: "https://api.testhost.io" + honeycombmarker: + api_key: "environment-api-key" + api_url: "https://api.honeycomb.io" markers: - - type: "fooType", - messagefield: "test message", - urlfield: "https://api.testhost.io", + - type: "marker-type" + message_key: "marker-message" + url_key: "marker-url" + dataset_slug: "__all__" rules: - - resourceconditions: - - IsMatch(attributes["test"], ".*") - - logconditions: - - body == "test" - - color: "green", - messagefield: "another test message", - urlfield: "https://api.testhost.io", - rules: - - resourceconditions: - - IsMatch(attributes["test"], ".*") - - logconditions: + - log_conditions: - body == "test" ``` \ No newline at end of file diff --git a/exporter/honeycombmarkerexporter/config.go b/exporter/honeycombmarkerexporter/config.go index bc9bf7d2987c..d94268f08f4b 100644 --- a/exporter/honeycombmarkerexporter/config.go +++ b/exporter/honeycombmarkerexporter/config.go @@ -7,11 +7,15 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/exporter/exporterhelper" "go.uber.org/zap" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" ) // Config defines configuration for the Honeycomb Marker exporter. 
@@ -24,58 +28,60 @@ type Config struct { // Markers is the list of markers to create Markers []Marker `mapstructure:"markers"` + + confighttp.HTTPClientSettings `mapstructure:",squash"` + exporterhelper.QueueSettings `mapstructure:"sending_queue"` + exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` } type Marker struct { - // Type defines the type of Marker. Markers with the same type appear in Honeycomb with the same color + // Type defines the type of Marker. Type string `mapstructure:"type"` - // Color is the color of the Marker. Will only be used if the Type does not already exist. - Color string `mapstructure:"color"` - - // MessageField is the attribute that will be used as the message. + // MessageKey is the attribute that will be used as the message. // If necessary the value will be converted to a string. - MessageField string `mapstructure:"message_field"` + MessageKey string `mapstructure:"message_key"` - // URLField is the attribute that will be used as the url. + // URLKey is the attribute that will be used as the url. // If necessary the value will be converted to a string. 
- URLField string `mapstructure:"url_field"` + URLKey string `mapstructure:"url_key"` // Rules are the OTTL rules that determine when a piece of telemetry should be turned into a Marker Rules Rules `mapstructure:"rules"` + + // DatasetSlug is the endpoint that specifies the Honeycomb environment + DatasetSlug string `mapstructure:"dataset_slug"` } type Rules struct { - // ResourceConditions is the list of ottlresource conditions that determine a match - ResourceConditions []string `mapstructure:"resource_conditions"` - // LogConditions is the list of ottllog conditions that determine a match LogConditions []string `mapstructure:"log_conditions"` + + logBoolExpr expr.BoolExpr[ottllog.TransformContext] } -var defaultCfg = createDefaultConfig().(*Config) +var _ component.Config = (*Config)(nil) func (cfg *Config) Validate() error { - if cfg == nil { - cfg = defaultCfg - } - if cfg.APIKey == "" { return fmt.Errorf("invalid API Key") } if len(cfg.Markers) != 0 { for _, m := range cfg.Markers { - if len(m.Rules.ResourceConditions) == 0 && len(m.Rules.LogConditions) == 0 { - return fmt.Errorf("no rules supplied for Marker %v", m) + if m.Type == "" { + return fmt.Errorf("marker must have a type %v", m) } - _, err := filterottl.NewBoolExprForResource(m.Rules.ResourceConditions, filterottl.StandardResourceFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) - if err != nil { - return err + if m.DatasetSlug == "" { + return fmt.Errorf("marker must have a dataset slug %v", m) + } + + if len(m.Rules.LogConditions) == 0 { + return fmt.Errorf("marker must have rules %v", m) } - _, err = filterottl.NewBoolExprForLog(m.Rules.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + _, err := filterottl.NewBoolExprForLog(m.Rules.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) if err != nil { return err } diff --git 
a/exporter/honeycombmarkerexporter/config_test.go b/exporter/honeycombmarkerexporter/config_test.go index 9fbdc724db56..85a33fc701bb 100644 --- a/exporter/honeycombmarkerexporter/config_test.go +++ b/exporter/honeycombmarkerexporter/config_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/exporter/exporterhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter/internal/metadata" ) @@ -26,45 +27,23 @@ func TestLoadConfig(t *testing.T) { expected component.Config }{ { - id: component.NewIDWithName("honeycomb", ""), + id: component.NewIDWithName(metadata.Type, ""), expected: &Config{ - APIKey: "test-apikey", - APIURL: "https://api.testhost.io", + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + RetrySettings: exporterhelper.NewDefaultRetrySettings(), + APIKey: "test-apikey", + APIURL: "https://api.testhost.io", Markers: []Marker{ { - Type: "fooType", - MessageField: "test message", - URLField: "https://api.testhost.io", + Type: "fooType", + MessageKey: "test message", + URLKey: "https://api.testhost.io", Rules: Rules{ - ResourceConditions: []string{ - `IsMatch(attributes["test"], ".*")`, - }, - LogConditions: []string{ - `body == "test"`, - }, - }, - }, - }, - }, - }, - { - id: component.NewIDWithName("honeycomb", "color_no_type"), - expected: &Config{ - APIKey: "test-apikey", - APIURL: "https://api.testhost.io", - Markers: []Marker{ - { - Color: "green", - MessageField: "test message", - URLField: "https://api.testhost.io", - Rules: Rules{ - ResourceConditions: []string{ - `IsMatch(attributes["test"], ".*")`, - }, LogConditions: []string{ `body == "test"`, }, }, + DatasetSlug: "__all__", }, }, }, @@ -81,6 +60,9 @@ func TestLoadConfig(t *testing.T) { { id: component.NewIDWithName(metadata.Type, "no_markers_supplied"), }, + { + id: 
component.NewIDWithName(metadata.Type, "no_dataset_slug"), + }, } for _, tt := range tests { diff --git a/exporter/honeycombmarkerexporter/factory.go b/exporter/honeycombmarkerexporter/factory.go index 36e26f4d7e7f..0d4adb2f947b 100644 --- a/exporter/honeycombmarkerexporter/factory.go +++ b/exporter/honeycombmarkerexporter/factory.go @@ -36,12 +36,19 @@ func createLogsExporter( ) (exporter.Logs, error) { cf := cfg.(*Config) - exporter := newLogsExporter(set.Logger, cf) + logsExp, err := newHoneycombLogsExporter(set.TelemetrySettings, cf) + if err != nil { + return nil, err + } return exporterhelper.NewLogsExporter( ctx, set, cfg, - exporter.exportLogs, + logsExp.exportMarkers, + exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(cf.RetrySettings), + exporterhelper.WithQueue(cf.QueueSettings), + exporterhelper.WithStart(logsExp.start), ) } diff --git a/exporter/honeycombmarkerexporter/go.mod b/exporter/honeycombmarkerexporter/go.mod index 71db80799c9d..983db14ff335 100644 --- a/exporter/honeycombmarkerexporter/go.mod +++ b/exporter/honeycombmarkerexporter/go.mod @@ -7,6 +7,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.89.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/collector/component v0.89.0 + go.opentelemetry.io/collector/config/confighttp v0.89.0 go.opentelemetry.io/collector/config/configopaque v0.89.0 go.opentelemetry.io/collector/confmap v0.89.0 go.opentelemetry.io/collector/exporter v0.89.0 @@ -18,14 +19,20 @@ require ( github.com/alecthomas/participle/v2 v2.1.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/uuid v1.4.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect @@ -36,13 +43,20 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/cors v1.10.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.89.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.89.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.89.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.89.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.89.0 // indirect + go.opentelemetry.io/collector/config/internal v0.89.0 // indirect go.opentelemetry.io/collector/consumer v0.89.0 // indirect go.opentelemetry.io/collector/extension v0.89.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.89.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 // indirect go.opentelemetry.io/collector/receiver v0.89.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/otel v1.20.0 // indirect go.opentelemetry.io/otel/metric v1.20.0 // indirect go.opentelemetry.io/otel/trace v1.20.0 // indirect diff --git a/exporter/honeycombmarkerexporter/go.sum b/exporter/honeycombmarkerexporter/go.sum index b95c66eb5230..6aa813c0b68a 100644 --- 
a/exporter/honeycombmarkerexporter/go.sum +++ b/exporter/honeycombmarkerexporter/go.sum @@ -19,10 +19,17 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -44,6 +51,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf 
v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -65,6 +74,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= @@ -94,6 +105,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -111,10 +124,20 @@ go.opentelemetry.io/collector v0.89.0 h1:lzpfD9NTHh+1M+qzcoYUH+i2rOgFSox3bGQFUI5 go.opentelemetry.io/collector v0.89.0/go.mod h1:UZUtmQ3kai0CLPWvPmHKpmwqqEoo50n1bwzYYhXX0eA= go.opentelemetry.io/collector/component v0.89.0 h1:PoQJX86BpaSZhzx0deQXHh3QMuW6XKVmolSdTKE506c= go.opentelemetry.io/collector/component v0.89.0/go.mod h1:ZZncnMVaNs++JIbAMiemUIWLZrZ3PMEzI3S3K8pnkws= +go.opentelemetry.io/collector/config/configauth v0.89.0 h1:F082cy1OwrjyucI0wgEO2lRPTWJlgJzM/I5d0BoVgp4= +go.opentelemetry.io/collector/config/configauth v0.89.0/go.mod h1:yRJj70B3MyfbyGuyKO1I+5LtGuvx/WLUh8kuQ/XX6RE= +go.opentelemetry.io/collector/config/configcompression v0.89.0 h1:Z4LG045HwoNqXaibVbAQkcAQGmvY4OHrY4eCppoAzoQ= +go.opentelemetry.io/collector/config/configcompression v0.89.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= +go.opentelemetry.io/collector/config/confighttp v0.89.0 h1:RatLdeZkCu3uLtCjbS8g5Aec2JB3/CSpB6O7P081Bhg= +go.opentelemetry.io/collector/config/confighttp v0.89.0/go.mod h1:R5BIbvqlxSDQGpCRWd2HBZIWijfSIWRpLeSpZjkKkag= go.opentelemetry.io/collector/config/configopaque v0.89.0 h1:Ad6yGcGBHs+J9SNjkedY68JsLZ1vBn4kKzdqKuTCRsE= go.opentelemetry.io/collector/config/configopaque v0.89.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= go.opentelemetry.io/collector/config/configtelemetry v0.89.0 h1:NtRknYDfMgP1r8mnByo6qQQK8IBw/lF9Qke5f7VhGZ0= go.opentelemetry.io/collector/config/configtelemetry v0.89.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= +go.opentelemetry.io/collector/config/configtls v0.89.0 h1:XDeUaTU7LYwnEXz/CSdjbCStJa7n0YR1q0QpK0Vtw9w= +go.opentelemetry.io/collector/config/configtls v0.89.0/go.mod h1:NlE4elqXoyFfzQvYfzgH6uOU1zNVa+5tt6EIq52TJ9Y= +go.opentelemetry.io/collector/config/internal v0.89.0 h1:fs7LJTJd1EF76pjK7ZZZMWNxze0+pDXq3mfRwhm0P0g= 
+go.opentelemetry.io/collector/config/internal v0.89.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= go.opentelemetry.io/collector/confmap v0.89.0 h1:N5Vg1+FXEFBHHlGIPg4OSlM9uTHjCI7RlWWrKjtOzWQ= go.opentelemetry.io/collector/confmap v0.89.0/go.mod h1:D8FMPvuihtVxwXaz/qp5q9X2lq9l97QyjfsdZD1spmc= go.opentelemetry.io/collector/consumer v0.89.0 h1:MteKhkudX2L1ylbtdpSazO8SwyHSxl6fUEElc0rRLDQ= @@ -123,12 +146,16 @@ go.opentelemetry.io/collector/exporter v0.89.0 h1:8sYpJdKDQ2RuYOPKDsMz/lMJqp4WEW go.opentelemetry.io/collector/exporter v0.89.0/go.mod h1:zR8PFXMHlG0qPIEdRPNaefxDNj4UVP47uJ4vbHs+YME= go.opentelemetry.io/collector/extension v0.89.0 h1:iiaWIPPFqP4T0FSgl6+D1xRUhVnhsk88uk2BxCFqt7E= go.opentelemetry.io/collector/extension v0.89.0/go.mod h1:tBh5wD4AZ3xFO6M1CjkEEx2urexTqcAcgi9cJSPME3E= +go.opentelemetry.io/collector/extension/auth v0.89.0 h1:eo9JoWklZdSManEPLm1LqlwEq5v/YIsOupjZHdRYm3I= +go.opentelemetry.io/collector/extension/auth v0.89.0/go.mod h1:TzC5WYGMgsZvkpYSU1Jlwxh46tSDmWRLFsc9awXaedk= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 h1:iK4muX3KIMqKk0xwKcRzu4ravgCtUdzsvuxxdz6A27g= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018/go.mod h1:xGbRuw+GbutRtVVSEy3YR2yuOlEyiUMhN2M9DJljgqY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 h1:a2IHOZKphRzPagcvOHQHHUE0DlITFSKlIBwaWhPZpl4= go.opentelemetry.io/collector/pdata v1.0.0-rcv0018/go.mod h1:oNIcTRyEJYIfMcRYyyh5lquDU0Vl+ktTL6ka+p+dYvg= go.opentelemetry.io/collector/receiver v0.89.0 h1:wC/FB8e2Ej06jjNW2OiuZoyiSyB8TQNIzYyPlh9oRqI= go.opentelemetry.io/collector/receiver v0.89.0/go.mod h1:Rk7Bkz45fVdrcJaVDsPTnHa97ZfSs1ULO76LXc4kLN0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= go.opentelemetry.io/otel v1.20.0/go.mod 
h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= go.opentelemetry.io/otel/exporters/prometheus v0.43.0 h1:Skkl6akzvdWweXX6LLAY29tyFSO6hWZ26uDbVGTDXe8= diff --git a/exporter/honeycombmarkerexporter/logs_exporter.go b/exporter/honeycombmarkerexporter/logs_exporter.go index 3dfe36d5d9bc..608fe4ef0c9f 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter.go +++ b/exporter/honeycombmarkerexporter/logs_exporter.go @@ -4,25 +4,134 @@ package honeycombmarkerexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter" import ( + "bytes" "context" + "encoding/json" + "fmt" + "io" + "net/http" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/plog" - "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" ) type honeycombLogsExporter struct { - logger *zap.Logger + set component.TelemetrySettings markers []Marker + client *http.Client + config *Config } -func newLogsExporter(logger *zap.Logger, config *Config) *honeycombLogsExporter { +func newHoneycombLogsExporter(set component.TelemetrySettings, config *Config) (*honeycombLogsExporter, error) { + if config == nil { + return nil, fmt.Errorf("unable to create honeycombLogsExporter without config") + } + + for i, m := range config.Markers { + matchLogConditions, err := filterottl.NewBoolExprForLog(m.Rules.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, set) + if err != nil { + return nil, fmt.Errorf("failed to parse log conditions: %w", err) + } + + config.Markers[i].Rules.logBoolExpr = matchLogConditions + } logsExp := &honeycombLogsExporter{ - logger: logger, + set: set, markers: config.Markers, + config: config, } - return logsExp + return logsExp, nil } -func (e *honeycombLogsExporter) exportLogs(_ 
context.Context, _ plog.Logs) error { +func (e *honeycombLogsExporter) exportMarkers(ctx context.Context, ld plog.Logs) error { + for i := 0; i < ld.ResourceLogs().Len(); i++ { + rlogs := ld.ResourceLogs().At(i) + for j := 0; j < rlogs.ScopeLogs().Len(); j++ { + slogs := rlogs.ScopeLogs().At(j) + logs := slogs.LogRecords() + for k := 0; k < logs.Len(); k++ { + logRecord := logs.At(k) + tCtx := ottllog.NewTransformContext(logRecord, slogs.Scope(), rlogs.Resource()) + for _, m := range e.markers { + match, err := m.Rules.logBoolExpr.Eval(ctx, tCtx) + if err != nil { + return err + } + if match { + err := e.sendMarker(ctx, m, logRecord) + if err != nil { + return err + } + } + } + + } + } + } + return nil +} + +func (e *honeycombLogsExporter) sendMarker(ctx context.Context, marker Marker, logRecord plog.LogRecord) error { + requestMap := map[string]string{ + "type": marker.Type, + } + + messageValue, found := logRecord.Attributes().Get(marker.MessageKey) + if found { + requestMap["message"] = messageValue.AsString() + } + + URLValue, found := logRecord.Attributes().Get(marker.URLKey) + if found { + requestMap["url"] = URLValue.AsString() + } + + request, err := json.Marshal(requestMap) + if err != nil { + return err + } + + url := fmt.Sprintf("%s/1/markers/%s", e.config.APIURL, marker.DatasetSlug) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-Honeycomb-Team", fmt.Sprint(e.config.APIKey)) + + resp, err := e.client.Do(req) + if err != nil { + return fmt.Errorf("failed to send a request: %w", err) + } + + defer func() { + _ = resp.Body.Close() + }() + + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest { + b, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("marker creation failed with %s and unable to read response body: %w", resp.Status, err) + } + return 
fmt.Errorf("marker creation failed with %s and message: %s", resp.Status, b) + } + + return nil +} + +func (e *honeycombLogsExporter) start(_ context.Context, host component.Host) (err error) { + client, err := e.config.HTTPClientSettings.ToClient(host, e.set) + + if err != nil { + return err + } + + e.client = client + return nil } diff --git a/exporter/honeycombmarkerexporter/logs_exporter_test.go b/exporter/honeycombmarkerexporter/logs_exporter_test.go new file mode 100644 index 000000000000..a7e3cd8aad0f --- /dev/null +++ b/exporter/honeycombmarkerexporter/logs_exporter_test.go @@ -0,0 +1,272 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package honeycombmarkerexporter + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pdata/plog" +) + +func TestExportMarkers(t *testing.T) { + tests := []struct { + name string + config Config + attributeMap map[string]string + }{ + { + name: "all fields", + config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + MessageKey: "message", + URLKey: "url", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + attributeMap: map[string]string{ + "message": "this is a test message", + "url": "https://api.testhost.io", + "type": "test-type", + }, + }, + { + name: "no message key", + config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + URLKey: "url", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + attributeMap: map[string]string{ + "url": "https://api.testhost.io", + "type": "test-type", + }, + }, + { + name: "no url", + config: 
Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + MessageKey: "message", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + attributeMap: map[string]string{ + "message": "this is a test message", + "type": "test-type", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + markerServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + decodedBody := map[string]any{} + err := json.NewDecoder(req.Body).Decode(&decodedBody) + + require.NoError(t, err) + + assert.Equal(t, len(decodedBody), len(tt.attributeMap)) + + for attr := range tt.attributeMap { + assert.Equal(t, decodedBody[attr], tt.attributeMap[attr]) + } + assert.Contains(t, req.URL.Path, tt.config.Markers[0].DatasetSlug) + + apiKey := req.Header.Get("X-Honeycomb-Team") + assert.Equal(t, apiKey, string(tt.config.APIKey)) + + rw.WriteHeader(http.StatusAccepted) + })) + defer markerServer.Close() + + config := tt.config + config.APIURL = markerServer.URL + + f := NewFactory() + exp, err := f.CreateLogsExporter(context.Background(), exportertest.NewNopCreateSettings(), &config) + require.NoError(t, err) + + err = exp.Start(context.Background(), componenttest.NewNopHost()) + assert.NoError(t, err) + + logs := constructLogs(tt.attributeMap) + err = exp.ConsumeLogs(context.Background(), logs) + assert.NoError(t, err) + }) + } +} + +func constructLogs(attributes map[string]string) plog.Logs { + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + sl := rl.ScopeLogs().AppendEmpty() + l := sl.LogRecords().AppendEmpty() + + l.Body().SetStr("test") + for attr, attrVal := range attributes { + l.Attributes().PutStr(attr, attrVal) + } + return logs +} + +func TestExportMarkers_Error(t *testing.T) { + tests := []struct { + name string + config Config + responseCode int + errorMessage string + }{ + { + name: "unauthorized greater than 400", + 
config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + MessageKey: "message", + URLKey: "https://api.testhost.io", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + responseCode: http.StatusUnauthorized, + errorMessage: "marker creation failed with 401", + }, + { + name: "continue less than 200", + config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + MessageKey: "message", + URLKey: "https://api.testhost.io", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + responseCode: http.StatusSwitchingProtocols, + errorMessage: "marker creation failed with 101", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + markerServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.WriteHeader(tt.responseCode) + })) + defer markerServer.Close() + + config := tt.config + config.APIURL = markerServer.URL + + f := NewFactory() + exp, err := f.CreateLogsExporter(context.Background(), exportertest.NewNopCreateSettings(), &config) + require.NoError(t, err) + + err = exp.Start(context.Background(), componenttest.NewNopHost()) + assert.NoError(t, err) + + logs := constructLogs(map[string]string{}) + err = exp.ConsumeLogs(context.Background(), logs) + assert.ErrorContains(t, err, tt.errorMessage) + }) + } +} + +func TestExportMarkers_NoAPICall(t *testing.T) { + tests := []struct { + name string + config Config + }{ + { + name: "all fields", + config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + MessageKey: "message", + URLKey: "url", + DatasetSlug: "test-dataset", + Rules: Rules{ + LogConditions: []string{ + `body == "foo"`, + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + markerServer := 
httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + assert.Fail(t, "should not call the markers api") + rw.WriteHeader(http.StatusBadRequest) // 400 + })) + defer markerServer.Close() + + config := tt.config + config.APIURL = markerServer.URL + + f := NewFactory() + exp, err := f.CreateLogsExporter(context.Background(), exportertest.NewNopCreateSettings(), &config) + require.NoError(t, err) + + err = exp.Start(context.Background(), componenttest.NewNopHost()) + assert.NoError(t, err) + + logs := constructLogs(map[string]string{}) + err = exp.ConsumeLogs(context.Background(), logs) + assert.NoError(t, err) + }) + } +} diff --git a/exporter/honeycombmarkerexporter/testdata/config.yaml b/exporter/honeycombmarkerexporter/testdata/config.yaml index 3ca6508c9ffb..b3ec158d70b8 100644 --- a/exporter/honeycombmarkerexporter/testdata/config.yaml +++ b/exporter/honeycombmarkerexporter/testdata/config.yaml @@ -1,78 +1,74 @@ -honeycomb: +honeycombmarker: api_key: "test-apikey" api_url: "https://api.testhost.io" + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 1000 + retry_on_failure: + enabled: true + initial_interval: 5000000000 + randomization_factor: 0.5 + multiplier: 1.5 + max_interval: 30000000000 + max_elapsed_time: 300000000000 markers: - type: "fooType" - message_field: "test message" - url_field: "https://api.testhost.io" + message_key: "test message" + url_key: "https://api.testhost.io" + dataset_slug: "__all__" rules: - resource_conditions: - - IsMatch(attributes["test"], ".*") log_conditions: - body == "test" -honeycomb/color_no_type: - api_key: "test-apikey" - api_url: "https://api.testhost.io" - markers: - - color: "green" - message_field: "test message" - url_field: "https://api.testhost.io" - rules: - resource_conditions: - - IsMatch(attributes["test"], ".*") - log_conditions: - - body == "test" - -honeycomb/bad_syntax_log: +honeycombmarker/bad_syntax_log: api_key: "test-apikey" api_url: 
"https://api.testhost.io" markers: - type: "fooType" - message_field: "test message" - url_field: "https://api.testhost.io" + message_key: "test message" + url_key: "https://api.testhost.io" + dataset_slug: "__all__" rules: log_conditions: - body == "test" - set(attributes["body"], body) -honeycomb/no_conditions: +honeycombmarker/no_conditions: api_key: "test-apikey" api_url: "https://api.testhost.io" markers: - - color: "green" - message_field: "test message" - url_field: "https://api.testhost.io" + - type: "test-apikey" + message_key: "test message" + url_key: "https://api.testhost.io" + dataset_slug: "__all__" rules: -honeycomb/no_api_key: +honeycombmarker/no_api_key: api_key: "" api_url: "https://api.testhost.io" markers: - type: "fooType" - message_field: "test message" - url_field: "https://api.testhost.io" + message_key: "test message" + url_key: "https://api.testhost.io" + dataset_slug: "__all__" rules: - resource_conditions: - - IsMatch(attributes["test"], ".*") log_conditions: - body == "test" -honeycomb/no_api_url: +honeycombmarker/no_markers_supplied: + api_key: "test-apikey" + api_url: "https://api.testhost.io" + markers: + +honeycombmarker/no_dataset_slug: api_key: "test-apikey" - api_url: "" + api_url: "https://api.testhost.io" markers: - type: "fooType" - message_field: "test message" - url_field: "https://api.testhost.io" + message_key: "test message" + url_key: "https://api.testhost.io" rules: - resource_conditions: - - IsMatch(attributes["test"], ".*") log_conditions: - body == "test" -honeycomb/no_markers_supplied: - api_key: "test-apikey" - api_url: "https://api.testhost.io" - markers: - From e02abd813e30c40ddd71eea8608e22dee4e97eb5 Mon Sep 17 00:00:00 2001 From: Andreas Thaler Date: Thu, 16 Nov 2023 18:28:57 +0100 Subject: [PATCH 10/27] [receiver/receivercreator] Add support for k8s service discovery (#29022) **Description:** - Added a new watch to the k8s_observer extension for k8s services, which can be enabled using a new flag 
"observe_services". - Discovered entities are transformed into a new endpoint type `k8s.service`. - Adjusted the receivercreator to support the new type `k8s.service` **Link to tracking Issue:** [#29021](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29021) **Testing:** Added unit tests analogue to the available tests **Documentation:** Adjusted readme's of k8s_observer and receivercreator. Added description of new flags and typers. **Note:** Current implementation is working as described in the linked ticket. Please check the potential discussion points mentioned in the ticket: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29021#issuecomment-1801290614 --------- Co-authored-by: Antoine Toulme --- ...creator-support-k8s-service-discovery.yaml | 27 ++++++ extension/observer/endpoints.go | 37 ++++++++ extension/observer/endpoints_test.go | 38 +++++++- extension/observer/k8sobserver/README.md | 4 +- extension/observer/k8sobserver/config.go | 6 +- extension/observer/k8sobserver/config_test.go | 11 +-- extension/observer/k8sobserver/extension.go | 47 ++++++---- .../observer/k8sobserver/extension_test.go | 88 +++++++++++++++++++ extension/observer/k8sobserver/handler.go | 21 +++++ .../observer/k8sobserver/handler_test.go | 61 +++++++++++++ .../observer/k8sobserver/k8s_fixtures_test.go | 30 +++++++ .../observer/k8sobserver/service_endpoint.go | 40 +++++++++ .../k8sobserver/service_endpoint_test.go | 31 +++++++ .../observer/k8sobserver/testdata/config.yaml | 2 + receiver/receivercreator/README.md | 31 ++++++- receiver/receivercreator/config.go | 2 +- receiver/receivercreator/config_test.go | 11 +-- receiver/receivercreator/factory.go | 3 + receiver/receivercreator/fixtures_test.go | 19 ++++ receiver/receivercreator/rules.go | 2 +- receiver/receivercreator/rules_test.go | 1 + receiver/receivercreator/testdata/config.yaml | 2 + 22 files changed, 482 insertions(+), 32 deletions(-) create mode 100644 
.chloggen/receivercreator-support-k8s-service-discovery.yaml create mode 100644 extension/observer/k8sobserver/service_endpoint.go create mode 100644 extension/observer/k8sobserver/service_endpoint_test.go diff --git a/.chloggen/receivercreator-support-k8s-service-discovery.yaml b/.chloggen/receivercreator-support-k8s-service-discovery.yaml new file mode 100644 index 000000000000..d6b1a4fd25f7 --- /dev/null +++ b/.chloggen/receivercreator-support-k8s-service-discovery.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receivercreator + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added support for discovery of endpoints based on K8s services + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [29022] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: By discovering endpoints based on K8s services, dynamic probing of K8s services leveraging for example the httpcheckreceiver gets enabled + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/extension/observer/endpoints.go b/extension/observer/endpoints.go index 7754841bc758..89a57b255a89 100644 --- a/extension/observer/endpoints.go +++ b/extension/observer/endpoints.go @@ -23,6 +23,8 @@ const ( PortType EndpointType = "port" // PodType is a pod endpoint. PodType EndpointType = "pod" + // K8sServiceType is a service endpoint. + K8sServiceType EndpointType = "k8s.service" // K8sNodeType is a Kubernetes Node endpoint. K8sNodeType EndpointType = "k8s.node" // HostPortType is a hostport endpoint. @@ -34,6 +36,7 @@ const ( var ( _ EndpointDetails = (*Pod)(nil) _ EndpointDetails = (*Port)(nil) + _ EndpointDetails = (*K8sService)(nil) _ EndpointDetails = (*K8sNode)(nil) _ EndpointDetails = (*HostPort)(nil) _ EndpointDetails = (*Container)(nil) @@ -92,6 +95,40 @@ func (e Endpoint) equals(other Endpoint) bool { } } +// K8sService is a discovered k8s service. +type K8sService struct { + // Name of the service. + Name string + // UID is the unique ID in the cluster for the service. + UID string + // Labels is a map of user-specified metadata. + Labels map[string]string + // Annotations is a map of user-specified metadata. + Annotations map[string]string + // Namespace must be unique for services with same name. + Namespace string + // ClusterIP is the IP under which the service is reachable within the cluster. + ClusterIP string + // ServiceType is the type of the service: ClusterIP, NodePort, LoadBalancer, ExternalName + ServiceType string +} + +func (s *K8sService) Env() EndpointEnv { + return map[string]any{ + "uid": s.UID, + "name": s.Name, + "labels": s.Labels, + "annotations": s.Annotations, + "namespace": s.Namespace, + "cluster_ip": s.ClusterIP, + "service_type": s.ServiceType, + } +} + +func (s *K8sService) Type() EndpointType { + return K8sServiceType +} + // Pod is a discovered k8s pod. type Pod struct { // Name of the pod. 
diff --git a/extension/observer/endpoints_test.go b/extension/observer/endpoints_test.go index b81e9440049b..c90be0900d34 100644 --- a/extension/observer/endpoints_test.go +++ b/extension/observer/endpoints_test.go @@ -49,7 +49,7 @@ func TestEndpointEnv(t *testing.T) { }, }, { - name: "K8s port", + name: "K8s pod port", endpoint: Endpoint{ ID: EndpointID("port_id"), Target: "192.68.73.2", @@ -90,6 +90,42 @@ func TestEndpointEnv(t *testing.T) { "transport": ProtocolTCP, }, }, + { + name: "Service", + endpoint: Endpoint{ + ID: EndpointID("service_id"), + Target: "service.namespace", + Details: &K8sService{ + Name: "service_name", + UID: "service-uid", + Labels: map[string]string{ + "label_key": "label_val", + }, + Annotations: map[string]string{ + "annotation_1": "value_1", + }, + Namespace: "service-namespace", + ServiceType: "LoadBalancer", + ClusterIP: "192.68.73.2", + }, + }, + want: EndpointEnv{ + "type": "k8s.service", + "endpoint": "service.namespace", + "id": "service_id", + "name": "service_name", + "labels": map[string]string{ + "label_key": "label_val", + }, + "annotations": map[string]string{ + "annotation_1": "value_1", + }, + "uid": "service-uid", + "namespace": "service-namespace", + "cluster_ip": "192.68.73.2", + "service_type": "LoadBalancer", + }, + }, { name: "Host port", endpoint: Endpoint{ diff --git a/extension/observer/k8sobserver/README.md b/extension/observer/k8sobserver/README.md index 3f31e393ed18..4945ff0460ef 100644 --- a/extension/observer/k8sobserver/README.md +++ b/extension/observer/k8sobserver/README.md @@ -15,7 +15,7 @@ The `k8s_observer` is a [Receiver Creator](../../../receiver/receivercreator/README.md)-compatible "watch observer" that will detect and report -Kubernetes pod, port, and node endpoints via the Kubernetes API. +Kubernetes pod, port, service and node endpoints via the Kubernetes API. 
## Example Config @@ -26,6 +26,7 @@ extensions: node: ${env:K8S_NODE_NAME} observe_pods: true observe_nodes: true + observe_services: true receivers: receiver_creator: @@ -71,3 +72,4 @@ All fields are optional. | node | string | | The node name to limit the discovery of pod, port, and node endpoints. Providing no value (the default) results in discovering endpoints for all available nodes. | | observe_pods | bool | `true` | Whether to report observer pod and port endpoints. If `true` and `node` is specified it will only discover pod and port endpoints whose `spec.nodeName` matches the provided node name. If `true` and `node` isn't specified, it will discover all available pod and port endpoints. Please note that Collector connectivity to pods from other nodes is dependent on your cluster configuration and isn't guaranteed. | | observe_nodes | bool | `false` | Whether to report observer k8s.node endpoints. If `true` and `node` is specified it will only discover node endpoints whose `metadata.name` matches the provided node name. If `true` and `node` isn't specified, it will discover all available node endpoints. Please note that Collector connectivity to nodes is dependent on your cluster configuration and isn't guaranteed.| +| observe_services | bool | `false` | Whether to report observer k8s.service endpoints.| diff --git a/extension/observer/k8sobserver/config.go b/extension/observer/k8sobserver/config.go index 8dbfa38efaea..9f202be4c246 100644 --- a/extension/observer/k8sobserver/config.go +++ b/extension/observer/k8sobserver/config.go @@ -34,12 +34,14 @@ type Config struct { // it will only discover node endpoints whose `metadata.name` matches the provided node name. If `true` and // Node isn't specified, it will discover all available node endpoints. `false` by default. ObserveNodes bool `mapstructure:"observe_nodes"` + // ObserveServices determines whether to report observer service and port endpoints. `false` by default. 
+ ObserveServices bool `mapstructure:"observe_services"` } // Validate checks if the extension configuration is valid func (cfg *Config) Validate() error { - if !cfg.ObservePods && !cfg.ObserveNodes { - return fmt.Errorf("one of observe_pods and observe_nodes must be true") + if !cfg.ObservePods && !cfg.ObserveNodes && !cfg.ObserveServices { + return fmt.Errorf("one of observe_pods, observe_nodes and observe_services must be true") } return nil } diff --git a/extension/observer/k8sobserver/config_test.go b/extension/observer/k8sobserver/config_test.go index 28db85945589..20f29407a4da 100644 --- a/extension/observer/k8sobserver/config_test.go +++ b/extension/observer/k8sobserver/config_test.go @@ -39,10 +39,11 @@ func TestLoadConfig(t *testing.T) { { id: component.NewIDWithName(metadata.Type, "observe-all"), expected: &Config{ - Node: "", - APIConfig: k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeNone}, - ObservePods: true, - ObserveNodes: true, + Node: "", + APIConfig: k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeNone}, + ObservePods: true, + ObserveNodes: true, + ObserveServices: true, }, }, { @@ -51,7 +52,7 @@ func TestLoadConfig(t *testing.T) { }, { id: component.NewIDWithName(metadata.Type, "invalid_no_observing"), - expectedErr: "one of observe_pods and observe_nodes must be true", + expectedErr: "one of observe_pods, observe_nodes and observe_services must be true", }, } for _, tt := range tests { diff --git a/extension/observer/k8sobserver/extension.go b/extension/observer/k8sobserver/extension.go index 4ee3579e920e..80f4a4ad3efd 100644 --- a/extension/observer/k8sobserver/extension.go +++ b/extension/observer/k8sobserver/extension.go @@ -25,13 +25,14 @@ var _ observer.Observable = (*k8sObserver)(nil) type k8sObserver struct { *observer.EndpointsWatcher - telemetry component.TelemetrySettings - podListerWatcher cache.ListerWatcher - nodeListerWatcher cache.ListerWatcher - handler *handler - once *sync.Once - stop chan struct{} - config *Config + 
telemetry component.TelemetrySettings + podListerWatcher cache.ListerWatcher + serviceListerWatcher cache.ListerWatcher + nodeListerWatcher cache.ListerWatcher + handler *handler + once *sync.Once + stop chan struct{} + config *Config } // Start will populate the cache.SharedInformers for pods and nodes as configured and run them as goroutines. @@ -52,6 +53,14 @@ func (k *k8sObserver) Start(_ context.Context, _ component.Host) error { } go podInformer.Run(k.stop) } + if k.serviceListerWatcher != nil { + k.telemetry.Logger.Debug("creating and starting service informer") + serviceInformer := cache.NewSharedInformer(k.serviceListerWatcher, &v1.Service{}, 0) + if _, err := serviceInformer.AddEventHandler(k.handler); err != nil { + k.telemetry.Logger.Error("error adding event handler to service informer", zap.Error(err)) + } + go serviceInformer.Run(k.stop) + } if k.nodeListerWatcher != nil { k.telemetry.Logger.Debug("creating and starting node informer") nodeInformer := cache.NewSharedInformer(k.nodeListerWatcher, &v1.Node{}, 0) @@ -90,6 +99,13 @@ func newObserver(config *Config, set extension.CreateSettings) (extension.Extens podListerWatcher = cache.NewListWatchFromClient(restClient, "pods", v1.NamespaceAll, podSelector) } + var serviceListerWatcher cache.ListerWatcher + if config.ObserveServices { + var serviceSelector = fields.Everything() + set.Logger.Debug("observing services") + serviceListerWatcher = cache.NewListWatchFromClient(restClient, "services", v1.NamespaceAll, serviceSelector) + } + var nodeListerWatcher cache.ListerWatcher if config.ObserveNodes { var nodeSelector fields.Selector @@ -103,14 +119,15 @@ func newObserver(config *Config, set extension.CreateSettings) (extension.Extens } h := &handler{idNamespace: set.ID.String(), endpoints: &sync.Map{}, logger: set.TelemetrySettings.Logger} obs := &k8sObserver{ - EndpointsWatcher: observer.NewEndpointsWatcher(h, time.Second, set.TelemetrySettings.Logger), - telemetry: set.TelemetrySettings, - 
podListerWatcher: podListerWatcher, - nodeListerWatcher: nodeListerWatcher, - stop: make(chan struct{}), - config: config, - handler: h, - once: &sync.Once{}, + EndpointsWatcher: observer.NewEndpointsWatcher(h, time.Second, set.TelemetrySettings.Logger), + telemetry: set.TelemetrySettings, + podListerWatcher: podListerWatcher, + serviceListerWatcher: serviceListerWatcher, + nodeListerWatcher: nodeListerWatcher, + stop: make(chan struct{}), + config: config, + handler: h, + once: &sync.Once{}, } return obs, nil diff --git a/extension/observer/k8sobserver/extension_test.go b/extension/observer/k8sobserver/extension_test.go index 726ec58382e6..028849a23ea6 100644 --- a/extension/observer/k8sobserver/extension_test.go +++ b/extension/observer/k8sobserver/extension_test.go @@ -40,6 +40,94 @@ func TestNewExtension(t *testing.T) { require.NotNil(t, ext) } +func TestExtensionObserveServices(t *testing.T) { + factory := NewFactory() + config := factory.CreateDefaultConfig().(*Config) + mockServiceHost(t, config) + + set := extensiontest.NewNopCreateSettings() + set.ID = component.NewID(metadata.Type) + ext, err := newObserver(config, set) + require.NoError(t, err) + require.NotNil(t, ext) + + obs := ext.(*k8sObserver) + serviceListerWatcher := framework.NewFakeControllerSource() + obs.serviceListerWatcher = serviceListerWatcher + + serviceListerWatcher.Add(serviceWithClusterIP) + + require.NoError(t, ext.Start(context.Background(), componenttest.NewNopHost())) + + sink := &endpointSink{} + obs.ListAndWatch(sink) + + requireSink(t, sink, func() bool { + return len(sink.added) == 1 + }) + + assert.Equal(t, observer.Endpoint{ + ID: "k8s_observer/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{ + "env": "prod", + }, + ClusterIP: "1.2.3.4", + ServiceType: "ClusterIP", + }, + }, sink.added[0]) + + 
serviceListerWatcher.Modify(serviceWithClusterIPV2) + + requireSink(t, sink, func() bool { + return len(sink.changed) == 1 + }) + + assert.Equal(t, observer.Endpoint{ + ID: "k8s_observer/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{ + "env": "prod", + "service-version": "2", + }, + ClusterIP: "1.2.3.4", + ServiceType: "ClusterIP", + }, + }, sink.changed[0]) + + serviceListerWatcher.Delete(serviceWithClusterIPV2) + + requireSink(t, sink, func() bool { + return len(sink.removed) == 1 + }) + + assert.Equal(t, observer.Endpoint{ + ID: "k8s_observer/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{ + "env": "prod", + "service-version": "2", + }, + ClusterIP: "1.2.3.4", + ServiceType: "ClusterIP", + }, + }, sink.removed[0]) + + require.NoError(t, ext.Shutdown(context.Background())) +} + func TestExtensionObservePods(t *testing.T) { factory := NewFactory() config := factory.CreateDefaultConfig().(*Config) diff --git a/extension/observer/k8sobserver/handler.go b/extension/observer/k8sobserver/handler.go index d39dabbafe26..859738a5c9c0 100644 --- a/extension/observer/k8sobserver/handler.go +++ b/extension/observer/k8sobserver/handler.go @@ -47,6 +47,8 @@ func (h *handler) OnAdd(objectInterface any, _ bool) { switch object := objectInterface.(type) { case *v1.Pod: endpoints = convertPodToEndpoints(h.idNamespace, object) + case *v1.Service: + endpoints = convertServiceToEndpoints(h.idNamespace, object) case *v1.Node: endpoints = append(endpoints, convertNodeToEndpoint(h.idNamespace, object)) default: // unsupported @@ -67,6 +69,7 @@ func (h *handler) OnUpdate(oldObjectInterface, newObjectInterface any) { case *v1.Pod: newPod, ok := newObjectInterface.(*v1.Pod) if !ok { + 
h.logger.Warn("skip updating endpoint for pod as the update is of different type", zap.Any("oldPod", oldObjectInterface), zap.Any("newObject", newObjectInterface)) return } for _, e := range convertPodToEndpoints(h.idNamespace, oldObject) { @@ -76,9 +79,23 @@ func (h *handler) OnUpdate(oldObjectInterface, newObjectInterface any) { newEndpoints[e.ID] = e } + case *v1.Service: + newService, ok := newObjectInterface.(*v1.Service) + if !ok { + h.logger.Warn("skip updating endpoint for service as the update is of different type", zap.Any("oldService", oldObjectInterface), zap.Any("newObject", newObjectInterface)) + return + } + for _, e := range convertServiceToEndpoints(h.idNamespace, oldObject) { + oldEndpoints[e.ID] = e + } + for _, e := range convertServiceToEndpoints(h.idNamespace, newService) { + newEndpoints[e.ID] = e + } + case *v1.Node: newNode, ok := newObjectInterface.(*v1.Node) if !ok { + h.logger.Warn("skip updating endpoint for node as the update is of different type", zap.Any("oldNode", oldObjectInterface), zap.Any("newObject", newObjectInterface)) return } oldEndpoint := convertNodeToEndpoint(h.idNamespace, oldObject) @@ -144,6 +161,10 @@ func (h *handler) OnDelete(objectInterface any) { if object != nil { endpoints = convertPodToEndpoints(h.idNamespace, object) } + case *v1.Service: + if object != nil { + endpoints = convertServiceToEndpoints(h.idNamespace, object) + } case *v1.Node: if object != nil { endpoints = append(endpoints, convertNodeToEndpoint(h.idNamespace, object)) diff --git a/extension/observer/k8sobserver/handler_test.go b/extension/observer/k8sobserver/handler_test.go index 793800907567..402b5fcc4213 100644 --- a/extension/observer/k8sobserver/handler_test.go +++ b/extension/observer/k8sobserver/handler_test.go @@ -104,6 +104,67 @@ func TestPodEndpointsChanged(t *testing.T) { }, th.ListEndpoints()) } +func TestServiceEndpointsAdded(t *testing.T) { + th := newTestHandler() + th.OnAdd(serviceWithClusterIP, true) + assert.ElementsMatch(t, 
[]observer.Endpoint{ + { + ID: "test-1/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{"env": "prod"}, + ServiceType: "ClusterIP", + ClusterIP: "1.2.3.4", + }, + }}, th.ListEndpoints()) +} + +func TestServiceEndpointsRemoved(t *testing.T) { + th := newTestHandler() + th.OnAdd(serviceWithClusterIP, true) + th.OnDelete(serviceWithClusterIP) + assert.Empty(t, th.ListEndpoints()) +} + +func TestServiceEndpointsChanged(t *testing.T) { + th := newTestHandler() + // Nothing changed. + th.OnUpdate(serviceWithClusterIP, serviceWithClusterIP) + require.Empty(t, th.ListEndpoints()) + + // Labels changed. + changedLabels := serviceWithClusterIP.DeepCopy() + changedLabels.Labels["new-label"] = "value" + th.OnUpdate(serviceWithClusterIP, changedLabels) + + endpoints := th.ListEndpoints() + require.ElementsMatch(t, + []observer.EndpointID{"test-1/service-1-UID"}, + []observer.EndpointID{endpoints[0].ID}, + ) + + // Running state changed, one added and one removed. 
+ updatedService := serviceWithClusterIP.DeepCopy() + updatedService.Labels["updated-label"] = "true" + th.OnUpdate(serviceWithClusterIP, updatedService) + require.ElementsMatch(t, []observer.Endpoint{ + { + ID: "test-1/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{"env": "prod", "updated-label": "true"}, + ServiceType: "ClusterIP", + ClusterIP: "1.2.3.4", + }}, + }, th.ListEndpoints()) +} + func TestNodeEndpointsAdded(t *testing.T) { th := newTestHandler() th.OnAdd(node1V1, true) diff --git a/extension/observer/k8sobserver/k8s_fixtures_test.go b/extension/observer/k8sobserver/k8s_fixtures_test.go index 10153c4d6a52..4f0acd4aa273 100644 --- a/extension/observer/k8sobserver/k8s_fixtures_test.go +++ b/extension/observer/k8sobserver/k8s_fixtures_test.go @@ -104,6 +104,36 @@ func pointerBool(val bool) *bool { return &val } +// newService is a helper function for creating Services for testing. +func newService(name string) *v1.Service { + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: name, + UID: types.UID(name + "-UID"), + Labels: map[string]string{ + "env": "prod", + }, + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + ClusterIP: "1.2.3.4", + }, + } + + return service +} + +var serviceWithClusterIP = func() *v1.Service { + return newService("service-1") +}() + +var serviceWithClusterIPV2 = func() *v1.Service { + service := serviceWithClusterIP.DeepCopy() + service.Labels["service-version"] = "2" + return service +}() + // newNode is a helper function for creating Nodes for testing. 
func newNode(name, hostname string) *v1.Node { return &v1.Node{ diff --git a/extension/observer/k8sobserver/service_endpoint.go b/extension/observer/k8sobserver/service_endpoint.go new file mode 100644 index 000000000000..ee8957cd3ef4 --- /dev/null +++ b/extension/observer/k8sobserver/service_endpoint.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package k8sobserver // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver" + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer" +) + +// convertServiceToEndpoints converts a service instance into a slice of endpoints. The endpoints +// include the service itself only. +func convertServiceToEndpoints(idNamespace string, service *v1.Service) []observer.Endpoint { + serviceID := observer.EndpointID(fmt.Sprintf("%s/%s", idNamespace, service.UID)) + + serviceDetails := observer.K8sService{ + UID: string(service.UID), + Annotations: service.Annotations, + Labels: service.Labels, + Name: service.Name, + Namespace: service.Namespace, + ClusterIP: service.Spec.ClusterIP, + ServiceType: string(service.Spec.Type), + } + + endpoints := []observer.Endpoint{{ + ID: serviceID, + Target: generateServiceTarget(&serviceDetails), + Details: &serviceDetails, + }} + + return endpoints +} + +func generateServiceTarget(service *observer.K8sService) string { + return fmt.Sprintf("%s.%s.svc.cluster.local", service.Name, service.Namespace) +} diff --git a/extension/observer/k8sobserver/service_endpoint_test.go b/extension/observer/k8sobserver/service_endpoint_test.go new file mode 100644 index 000000000000..391fd448b633 --- /dev/null +++ b/extension/observer/k8sobserver/service_endpoint_test.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package k8sobserver // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver" + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer" +) + +func TestServiceObjectToEndpoint(t *testing.T) { + expectedEndpoints := []observer.Endpoint{ + { + ID: "namespace/service-1-UID", + Target: "service-1.default.svc.cluster.local", + Details: &observer.K8sService{ + Name: "service-1", + Namespace: "default", + UID: "service-1-UID", + Labels: map[string]string{"env": "prod"}, + ServiceType: "ClusterIP", + ClusterIP: "1.2.3.4", + }}, + } + + endpoints := convertServiceToEndpoints("namespace", serviceWithClusterIP) + require.Equal(t, expectedEndpoints, endpoints) +} diff --git a/extension/observer/k8sobserver/testdata/config.yaml b/extension/observer/k8sobserver/testdata/config.yaml index b1f1205cce8c..8e6163829025 100644 --- a/extension/observer/k8sobserver/testdata/config.yaml +++ b/extension/observer/k8sobserver/testdata/config.yaml @@ -6,8 +6,10 @@ k8s_observer/observe-all: auth_type: none observe_nodes: true observe_pods: true + observe_services: true k8s_observer/invalid_auth: auth_type: not a real auth type k8s_observer/invalid_no_observing: observe_nodes: false observe_pods: false + observe_services: false diff --git a/receiver/receivercreator/README.md b/receiver/receivercreator/README.md index 2ffa14f7723e..c8e59cfee632 100644 --- a/receiver/receivercreator/README.md +++ b/receiver/receivercreator/README.md @@ -122,6 +122,12 @@ Note that the backticks below are not typos--they indicate the value is set dyna None +`type == "k8s.service"` + +| Resource Attribute | Default | +|--------------------|-------------------| +| k8s.namespace.name | \`namespace\` | + `type == "k8s.node"` | Resource Attribute | Default | @@ -145,7 +151,7 @@ Similar to the per-endpoint type `resource_attributes` described above but for i ## Rule Expressions -Each rule must start with `type == 
("pod"|"port"|"hostport"|"container"|"k8s.node") &&` such that the rule matches +Each rule must start with `type == ("pod"|"port"|"hostport"|"container"|"k8s.service"|"k8s.node") &&` such that the rule matches only one endpoint type. Depending on the type of endpoint the rule is targeting it will have different variables available. @@ -204,6 +210,20 @@ targeting it will have different variables available. | transport | Transport protocol used by the endpoint (TCP or UDP) | | labels | User-specified metadata labels on the container | +### Kubernetes Service + +| Variable | Description | +|----------------|-------------------------------------------------------------------| +| type | `"k8s.service"` | +| id | ID of source endpoint | +| name | The name of the Kubernetes service | +| namespace | The namespace of the service | +| uid | The unique ID for the service | +| labels | The map of labels set on the service | +| annotations | The map of annotations set on the service | +| service_type | The type of the kubernetes service: ClusterIP, NodePort, LoadBalancer, ExternalName | +| cluster_ip | The cluster IP assigned to the service | + ### Kubernetes Node | Variable | Description | @@ -290,6 +310,15 @@ receivers: - container - pod - node + receivers: + httpcheck: + # Configure probing if standard prometheus annotations are set on the pod. + rule: type == "k8s.service" && annotations["prometheus.io/probe"] == "true" + config: + targets: + - endpoint: 'http://`endpoint`:`"prometheus.io/port" in annotations ? annotations["prometheus.io/port"] : 9090``"prometheus.io/path" in annotations ? 
annotations["prometheus.io/path"] : "/health"`' + method: GET + collection_interval: 10s processors: exampleprocessor: diff --git a/receiver/receivercreator/config.go b/receiver/receivercreator/config.go index 68deedf82abe..771f7085efa7 100644 --- a/receiver/receivercreator/config.go +++ b/receiver/receivercreator/config.go @@ -92,7 +92,7 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { for endpointType := range cfg.ResourceAttributes { switch endpointType { - case observer.ContainerType, observer.HostPortType, observer.K8sNodeType, observer.PodType, observer.PortType: + case observer.ContainerType, observer.K8sServiceType, observer.HostPortType, observer.K8sNodeType, observer.PodType, observer.PortType: default: return fmt.Errorf("resource attributes for unsupported endpoint type %q", endpointType) } diff --git a/receiver/receivercreator/config_test.go b/receiver/receivercreator/config_test.go index 2062a93c8c6f..64abb7db65ef 100644 --- a/receiver/receivercreator/config_test.go +++ b/receiver/receivercreator/config_test.go @@ -109,11 +109,12 @@ func TestLoadConfig(t *testing.T) { component.NewIDWithName("mock_observer", "with_name"), }, ResourceAttributes: map[observer.EndpointType]map[string]string{ - observer.ContainerType: {"container.key": "container.value"}, - observer.PodType: {"pod.key": "pod.value"}, - observer.PortType: {"port.key": "port.value"}, - observer.HostPortType: {"hostport.key": "hostport.value"}, - observer.K8sNodeType: {"k8s.node.key": "k8s.node.value"}, + observer.ContainerType: {"container.key": "container.value"}, + observer.PodType: {"pod.key": "pod.value"}, + observer.PortType: {"port.key": "port.value"}, + observer.HostPortType: {"hostport.key": "hostport.value"}, + observer.K8sServiceType: {"k8s.service.key": "k8s.service.value"}, + observer.K8sNodeType: {"k8s.node.key": "k8s.node.value"}, }, }, }, diff --git a/receiver/receivercreator/factory.go b/receiver/receivercreator/factory.go index 
a8d52d79c277..26c35128a8e7 100644 --- a/receiver/receivercreator/factory.go +++ b/receiver/receivercreator/factory.go @@ -39,6 +39,9 @@ func createDefaultConfig() component.Config { conventions.AttributeK8SPodUID: "`uid`", conventions.AttributeK8SNamespaceName: "`namespace`", }, + observer.K8sServiceType: map[string]string{ + conventions.AttributeK8SNamespaceName: "`namespace`", + }, observer.PortType: map[string]string{ conventions.AttributeK8SPodName: "`pod.name`", conventions.AttributeK8SPodUID: "`pod.uid`", diff --git a/receiver/receivercreator/fixtures_test.go b/receiver/receivercreator/fixtures_test.go index 9931965ddf45..069604d70344 100644 --- a/receiver/receivercreator/fixtures_test.go +++ b/receiver/receivercreator/fixtures_test.go @@ -26,6 +26,25 @@ var podEndpoint = observer.Endpoint{ Details: &pod, } +var service = observer.K8sService{ + UID: "uid-1", + Namespace: "default", + Name: "service-1", + Labels: map[string]string{ + "app": "redis2", + "region": "west-1", + }, + Annotations: map[string]string{ + "scrape": "true", + }, +} + +var serviceEndpoint = observer.Endpoint{ + ID: "service-1", + Target: "localhost", + Details: &service, +} + var portEndpoint = observer.Endpoint{ ID: "port-1", Target: "localhost:1234", diff --git a/receiver/receivercreator/rules.go b/receiver/receivercreator/rules.go index 1ff394a4e6bf..8262adeec7c1 100644 --- a/receiver/receivercreator/rules.go +++ b/receiver/receivercreator/rules.go @@ -22,7 +22,7 @@ type rule struct { // ruleRe is used to verify the rule starts type check. var ruleRe = regexp.MustCompile( - fmt.Sprintf(`^type\s*==\s*(%q|%q|%q|%q|%q)`, observer.PodType, observer.PortType, observer.HostPortType, observer.ContainerType, observer.K8sNodeType), + fmt.Sprintf(`^type\s*==\s*(%q|%q|%q|%q|%q|%q)`, observer.PodType, observer.K8sServiceType, observer.PortType, observer.HostPortType, observer.ContainerType, observer.K8sNodeType), ) // newRule creates a new rule instance. 
diff --git a/receiver/receivercreator/rules_test.go b/receiver/receivercreator/rules_test.go index b2e0f65ffb0e..4ecd0c8f85f8 100644 --- a/receiver/receivercreator/rules_test.go +++ b/receiver/receivercreator/rules_test.go @@ -28,6 +28,7 @@ func Test_ruleEval(t *testing.T) { {"basic port", args{`type == "port" && name == "http" && pod.labels["app"] == "redis"`, portEndpoint}, true, false}, {"basic hostport", args{`type == "hostport" && port == 1234 && process_name == "splunk"`, hostportEndpoint}, true, false}, {"basic pod", args{`type == "pod" && labels["region"] == "west-1"`, podEndpoint}, true, false}, + {"basic service", args{`type == "k8s.service" && labels["region"] == "west-1"`, serviceEndpoint}, true, false}, {"annotations", args{`type == "pod" && annotations["scrape"] == "true"`, podEndpoint}, true, false}, {"basic container", args{`type == "container" && labels["region"] == "east-1"`, containerEndpoint}, true, false}, {"basic k8s.node", args{`type == "k8s.node" && kubelet_endpoint_port == 10250`, k8sNodeEndpoint}, true, false}, diff --git a/receiver/receivercreator/testdata/config.yaml b/receiver/receivercreator/testdata/config.yaml index 0b5b42b9516d..f0632e39de08 100644 --- a/receiver/receivercreator/testdata/config.yaml +++ b/receiver/receivercreator/testdata/config.yaml @@ -25,5 +25,7 @@ receiver_creator/1: port.key: port.value hostport: hostport.key: hostport.value + k8s.service: + k8s.service.key: k8s.service.value k8s.node: k8s.node.key: k8s.node.value From 988196b62f428759fd594bc6e9d61278c0f25b74 Mon Sep 17 00:00:00 2001 From: Etienne Pelletier Date: Thu, 16 Nov 2023 10:04:58 -0800 Subject: [PATCH 11/27] feat: add IsDouble function (#29076) **Description:** Adds a new `IsDouble` function to facilitate type checking. Most useful when checking the type of a body to determine if it needs to be parsed or not.
**Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27895 **Testing:** Added unit test **Documentation:** Updated the func readme. Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> --- ...iennep.issue-27895-IsDouble-converter.yaml | 27 ++++++ pkg/ottl/ottlfuncs/README.md | 17 ++++ pkg/ottl/ottlfuncs/func_is_double.go | 45 ++++++++++ pkg/ottl/ottlfuncs/func_is_double_test.go | 84 +++++++++++++++++++ pkg/ottl/ottlfuncs/functions.go | 1 + 5 files changed, 174 insertions(+) create mode 100755 .chloggen/etiennep.issue-27895-IsDouble-converter.yaml create mode 100644 pkg/ottl/ottlfuncs/func_is_double.go create mode 100644 pkg/ottl/ottlfuncs/func_is_double_test.go diff --git a/.chloggen/etiennep.issue-27895-IsDouble-converter.yaml b/.chloggen/etiennep.issue-27895-IsDouble-converter.yaml new file mode 100755 index 000000000000..aa0d5e34c999 --- /dev/null +++ b/.chloggen/etiennep.issue-27895-IsDouble-converter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new IsDouble function to facilitate type checking. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [27895] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/pkg/ottl/ottlfuncs/README.md b/pkg/ottl/ottlfuncs/README.md index 554829b6b177..acf2ed70658a 100644 --- a/pkg/ottl/ottlfuncs/README.md +++ b/pkg/ottl/ottlfuncs/README.md @@ -298,6 +298,7 @@ Available Converters: - [Duration](#duration) - [Int](#int) - [IsBool](#isbool) +- [IsDouble](#isdouble) - [IsMap](#ismap) - [IsMatch](#ismatch) - [IsString](#isstring) @@ -507,6 +508,22 @@ Examples: - `IsBool(attributes["any key"])` +### IsDouble + +`IsDouble(value)` + +The `IsDouble` Converter returns true if the given value is a double. + +The `value` is either a path expression to a telemetry field to retrieve, or a literal. + +If `value` is a `float64` or a `pcommon.ValueTypeDouble` then returns `true`, otherwise returns `false`. 
+ +Examples: + +- `IsDouble(body)` + +- `IsDouble(attributes["maybe a double"])` + ### IsMap `IsMap(value)` diff --git a/pkg/ottl/ottlfuncs/func_is_double.go b/pkg/ottl/ottlfuncs/func_is_double.go new file mode 100644 index 000000000000..5ae4f4ac31c4 --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_is_double.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type IsDoubleArguments[K any] struct { + Target ottl.FloatGetter[K] +} + +func NewIsDoubleFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("IsDouble", &IsDoubleArguments[K]{}, createIsDoubleFunction[K]) +} + +func createIsDoubleFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*IsDoubleArguments[K]) + + if !ok { + return nil, fmt.Errorf("IsDoubleFactory args must be of type *IsDoubleArguments[K]") + } + + return isDouble(args.Target), nil +} + +// nolint:errorlint +func isDouble[K any](target ottl.FloatGetter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + _, err := target.Get(ctx, tCtx) + // Use type assertion because we don't want to check wrapped errors + switch err.(type) { + case ottl.TypeError: + return false, nil + case nil: + return true, nil + default: + return false, err + } + } +} diff --git a/pkg/ottl/ottlfuncs/func_is_double_test.go b/pkg/ottl/ottlfuncs/func_is_double_test.go new file mode 100644 index 000000000000..b4268024b03e --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_is_double_test.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + + 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +func Test_IsDouble(t *testing.T) { + tests := []struct { + name string + value any + expected bool + }{ + { + name: "float64", + value: float64(2.7), + expected: true, + }, + { + name: "float64 without decimal", + value: float64(55), + expected: true, + }, + { + name: "an integer", + value: int64(333), + expected: false, + }, + { + name: "ValueTypeDouble", + value: pcommon.NewValueDouble(5.5), + expected: true, + }, + { + name: "not a number", + value: "string", + expected: false, + }, + { + name: "ValueTypeSlice", + value: pcommon.NewValueSlice(), + expected: false, + }, + { + name: "nil", + value: nil, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + exprFunc := isDouble[any](&ottl.StandardFloatGetter[any]{ + Getter: func(context.Context, any) (any, error) { + return tt.value, nil + }, + }) + result, err := exprFunc(context.Background(), nil) + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +// nolint:errorlint +func Test_IsDouble_Error(t *testing.T) { + exprFunc := isString[any](&ottl.StandardStringGetter[any]{ + Getter: func(context.Context, any) (any, error) { + return nil, ottl.TypeError("") + }, + }) + result, err := exprFunc(context.Background(), nil) + assert.Equal(t, false, result) + assert.Error(t, err) + _, ok := err.(ottl.TypeError) + assert.False(t, ok) +} diff --git a/pkg/ottl/ottlfuncs/functions.go b/pkg/ottl/ottlfuncs/functions.go index e892135c45e8..10125e47c6a6 100644 --- a/pkg/ottl/ottlfuncs/functions.go +++ b/pkg/ottl/ottlfuncs/functions.go @@ -43,6 +43,7 @@ func converters[K any]() []ottl.Factory[K] { NewHoursFactory[K](), NewIntFactory[K](), NewIsBoolFactory[K](), + NewIsDoubleFactory[K](), NewIsMapFactory[K](), NewIsMatchFactory[K](), NewIsStringFactory[K](), From 55cdd5fa68384e9594e2a8f47a74b63e1fbf5d86 Mon Sep 17 00:00:00 2001 From: bryan-aguilar 
<46550959+bryan-aguilar@users.noreply.github.com> Date: Thu, 16 Nov 2023 10:08:04 -0800 Subject: [PATCH 12/27] [exporter/awsemf] Improve summary metric type NaN checks (#28894) **Description:** I have observed some behavior on a personal collector deployment where the EMF Exporter is still returning errors for `NaN` json marshalling. This was in a prometheus -> emf exporter metrics pipeline. I could not find the specific NaN value in the metrics when troubleshooting the error. I curled the `/metrics` endpoint and also tried using the logging exporter to try to get more information. I could not find where the NaN value was coming from so I took another look into the unit tests and found some possible code paths in which NaNs could slip through. **Link to tracking Issue:** Original issue https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26267 **Testing:** Added more unit tests. The summary unit tests got a slight refactor for two reasons: so I could get rid of the unnecessary typecasting, and so that we could more easily test out different combinations of quantile values. I have also added a few more histogram unit tests to just verify that all combinations of NaN values are being checked on their own. --- .chloggen/awsemf_SummaryNanCheck.yaml | 27 +++ exporter/awsemfexporter/datapoint.go | 9 + exporter/awsemfexporter/datapoint_test.go | 224 +++++++++++++++++++--- 3 files changed, 238 insertions(+), 22 deletions(-) create mode 100755 .chloggen/awsemf_SummaryNanCheck.yaml diff --git a/.chloggen/awsemf_SummaryNanCheck.yaml b/.chloggen/awsemf_SummaryNanCheck.yaml new file mode 100755 index 000000000000..0ae3767761fb --- /dev/null +++ b/.chloggen/awsemf_SummaryNanCheck.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. 
filelogreceiver) +component: awsemfexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Improve NaN value checking for Summary metric types. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [28894] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 54759b047441..b654a26dc5cb 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -351,6 +351,15 @@ func (dps summaryDataPointSlice) IsStaleOrNaN(i int) (bool, pcommon.Map) { if math.IsNaN(metric.Sum()) { return true, metric.Attributes() } + + values := metric.QuantileValues() + for i := 0; i < values.Len(); i++ { + quantile := values.At(i) + if math.IsNaN(quantile.Value()) || math.IsNaN(quantile.Quantile()) { + return true, metric.Attributes() + } + } + return false, metric.Attributes() } diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 638a98737f2c..28147cd92e3c 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -223,11 +223,11 @@ func generateTestSummaryMetricWithNaN(name string) pmetric.Metrics { 
summaryDatapoint.SetCount(uint64(5 * i)) summaryDatapoint.SetSum(math.NaN()) firstQuantile := summaryDatapoint.QuantileValues().AppendEmpty() - firstQuantile.SetQuantile(0.0) - firstQuantile.SetValue(1) + firstQuantile.SetQuantile(math.NaN()) + firstQuantile.SetValue(math.NaN()) secondQuantile := summaryDatapoint.QuantileValues().AppendEmpty() - secondQuantile.SetQuantile(100.0) - secondQuantile.SetValue(5) + secondQuantile.SetQuantile(math.NaN()) + secondQuantile.SetValue(math.NaN()) } return otelMetrics @@ -543,7 +543,7 @@ func TestIsStaleOrNaN_HistogramDataPointSlice(t *testing.T) { setFlagsFunc func(point pmetric.HistogramDataPoint) pmetric.HistogramDataPoint }{ { - name: "Histogram with NaNs", + name: "Histogram with all NaNs", histogramDPS: func() pmetric.HistogramDataPointSlice { histogramDPS := pmetric.NewHistogramDataPointSlice() histogramDP := histogramDPS.AppendEmpty() @@ -556,6 +556,48 @@ func TestIsStaleOrNaN_HistogramDataPointSlice(t *testing.T) { }(), boolAssertFunc: assert.True, }, + { + name: "Histogram with NaN Sum", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := pmetric.NewHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(math.NaN()) + histogramDP.SetMin(1234) + histogramDP.SetMax(1234) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Histogram with NaN Min", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := pmetric.NewHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(123) + histogramDP.SetMin(math.NaN()) + histogramDP.SetMax(123) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Histogram with nan Max", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := 
pmetric.NewHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(123) + histogramDP.SetMin(123) + histogramDP.SetMax(math.NaN()) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, { name: "Histogram with min and max", histogramDPS: func() pmetric.HistogramDataPointSlice { @@ -727,6 +769,62 @@ func TestIsStaleOrNaN_ExponentialHistogramDataPointSlice(t *testing.T) { }(), boolAssertFunc: assert.False, }, + { + name: "Exponential histogram with all possible NaN", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(math.NaN()) + histogramDP.SetMin(math.NaN()) + histogramDP.SetMax(math.NaN()) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Exponential histogram with NaN Sum", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(math.NaN()) + histogramDP.SetMin(1245) + histogramDP.SetMax(1234556) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Exponential histogram with NaN Min", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(1255) + histogramDP.SetMin(math.NaN()) + histogramDP.SetMax(12545) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: 
"Exponential histogram with NaN Max", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(512444) + histogramDP.SetMin(123) + histogramDP.SetMax(math.NaN()) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, { name: "Exponential histogram with NaNs", histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { @@ -862,49 +960,131 @@ func TestCalculateDeltaDatapoints_SummaryDataPointSlice(t *testing.T) { } func TestIsStaleOrNaN_SummaryDataPointSlice(t *testing.T) { + type qMetricObject struct { + value float64 + quantile float64 + } + type quantileTestObj struct { + sum float64 + count uint64 + qMetrics []qMetricObject + } testCases := []struct { name string - summaryMetricValue map[string]any + summaryMetricValue quantileTestObj expectedBoolAssert assert.BoolAssertionFunc setFlagsFunc func(point pmetric.SummaryDataPoint) pmetric.SummaryDataPoint }{ { - name: "summary with no nan values", - summaryMetricValue: map[string]any{"sum": float64(17.3), "count": uint64(17), "firstQuantile": float64(1), "secondQuantile": float64(5)}, + name: "summary with no nan values", + summaryMetricValue: quantileTestObj{ + sum: 17.3, + count: 17, + qMetrics: []qMetricObject{ + { + value: 1, + quantile: 0.5, + }, + { + value: 5, + quantile: 2.0, + }, + }, + }, expectedBoolAssert: assert.False, }, { - name: "Summary with nan values", - summaryMetricValue: map[string]any{"sum": math.NaN(), "count": uint64(25), "firstQuantile": math.NaN(), "secondQuantile": math.NaN()}, + name: "Summary with nan sum", + summaryMetricValue: quantileTestObj{ + sum: math.NaN(), + count: 17, + qMetrics: []qMetricObject{ + { + value: 1, + quantile: 0.5, + }, + { + value: 5, + quantile: 2.0, + }, + }, + }, expectedBoolAssert: assert.True, }, { - name: "Summary 
with set flag func", - summaryMetricValue: map[string]any{"sum": math.NaN(), "count": uint64(25), "firstQuantile": math.NaN(), "secondQuantile": math.NaN()}, + name: "Summary with no recorded value flag set to true", + summaryMetricValue: quantileTestObj{ + sum: 1245.65, + count: 17, + qMetrics: []qMetricObject{ + { + value: 1, + quantile: 0.5, + }, + { + value: 5, + quantile: 2.0, + }, + }, + }, expectedBoolAssert: assert.True, setFlagsFunc: func(point pmetric.SummaryDataPoint) pmetric.SummaryDataPoint { point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) return point }, }, + { + name: "Summary with nan quantile value", + summaryMetricValue: quantileTestObj{ + sum: 1245.65, + count: 17, + qMetrics: []qMetricObject{ + { + value: 1, + quantile: 0.5, + }, + { + value: math.NaN(), + quantile: 2.0, + }, + }, + }, + expectedBoolAssert: assert.True, + }, + { + name: "Summary with nan quantile", + summaryMetricValue: quantileTestObj{ + sum: 1245.65, + count: 17, + qMetrics: []qMetricObject{ + { + value: 1, + quantile: 0.5, + }, + { + value: 7.8, + quantile: math.NaN(), + }, + }, + }, + expectedBoolAssert: assert.True, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - // Given the summary datapoints with quantile 0, quantile 100, sum and count summaryDPS := pmetric.NewSummaryDataPointSlice() summaryDP := summaryDPS.AppendEmpty() - summaryDP.SetSum(tc.summaryMetricValue["sum"].(float64)) - summaryDP.SetCount(tc.summaryMetricValue["count"].(uint64)) + summaryDP.SetSum(tc.summaryMetricValue.sum) + summaryDP.SetCount(tc.summaryMetricValue.count) summaryDP.Attributes().PutStr("label1", "value1") - summaryDP.QuantileValues().EnsureCapacity(2) - firstQuantileValue := summaryDP.QuantileValues().AppendEmpty() - firstQuantileValue.SetQuantile(0) - firstQuantileValue.SetValue(tc.summaryMetricValue["firstQuantile"].(float64)) - secondQuantileValue := summaryDP.QuantileValues().AppendEmpty() - secondQuantileValue.SetQuantile(100) - 
secondQuantileValue.SetValue(tc.summaryMetricValue["secondQuantile"].(float64)) + summaryDP.QuantileValues().EnsureCapacity(len(tc.summaryMetricValue.qMetrics)) + for _, qMetric := range tc.summaryMetricValue.qMetrics { + newQ := summaryDP.QuantileValues().AppendEmpty() + newQ.SetValue(qMetric.value) + newQ.SetQuantile(qMetric.quantile) + } summaryDatapointSlice := summaryDataPointSlice{deltaMetricMetadata{}, summaryDPS} if tc.setFlagsFunc != nil { From 406ab387f1e44ebeaad9757ea15aa018be1740ba Mon Sep 17 00:00:00 2001 From: Rajkumar Rangaraj Date: Thu, 16 Nov 2023 10:09:23 -0800 Subject: [PATCH 13/27] [exporter/azuremonitor] Enhance AAD Documentation. (#29234) **Description:** * Update AAD documentation to use connection string instead of instrumentation key. Follow up to #28854 * Modified the ingestion version from 2.0 to 2.1 **Link to tracking Issue:** **Testing:** Existing tests. Output from manual run ``` json --------- Transmitting 30 items --------- {"kind": "exporter", "data_type": "logs", "name": "azuremonitor"} 2023-11-13T10:50:23.886-0800 debug azuremonitorexporter@v0.88.0/factory.go:139 Telemetry transmitted in 378.439395ms {"kind": "exporter", "data_type": "logs", "name": "azuremonitor"} 2023-11-13T10:50:23.886-0800 debug azuremonitorexporter@v0.88.0/factory.go:139 Response: 200 {"kind": "exporter", "data_type": "logs", "name": "azuremonitor"} 2023-11-13T10:50:23.886-0800 debug azuremonitorexporter@v0.88.0/factory.go:139 Items accepted/received: 30/30 {"kind": "exporter", "data_type": "logs", "name": "azuremonitor"} ``` **Documentation:** * Updated Authentication.md --- .../update-azuremonitor-service-version.yaml | 27 +++++++++++++++++++ .../azuremonitorexporter/AUTHENTICATION.md | 19 +++++++------ .../connection_string_parser.go | 2 +- .../connection_string_parser_test.go | 10 +++---- 4 files changed, 42 insertions(+), 16 deletions(-) create mode 100644 .chloggen/update-azuremonitor-service-version.yaml diff --git 
a/.chloggen/update-azuremonitor-service-version.yaml b/.chloggen/update-azuremonitor-service-version.yaml new file mode 100644 index 000000000000..a576169d5e6a --- /dev/null +++ b/.chloggen/update-azuremonitor-service-version.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: azuremonitorexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Updated Azure Monitor Exporter service version from v2.0 to v2.1. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [29234] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/exporter/azuremonitorexporter/AUTHENTICATION.md b/exporter/azuremonitorexporter/AUTHENTICATION.md index 8317edc1fc7f..c2c4b8dc8863 100644 --- a/exporter/azuremonitorexporter/AUTHENTICATION.md +++ b/exporter/azuremonitorexporter/AUTHENTICATION.md @@ -2,42 +2,41 @@ ## Local Authentication -The default authentication mechanism used by the Azure Monitor Exporter is "Local Authentication" - based solely on the Application Insights Instrumentation Key. An example of the exporters section of the configuration is +The default authentication mechanism used by the Azure Monitor Exporter is "Local Authentication", which relies exclusively on the `InstrumentationKey` obtained from the connection string of the Application Insights. Below is an illustrative example of the exporters section in a configuration file: ```yaml exporters: azuremonitor: - instrumentation_key: 00000000-0000-0000-0000-000000000000 + connection_string: "InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://ingestion.azuremonitor.com/" ``` -Where the zero guid is replaced with the instrumentation key of your Application Insights instance. +Use the connection string from your Application Insights instance. The same can be achieved by using an environment variable to hold the key ```yaml exporters: azuremonitor: - instrumentation_key: ${env:APP_INSIGHTS_KEY} + connection_string: ${env:APPLICATIONINSIGHTS_CONNECTION_STRING} ``` ## AAD/Entra Authentication -Local Authentication can be disabled in [Application Insights](https://learn.microsoft.com/en-us/azure/azure-monitor/app/azure-ad-authentication) and an AAD based identity can be used in conjunction with the instrumentation key. The instrumentation key is still required by the Exporter, but it is no long sufficient to just have this in order to import data. 
+Local Authentication can be disabled in [Application Insights](https://learn.microsoft.com/en-us/azure/azure-monitor/app/azure-ad-authentication) and an AAD based identity can be used in conjunction with the instrumentation key. The Azure Monitor Exporter does not support this approach directly, but it can be used with the [AAD Authentication Proxy](https://github.com/Azure/aad-auth-proxy) from the Azure Monitor product group. The AAD Auth Proxy is a separate container/side-car that proxies calls to the Application Insights ingestion endpoint and attaches a bearer token to each call, asserting an AAD identity. This identity is managed by a certificate in the container that is registered with a Service Principal in AAD. -To use this, both Azure Monitor Exporter and the AAD Auth Proxy require specific configuration. In the Exporter's configuration, it is necessary to override the default endpoint used to send data to. In the following example, it is assumed the AAD Auth Proxy is listening on localhost:8081. +To integrate this setup, both the Azure Monitor Exporter and the AAD Auth Proxy must be configured appropriately. For the Exporter, replace the ingestion endpoint in the connection string with the proxy endpoint. For instance, if the AAD Auth Proxy listens on localhost:8081, configure as follows: ```yaml exporters: azuremonitor: - instrumentation_key: ${env:APP_INSIGHTS_KEY} - endpoint: http://localhost:8081/v2.1/track + connection_string: "InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=http://localhost:8081" ``` -The key difference is this uses the `v2.1` endpoint, not the standard `v2` one. +The original `IngestionEndpoint` from the connection string needs to be set as the `TARGET_HOST` environment variable in the aad-auth-proxy configuration. In the docker compose file for AAD Auth Proxy, the following values need to be set: @@ -60,7 +59,7 @@ azuremonitor-ingestion-proxy: ``` - `AUDIENCE`: value is the generic Azure Monitor Scope. 
-- `TARGET_HOST`: the Application Insights IngestionEndpoint value from the Connection String, available in the Azure Portal. +- `TARGET_HOST`: the Application Insights `IngestionEndpoint` value from the Connection String, available in the Azure Portal. - `AAD_CLIENT_ID`: client id of the service principal representing the AAD identity to use. - `AAD_TENANT_ID`: id of the AAD Tenant the service principal exists in. - `AAD_CLIENT_CERTIFICATE_PATH`: path to the .pem certificate file containing the CERTIFICATE and PRIVATE KEY parts of the certificate registered with the service principal. diff --git a/exporter/azuremonitorexporter/connection_string_parser.go b/exporter/azuremonitorexporter/connection_string_parser.go index 15b4f57827fc..fd1a7d00e862 100644 --- a/exporter/azuremonitorexporter/connection_string_parser.go +++ b/exporter/azuremonitorexporter/connection_string_parser.go @@ -75,6 +75,6 @@ func getIngestionURL(ingestionEndpoint string) string { ingestionURL, _ = url.Parse(DefaultIngestionEndpoint) } - ingestionURL.Path = path.Join(ingestionURL.Path, "/v2/track") + ingestionURL.Path = path.Join(ingestionURL.Path, "/v2.1/track") return ingestionURL.String() } diff --git a/exporter/azuremonitorexporter/connection_string_parser_test.go b/exporter/azuremonitorexporter/connection_string_parser_test.go index ad8315f49159..cff893d4cd46 100644 --- a/exporter/azuremonitorexporter/connection_string_parser_test.go +++ b/exporter/azuremonitorexporter/connection_string_parser_test.go @@ -27,7 +27,7 @@ func TestParseConnectionString(t *testing.T) { }, want: &ConnectionVars{ InstrumentationKey: "00000000-0000-0000-0000-000000000000", - IngestionURL: "https://ingestion.azuremonitor.com/v2/track", + IngestionURL: "https://ingestion.azuremonitor.com/v2.1/track", }, wantError: false, }, @@ -38,7 +38,7 @@ func TestParseConnectionString(t *testing.T) { }, want: &ConnectionVars{ InstrumentationKey: "00000000-0000-0000-0000-000000000000", - IngestionURL: DefaultIngestionEndpoint + 
"v2/track", + IngestionURL: DefaultIngestionEndpoint + "v2.1/track", }, wantError: false, }, @@ -49,7 +49,7 @@ func TestParseConnectionString(t *testing.T) { }, want: &ConnectionVars{ InstrumentationKey: "00000000-0000-0000-0000-000000000000", - IngestionURL: "https://ingestion.azuremonitor.com/v2/track", + IngestionURL: "https://ingestion.azuremonitor.com/v2.1/track", }, wantError: false, }, @@ -93,7 +93,7 @@ func TestParseConnectionString(t *testing.T) { }, want: &ConnectionVars{ InstrumentationKey: "00000000-0000-0000-0000-000000000000", - IngestionURL: "https://ingestion.azuremonitor.com/v2/track", + IngestionURL: "https://ingestion.azuremonitor.com/v2.1/track", }, wantError: false, }, @@ -104,7 +104,7 @@ func TestParseConnectionString(t *testing.T) { }, want: &ConnectionVars{ InstrumentationKey: "00000000-0000-0000-0000-000000000000", - IngestionURL: "https://ingestion.azuremonitor.com/v2/track", + IngestionURL: "https://ingestion.azuremonitor.com/v2.1/track", }, wantError: false, }, From 60209828c3055c7b387c5c57dbf9f6a92f9b3d3b Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:02:48 -0700 Subject: [PATCH 14/27] [exporter/honeycombmarker] Fix default url and dataset_slug (#29309) **Description:** Fixes an issue with an incorrect default url. Also fixes issue where dataset slug was required. **Link to tracking Issue:** Related to https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27666 **Testing:** Added new tests and tested manually. 
**Documentation:** Updated the README --- .chloggen/honeycombmarker-fix-url-bug.yaml | 27 ++++++++++++++++ exporter/honeycombmarkerexporter/README.md | 31 +++++++++---------- exporter/honeycombmarkerexporter/config.go | 4 --- .../honeycombmarkerexporter/config_test.go | 22 ++++++++++--- exporter/honeycombmarkerexporter/factory.go | 4 +-- .../honeycombmarkerexporter/logs_exporter.go | 12 ++++++- .../logs_exporter_test.go | 26 +++++++++++++++- .../testdata/config.yaml | 9 +++++- 8 files changed, 105 insertions(+), 30 deletions(-) create mode 100755 .chloggen/honeycombmarker-fix-url-bug.yaml diff --git a/.chloggen/honeycombmarker-fix-url-bug.yaml b/.chloggen/honeycombmarker-fix-url-bug.yaml new file mode 100755 index 000000000000..87741a4ec702 --- /dev/null +++ b/.chloggen/honeycombmarker-fix-url-bug.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: honeycombmarkerexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix default api_url and dataset_slug + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [29309] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/exporter/honeycombmarkerexporter/README.md b/exporter/honeycombmarkerexporter/README.md index e6a4cfb04c6a..19b508eff0a6 100644 --- a/exporter/honeycombmarkerexporter/README.md +++ b/exporter/honeycombmarkerexporter/README.md @@ -1,31 +1,30 @@ @@ -0,0 +1,18 @@ # Honeycomb Marker Exporter -This exporter allows creating markers, via the Honeycomb Markers API, based on the look of incoming telemetry. +This exporter allows creating [markers](https://docs.honeycomb.io/working-with-your-data/markers/), via the [Honeycomb Markers API](https://docs.honeycomb.io/api/tag/Markers#operation/createMarker), based on the look of incoming telemetry. The following configuration options are supported: * `api_key` (Required): This is the API key for your Honeycomb account. -* `api_url` (Required): This sets the hostname to send marker data to. +* `api_url` (Optional): This sets the hostname to send marker data to. If not set, will default to `https://api.honeycomb.io/` * `markers` (Required): This is a list of configurations to create an event marker. - * `type` (Required): Specifies the marker type. - * `message_key`: This attribute will be used as the message. It describes the event marker. If necessary the value will be converted to a string. - * `url_key`: This attribute will be used as the url. It can be accessed through the event marker in Honeycomb. If necessary the value will be converted to a string. - * `rules` (Required): This is a list of OTTL rules that determine when to create an event marker. - * `log_conditions` (Required): A list of ottllog conditions that determine a match - Example: + * `type` (Required): Specifies the marker type. 
+ * `rules` (Required): This is a list of [OTTL](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl) rules that determine when to create an event marker. + * `log_conditions` (Required): A list of [OTTL log](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottllog) conditions that determine a match. The marker will be created if **ANY** condition matches. + * `dataset_slug` (Optional): The dataset in which to create the marker. If not set, will default to `__all__`. + * `message_key` (Optional): The key of the attribute whose value will be used as the marker's message. If necessary the value will be converted to a string. + * `url_key` (Optional): The key of the attribute whose value will be used as the marker's url. If necessary the value will be converted to a string. +Example: ```yaml exporters: honeycombmarker: - api_key: "environment-api-key" - api_url: "https://api.honeycomb.io" + api_key: {{env:HONEYCOMB_API_KEY}} markers: - - type: "marker-type" - message_key: "marker-message" - url_key: "marker-url" - dataset_slug: "__all__" + # Creates a new marker anytime the exporter sees a k8s event with a reason of Backoff + - type: k8s-backoff-events rules: - log_conditions: - - body == "test" -``` \ No newline at end of file + - IsMap(body) and IsMap(body["object"] and body["object"]["reason"] == "Backoff" +``` + diff --git a/exporter/honeycombmarkerexporter/config.go b/exporter/honeycombmarkerexporter/config.go index d94268f08f4b..ecfc9a3db729 100644 --- a/exporter/honeycombmarkerexporter/config.go +++ b/exporter/honeycombmarkerexporter/config.go @@ -73,10 +73,6 @@ func (cfg *Config) Validate() error { return fmt.Errorf("marker must have a type %v", m) } - if m.DatasetSlug == "" { - return fmt.Errorf("marker must have a dataset slug %v", m) - } - if len(m.Rules.LogConditions) == 0 { return fmt.Errorf("marker must have rules %v", m) } diff --git 
a/exporter/honeycombmarkerexporter/config_test.go b/exporter/honeycombmarkerexporter/config_test.go index 85a33fc701bb..6c46d7a1d1f5 100644 --- a/exporter/honeycombmarkerexporter/config_test.go +++ b/exporter/honeycombmarkerexporter/config_test.go @@ -28,6 +28,23 @@ func TestLoadConfig(t *testing.T) { }{ { id: component.NewIDWithName(metadata.Type, ""), + expected: &Config{ + APIKey: "test-apikey", + APIURL: "https://api.honeycomb.io", + Markers: []Marker{ + { + Type: "fooType", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + }, + { + id: component.NewIDWithName(metadata.Type, "all_fields"), expected: &Config{ QueueSettings: exporterhelper.NewDefaultQueueSettings(), RetrySettings: exporterhelper.NewDefaultRetrySettings(), @@ -43,7 +60,7 @@ func TestLoadConfig(t *testing.T) { `body == "test"`, }, }, - DatasetSlug: "__all__", + DatasetSlug: "testing", }, }, }, @@ -60,9 +77,6 @@ func TestLoadConfig(t *testing.T) { { id: component.NewIDWithName(metadata.Type, "no_markers_supplied"), }, - { - id: component.NewIDWithName(metadata.Type, "no_dataset_slug"), - }, } for _, tt := range tests { diff --git a/exporter/honeycombmarkerexporter/factory.go b/exporter/honeycombmarkerexporter/factory.go index 0d4adb2f947b..e40f3240e83e 100644 --- a/exporter/honeycombmarkerexporter/factory.go +++ b/exporter/honeycombmarkerexporter/factory.go @@ -23,9 +23,7 @@ func NewFactory() exporter.Factory { func createDefaultConfig() component.Config { return &Config{ - APIKey: "", - APIURL: "api.honeycomb.io:443", - Markers: []Marker{}, + APIURL: "https://api.honeycomb.io", } } diff --git a/exporter/honeycombmarkerexporter/logs_exporter.go b/exporter/honeycombmarkerexporter/logs_exporter.go index 608fe4ef0c9f..07c595acbd6e 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter.go +++ b/exporter/honeycombmarkerexporter/logs_exporter.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "net/http" + "strings" "go.opentelemetry.io/collector/component" 
"go.opentelemetry.io/collector/pdata/plog" @@ -19,6 +20,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" ) +const ( + defaultDatasetSlug = "__all__" +) + type honeycombLogsExporter struct { set component.TelemetrySettings markers []Marker @@ -95,7 +100,12 @@ func (e *honeycombLogsExporter) sendMarker(ctx context.Context, marker Marker, l return err } - url := fmt.Sprintf("%s/1/markers/%s", e.config.APIURL, marker.DatasetSlug) + datasetSlug := marker.DatasetSlug + if datasetSlug == "" { + datasetSlug = defaultDatasetSlug + } + + url := fmt.Sprintf("%s/1/markers/%s", strings.TrimRight(e.config.APIURL, "/"), datasetSlug) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) if err != nil { return err diff --git a/exporter/honeycombmarkerexporter/logs_exporter_test.go b/exporter/honeycombmarkerexporter/logs_exporter_test.go index a7e3cd8aad0f..4a69de22b57f 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter_test.go +++ b/exporter/honeycombmarkerexporter/logs_exporter_test.go @@ -22,6 +22,7 @@ func TestExportMarkers(t *testing.T) { name string config Config attributeMap map[string]string + expectedURL string }{ { name: "all fields", @@ -46,6 +47,7 @@ func TestExportMarkers(t *testing.T) { "url": "https://api.testhost.io", "type": "test-type", }, + expectedURL: "/1/markers/test-dataset", }, { name: "no message key", @@ -68,6 +70,7 @@ func TestExportMarkers(t *testing.T) { "url": "https://api.testhost.io", "type": "test-type", }, + expectedURL: "/1/markers/test-dataset", }, { name: "no url", @@ -90,6 +93,27 @@ func TestExportMarkers(t *testing.T) { "message": "this is a test message", "type": "test-type", }, + expectedURL: "/1/markers/test-dataset", + }, + { + name: "no dataset_slug", + config: Config{ + APIKey: "test-apikey", + Markers: []Marker{ + { + Type: "test-type", + Rules: Rules{ + LogConditions: []string{ + `body == "test"`, + }, + }, + }, + }, + }, + attributeMap: 
map[string]string{ + "type": "test-type", + }, + expectedURL: "/1/markers/__all__", }, } @@ -106,7 +130,7 @@ func TestExportMarkers(t *testing.T) { for attr := range tt.attributeMap { assert.Equal(t, decodedBody[attr], tt.attributeMap[attr]) } - assert.Contains(t, req.URL.Path, tt.config.Markers[0].DatasetSlug) + assert.Contains(t, req.URL.Path, tt.expectedURL) apiKey := req.Header.Get("X-Honeycomb-Team") assert.Equal(t, apiKey, string(tt.config.APIKey)) diff --git a/exporter/honeycombmarkerexporter/testdata/config.yaml b/exporter/honeycombmarkerexporter/testdata/config.yaml index b3ec158d70b8..7e348d40ee77 100644 --- a/exporter/honeycombmarkerexporter/testdata/config.yaml +++ b/exporter/honeycombmarkerexporter/testdata/config.yaml @@ -1,4 +1,11 @@ honeycombmarker: + api_key: "test-apikey" + markers: + - type: "fooType" + rules: + log_conditions: + - body == "test" +honeycombmarker/all_fields: api_key: "test-apikey" api_url: "https://api.testhost.io" sending_queue: @@ -16,7 +23,7 @@ honeycombmarker: - type: "fooType" message_key: "test message" url_key: "https://api.testhost.io" - dataset_slug: "__all__" + dataset_slug: "testing" rules: log_conditions: - body == "test" From f43bb9da544940a00cce8266fb0320eb0b046228 Mon Sep 17 00:00:00 2001 From: Faith Chikwekwe Date: Thu, 16 Nov 2023 14:43:36 -0500 Subject: [PATCH 15/27] feat: change honeycombmarkerexporter to alpha (#29272) **Description:** Update Honeycomb Marker Exporter to alpha status **Link to tracking Issue:** #27666 **Testing:** **Documentation:** --------- Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> --- .chloggen/feat_hny-exporter-alpha.yaml | 27 +++++++++++++++++++ .../internal/metadata/generated_status.go | 2 +- .../honeycombmarkerexporter/metadata.yaml | 2 +- 3 files changed, 29 insertions(+), 2 deletions(-) create mode 100755 .chloggen/feat_hny-exporter-alpha.yaml diff --git a/.chloggen/feat_hny-exporter-alpha.yaml b/.chloggen/feat_hny-exporter-alpha.yaml new file mode 
100755 index 000000000000..14e435c48973 --- /dev/null +++ b/.chloggen/feat_hny-exporter-alpha.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: honeycombmarkerexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Change honeycombmarkerexporter to alpha + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [27666] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/exporter/honeycombmarkerexporter/internal/metadata/generated_status.go b/exporter/honeycombmarkerexporter/internal/metadata/generated_status.go index e475b1c0f3b6..bcebacec01e5 100644 --- a/exporter/honeycombmarkerexporter/internal/metadata/generated_status.go +++ b/exporter/honeycombmarkerexporter/internal/metadata/generated_status.go @@ -8,5 +8,5 @@ import ( const ( Type = "honeycombmarker" - LogsStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelAlpha ) diff --git a/exporter/honeycombmarkerexporter/metadata.yaml b/exporter/honeycombmarkerexporter/metadata.yaml index 51361e199f72..2c795b8b352e 100644 --- a/exporter/honeycombmarkerexporter/metadata.yaml +++ b/exporter/honeycombmarkerexporter/metadata.yaml @@ -3,7 +3,7 @@ type: honeycombmarker status: class: exporter stability: - development: [logs] + alpha: [logs] distributions: [] codeowners: active: [TylerHelmuth, fchikwekwe] From aae1def309a23c618b50de4f3d042a839d0e8b97 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:15:36 -0700 Subject: [PATCH 16/27] [exporter/honeycombmarker] Add generated README header (#29311) --- exporter/honeycombmarkerexporter/README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/exporter/honeycombmarkerexporter/README.md b/exporter/honeycombmarkerexporter/README.md index 19b508eff0a6..a7b1a9bf487d 100644 --- a/exporter/honeycombmarkerexporter/README.md +++ b/exporter/honeycombmarkerexporter/README.md @@ -1,5 +1,14 @@ -@@ -0,0 +1,18 @@ # Honeycomb Marker Exporter + +| Status | | +| ------------- |-----------| +| Stability | [alpha]: logs | +| Distributions | [] | +| Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fhoneycombmarker%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fhoneycombmarker) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fhoneycombmarker%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fhoneycombmarker) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@fchikwekwe](https://www.github.com/fchikwekwe) | + +[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha + This exporter allows creating [markers](https://docs.honeycomb.io/working-with-your-data/markers/), via the [Honeycomb Markers API](https://docs.honeycomb.io/api/tag/Markers#operation/createMarker), based on the look of incoming telemetry. From 6440f03159eefbaa5e6ce986da8f1e3fce6214a7 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Thu, 16 Nov 2023 12:32:35 -0800 Subject: [PATCH 17/27] [chore][pkg/translator/azure] Add missing code owner @cparkins (#28651) **Description:** This fixes inconsistency introduced with the creation of this package. In #25096 @cparkins was added as a code owner in the [metadata.yaml](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/translator/azure/metadata.yaml) but not the top level `CODEOWNERS` file. 
Co-authored-by: Alex Boten From 2382a664cd2a0ba1cf77efe6c20864674ea6825b Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 16 Nov 2023 14:29:00 -0700 Subject: [PATCH 18/27] [exporter/honeycombmarker] Update distributions (#29312) --- .github/dependabot.yml | 8 ++++---- exporter/honeycombmarkerexporter/README.md | 6 +++--- exporter/honeycombmarkerexporter/metadata.yaml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e4e8b1db1f20..0bd0103459b3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -158,22 +158,22 @@ updates: interval: "weekly" day: "wednesday" - package-ecosystem: "gomod" - directory: "/exporter/influxdbexporter" + directory: "/exporter/honeycombmarkerexporter" schedule: interval: "weekly" day: "wednesday" - package-ecosystem: "gomod" - directory: "/exporter/instanaexporter" + directory: "/exporter/influxdbexporter" schedule: interval: "weekly" day: "wednesday" - package-ecosystem: "gomod" - directory: "/exporter/kafkaexporter" + directory: "/exporter/instanaexporter" schedule: interval: "weekly" day: "wednesday" - package-ecosystem: "gomod" - directory: "/exporter/kineticaexporter" + directory: "/exporter/kafkaexporter" schedule: interval: "weekly" day: "wednesday" diff --git a/exporter/honeycombmarkerexporter/README.md b/exporter/honeycombmarkerexporter/README.md index a7b1a9bf487d..999ad5d0baf4 100644 --- a/exporter/honeycombmarkerexporter/README.md +++ b/exporter/honeycombmarkerexporter/README.md @@ -3,11 +3,12 @@ | Status | | | ------------- |-----------| | Stability | [alpha]: logs | -| Distributions | [] | +| Distributions | [contrib] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fhoneycombmarker%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fhoneycombmarker) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fhoneycombmarker%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fhoneycombmarker) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@fchikwekwe](https://www.github.com/fchikwekwe) | [alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib This exporter allows creating [markers](https://docs.honeycomb.io/working-with-your-data/markers/), via the [Honeycomb Markers API](https://docs.honeycomb.io/api/tag/Markers#operation/createMarker), based on the look of incoming telemetry. 
@@ -34,6 +35,5 @@ exporters: - type: k8s-backoff-events rules: - log_conditions: - - IsMap(body) and IsMap(body["object"] and body["object"]["reason"] == "Backoff" + - IsMap(body) and IsMap(body["object"]) and body["object"]["reason"] == "Backoff" ``` - diff --git a/exporter/honeycombmarkerexporter/metadata.yaml b/exporter/honeycombmarkerexporter/metadata.yaml index 2c795b8b352e..7261a3465c99 100644 --- a/exporter/honeycombmarkerexporter/metadata.yaml +++ b/exporter/honeycombmarkerexporter/metadata.yaml @@ -4,6 +4,6 @@ status: class: exporter stability: alpha: [logs] - distributions: [] + distributions: [contrib] codeowners: active: [TylerHelmuth, fchikwekwe] From 692589bcdff2a72333dafcf0d917d23a0c6bd3ca Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Thu, 16 Nov 2023 14:31:38 -0800 Subject: [PATCH 19/27] [exporter/influxdb] fix panic on init (#29296) When InfluxDB v1 compatibility is enabled AND username&password are set, the exporter panics. Not any more! Fixes #27084 **Testing:** I've added one regression test. --- .chloggen/influxdb-v1compat-no-panic.yaml | 27 +++++++++++++++++++++++ exporter/influxdbexporter/writer.go | 12 +++++++--- exporter/influxdbexporter/writer_test.go | 21 ++++++++++++++++++ 3 files changed, 57 insertions(+), 3 deletions(-) create mode 100755 .chloggen/influxdb-v1compat-no-panic.yaml diff --git a/.chloggen/influxdb-v1compat-no-panic.yaml b/.chloggen/influxdb-v1compat-no-panic.yaml new file mode 100755 index 000000000000..0fd3edf2f083 --- /dev/null +++ b/.chloggen/influxdb-v1compat-no-panic.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: influxdbexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
+note: When InfluxDB v1 compatibility is enabled AND username&password are set, the exporter panics. Not any more! + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [27084] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/influxdbexporter/writer.go b/exporter/influxdbexporter/writer.go index 5bd9df809d95..1c45767d8312 100644 --- a/exporter/influxdbexporter/writer.go +++ b/exporter/influxdbexporter/writer.go @@ -88,15 +88,21 @@ func composeWriteURL(config *Config) (string, error) { queryValues.Set("db", config.V1Compatibility.DB) if config.V1Compatibility.Username != "" && config.V1Compatibility.Password != "" { - var basicAuth []byte - base64.StdEncoding.Encode(basicAuth, []byte(config.V1Compatibility.Username+":"+string(config.V1Compatibility.Password))) - config.HTTPClientSettings.Headers["Authorization"] = configopaque.String("Basic " + string(basicAuth)) + basicAuth := base64.StdEncoding.EncodeToString( + []byte(config.V1Compatibility.Username + ":" + string(config.V1Compatibility.Password))) + if config.HTTPClientSettings.Headers == nil { + config.HTTPClientSettings.Headers = make(map[string]configopaque.String, 1) + } + config.HTTPClientSettings.Headers["Authorization"] = configopaque.String("Basic " + basicAuth) } } else { 
queryValues.Set("org", config.Org) queryValues.Set("bucket", config.Bucket) if config.Token != "" { + if config.HTTPClientSettings.Headers == nil { + config.HTTPClientSettings.Headers = make(map[string]configopaque.String, 1) + } config.HTTPClientSettings.Headers["Authorization"] = "Token " + config.Token } } diff --git a/exporter/influxdbexporter/writer_test.go b/exporter/influxdbexporter/writer_test.go index 2cb240a25602..3c1776809f4d 100644 --- a/exporter/influxdbexporter/writer_test.go +++ b/exporter/influxdbexporter/writer_test.go @@ -186,3 +186,24 @@ func Test_influxHTTPWriterBatch_EnqueuePoint_emptyTagValue(t *testing.T) { assert.Equal(t, "m,k=v f=1i 1000000002000", strings.TrimSpace(string(recordedRequestBody))) } } + +func Test_composeWriteURL_doesNotPanic(t *testing.T) { + assert.NotPanics(t, func() { + cfg := &Config{} + _, err := composeWriteURL(cfg) + assert.NoError(t, err) + }) + + assert.NotPanics(t, func() { + cfg := &Config{ + V1Compatibility: V1Compatibility{ + Enabled: true, + DB: "my-db", + Username: "my-username", + Password: "my-password", + }, + } + _, err := composeWriteURL(cfg) + assert.NoError(t, err) + }) +} From 0fd44fb8d8169e8a57054e7f61e20f65c32aee60 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:41:47 -0700 Subject: [PATCH 20/27] [chore] downgrade golangci-lint (#29316) https://github.com/open-telemetry/opentelemetry-collector/issues/8939 Co-authored-by: Alex Boten --- internal/tools/go.mod | 2 +- internal/tools/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 31d8bc6b5785..de47d56eb594 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/client9/misspell v0.3.4 github.com/daixiang0/gci v0.11.2 - github.com/golangci/golangci-lint v1.55.2 + github.com/golangci/golangci-lint v1.55.1 github.com/google/addlicense v1.1.1 
github.com/jcchavezs/porto v0.6.0 github.com/jstemmer/go-junit-report v1.0.0 diff --git a/internal/tools/go.sum b/internal/tools/go.sum index dcc30305ee53..4aab7263ab43 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -260,8 +260,8 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= -github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8= -github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM= +github.com/golangci/golangci-lint v1.55.1 h1:DL2j9Eeapg1N3WEkKnQFX5L40SYtjZZJjGVdyEgNrDc= +github.com/golangci/golangci-lint v1.55.1/go.mod h1:z00biPRqjo5MISKV1+RWgONf2KvrPDmfqxHpHKB6bI4= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= From f4d1fe07c2121ffe7ff1e2ee87db02682b503f3c Mon Sep 17 00:00:00 2001 From: Alex Boten Date: Thu, 16 Nov 2023 15:54:54 -0800 Subject: [PATCH 21/27] [chore] fix issuegenerator path (#29318) workflows have been failing and then trying to use `issuegenerator` to create issues, but the path for the tool was incorrect. 
see https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/6895702499/job/18761957296 as an example Signed-off-by: Alex Boten --- .github/workflows/load-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/load-tests.yml b/.github/workflows/load-tests.yml index c9f3fbdf7dee..0fe7a85f7a4a 100644 --- a/.github/workflows/load-tests.yml +++ b/.github/workflows/load-tests.yml @@ -115,7 +115,7 @@ jobs: - name: GitHub Issue Generator if: ${{ failure() && github.ref == 'refs/heads/main' }} - run: issuegenerator $TEST_RESULTS + run: ./.tools/issuegenerator $TEST_RESULTS update-benchmarks: runs-on: ubuntu-latest From 892b7c73aab028c8ae112deaa396176877391134 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 16 Nov 2023 19:12:02 -0700 Subject: [PATCH 22/27] [receiver/kafkametrics] skip flaky test (#29317) https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26293 --- receiver/kafkametricsreceiver/integration_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/receiver/kafkametricsreceiver/integration_test.go b/receiver/kafkametricsreceiver/integration_test.go index dcaa40511588..956ba1b8fabe 100644 --- a/receiver/kafkametricsreceiver/integration_test.go +++ b/receiver/kafkametricsreceiver/integration_test.go @@ -26,6 +26,8 @@ const ( ) func TestIntegration(t *testing.T) { + t.Skip("Flaky Test - See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26293") + uid := fmt.Sprintf("-%s", uuid.NewString()) networkName := "kafka-network" + uid zkContainerName := "zookeeper" + uid From 32cbfffa0c0c2b3840fe1045e3c21805872947a7 Mon Sep 17 00:00:00 2001 From: Jon Date: Thu, 16 Nov 2023 22:51:46 -0500 Subject: [PATCH 23/27] [receiver/mongodbatlasreceiver] add metrics project config (#28866) **Description:** This feature adds a Project Config for the metrics to filter by Project name and or clusters. 
**Link to tracking Issue:** #28865 **Testing:** - Added test for cluster filtering - Tested project name alone, project name with IncludeClusters and project name with ExcludeClusters on a live environment with success. **Documentation:** Added optional project config fields to README --------- Co-authored-by: Daniel Jaglowski --- ...asreceiver-add-metrics-project-config.yaml | 27 ++++ receiver/mongodbatlasreceiver/README.md | 5 + receiver/mongodbatlasreceiver/config.go | 7 ++ receiver/mongodbatlasreceiver/config_test.go | 41 +++++++ receiver/mongodbatlasreceiver/receiver.go | 116 ++++++++++++++---- .../mongodbatlasreceiver/receiver_test.go | 66 ++++++++++ 6 files changed, 239 insertions(+), 23 deletions(-) create mode 100755 .chloggen/mongodbatlasreceiver-add-metrics-project-config.yaml diff --git a/.chloggen/mongodbatlasreceiver-add-metrics-project-config.yaml b/.chloggen/mongodbatlasreceiver-add-metrics-project-config.yaml new file mode 100755 index 000000000000..0e8208e25909 --- /dev/null +++ b/.chloggen/mongodbatlasreceiver-add-metrics-project-config.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/mongodbatlasreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: adds project config to mongodbatlas metrics to filter by project name and clusters. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [28865] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/mongodbatlasreceiver/README.md b/receiver/mongodbatlasreceiver/README.md index 93f675553254..29118d287b6c 100644 --- a/receiver/mongodbatlasreceiver/README.md +++ b/receiver/mongodbatlasreceiver/README.md @@ -42,6 +42,11 @@ MongoDB Atlas [Documentation](https://www.mongodb.com/docs/atlas/reference/api/l - `granularity` (default `PT1M` - See [MongoDB Atlas Documentation](https://docs.atlas.mongodb.com/reference/api/process-measurements/)) - `collection_interval` (default `3m`) This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. - `storage` (optional) The component ID of a storage extension which can be used when polling for `alerts` or `events` . The storage extension prevents duplication of data after a collector restart by remembering which data were previously collected. 
+- `projects` (optional for metrics) a slice of projects this receiver collects metrics from instead of all projects in an organization + - `name` Name of the project to discover metrics from + - `include_clusters` (default empty, exclusive with `exclude_clusters`) + - `exclude_clusters` (default empty, exclusive with `include_clusters`) + - If both `include_clusters` and `exclude_clusters` are empty, then all clusters in the project will be included - `retry_on_failure` - `enabled` (default true) - `initial_interval` (default 5s) diff --git a/receiver/mongodbatlasreceiver/config.go b/receiver/mongodbatlasreceiver/config.go index b2d6d3c81010..ae159d65044a 100644 --- a/receiver/mongodbatlasreceiver/config.go +++ b/receiver/mongodbatlasreceiver/config.go @@ -28,6 +28,7 @@ type Config struct { PrivateKey configopaque.String `mapstructure:"private_key"` Granularity string `mapstructure:"granularity"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` + Projects []*ProjectConfig `mapstructure:"projects"` Alerts AlertConfig `mapstructure:"alerts"` Events *EventsConfig `mapstructure:"events"` Logs LogConfig `mapstructure:"logs"` @@ -133,6 +134,12 @@ var ( func (c *Config) Validate() error { var errs error + for _, project := range c.Projects { + if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 { + errs = multierr.Append(errs, errClusterConfig) + } + } + errs = multierr.Append(errs, c.Alerts.validate()) errs = multierr.Append(errs, c.Logs.validate()) if c.Events != nil { diff --git a/receiver/mongodbatlasreceiver/config_test.go b/receiver/mongodbatlasreceiver/config_test.go index 9148e7357ac8..183d58290053 100644 --- a/receiver/mongodbatlasreceiver/config_test.go +++ b/receiver/mongodbatlasreceiver/config_test.go @@ -116,6 +116,47 @@ func TestValidate(t *testing.T) { }, expectedErr: errNoCert.Error(), }, + { + name: "Valid Metrics Config", + input: Config{ + Projects: []*ProjectConfig{ + { + Name: "Project1", + }, + }, + 
ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type), + }, + }, + { + name: "Valid Metrics Config with multiple projects with an inclusion or exclusion", + input: Config{ + Projects: []*ProjectConfig{ + { + Name: "Project1", + IncludeClusters: []string{"Cluster1"}, + }, + { + Name: "Project2", + ExcludeClusters: []string{"Cluster1"}, + }, + }, + ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type), + }, + }, + { + name: "invalid Metrics Config", + input: Config{ + Projects: []*ProjectConfig{ + { + Name: "Project1", + IncludeClusters: []string{"Cluster1"}, + ExcludeClusters: []string{"Cluster2"}, + }, + }, + ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type), + }, + expectedErr: errClusterConfig.Error(), + }, { name: "Valid Logs Config", input: Config{ diff --git a/receiver/mongodbatlasreceiver/receiver.go b/receiver/mongodbatlasreceiver/receiver.go index 72723aa552b0..f6a97bae439f 100644 --- a/receiver/mongodbatlasreceiver/receiver.go +++ b/receiver/mongodbatlasreceiver/receiver.go @@ -37,6 +37,10 @@ type timeconstraints struct { func newMongoDBAtlasReceiver(settings receiver.CreateSettings, cfg *Config) *mongodbatlasreceiver { client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.RetrySettings, settings.Logger) + for _, p := range cfg.Projects { + p.populateIncludesAndExcludes() + } + return &mongodbatlasreceiver{ log: settings.Logger, cfg: cfg, @@ -77,47 +81,113 @@ func (s *mongodbatlasreceiver) shutdown(context.Context) error { return s.client.Shutdown() } +// poll decides whether to poll all projects or a specific project based on the configuration. 
func (s *mongodbatlasreceiver) poll(ctx context.Context, time timeconstraints) error { + if len(s.cfg.Projects) == 0 { + return s.pollAllProjects(ctx, time) + } + return s.pollProjects(ctx, time) +} + +// pollAllProjects handles polling across all projects within the organizations. +func (s *mongodbatlasreceiver) pollAllProjects(ctx context.Context, time timeconstraints) error { orgs, err := s.client.Organizations(ctx) if err != nil { return fmt.Errorf("error retrieving organizations: %w", err) } for _, org := range orgs { - projects, err := s.client.Projects(ctx, org.ID) + proj, err := s.client.Projects(ctx, org.ID) if err != nil { - return fmt.Errorf("error retrieving projects: %w", err) + s.log.Error("error retrieving projects", zap.String("orgID", org.ID), zap.Error(err)) + continue } - for _, project := range projects { - nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID) - if err != nil { - return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err) + for _, project := range proj { + // Since there is no specific ProjectConfig for these projects, pass nil. + if err := s.processProject(ctx, time, org.Name, project, nil); err != nil { + s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err)) + } + } + } + return nil +} - - processes, err := s.client.Processes(ctx, project.ID) - if err != nil { - return fmt.Errorf("error retrieving MongoDB Atlas processes for project %s: %w", project.ID, err) - } - for _, process := range processes { - clusterName := nodeClusterMap[process.UserAlias] - providerValues := providerMap[clusterName] +// pollProjects handles polling for specific projects as configured. 
+func (s *mongodbatlasreceiver) pollProjects(ctx context.Context, time timeconstraints) error { + for _, projectCfg := range s.cfg.Projects { + project, err := s.client.GetProject(ctx, projectCfg.Name) + if err != nil { + s.log.Error("error retrieving project", zap.String("projectName", projectCfg.Name), zap.Error(err)) + continue + } - if err := s.extractProcessMetrics(ctx, time, org.Name, project, process, clusterName, providerValues); err != nil { - return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err) - } + org, err := s.client.GetOrganization(ctx, project.OrgID) + if err != nil { + s.log.Error("error retrieving organization from project", zap.String("projectName", projectCfg.Name), zap.Error(err)) + continue + } - if err := s.extractProcessDatabaseMetrics(ctx, time, org.Name, project, process, clusterName, providerValues); err != nil { - return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err) - } + if err := s.processProject(ctx, time, org.Name, project, projectCfg); err != nil { + s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err)) + } + } + return nil +} - if err := s.extractProcessDiskMetrics(ctx, time, org.Name, project, process, clusterName, providerValues); err != nil { - return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err) - } - } +func (s *mongodbatlasreceiver) processProject(ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, projectCfg *ProjectConfig) error { + nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID) + if err != nil { + return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err) + } + + processes, err := s.client.Processes(ctx, project.ID) + if err != nil { + return fmt.Errorf("error retrieving MongoDB Atlas processes for project 
%s: %w", project.ID, err) + } + + for _, process := range processes { + clusterName := nodeClusterMap[process.UserAlias] + providerValues := providerMap[clusterName] + + if !shouldProcessCluster(projectCfg, clusterName) { + // Skip processing for this cluster + continue + } + + if err := s.extractProcessMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { + return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err) + } + + if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { + return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err) + } + + if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { + return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err) } } + return nil } +// shouldProcessCluster checks whether a given cluster should be processed based on the project configuration. +func shouldProcessCluster(projectCfg *ProjectConfig, clusterName string) bool { + if projectCfg == nil { + // If there is no project config, process all clusters. + return true + } + + _, isIncluded := projectCfg.includesByClusterName[clusterName] + _, isExcluded := projectCfg.excludesByClusterName[clusterName] + + // Return false immediately if the cluster is excluded. + if isExcluded { + return false + } + + // If IncludeClusters is empty, or the cluster is explicitly included, return true. 
+ return len(projectCfg.IncludeClusters) == 0 || isIncluded +} + type providerValues struct { RegionName string ProviderName string diff --git a/receiver/mongodbatlasreceiver/receiver_test.go b/receiver/mongodbatlasreceiver/receiver_test.go index 88d9d05a7a93..c592890f3984 100644 --- a/receiver/mongodbatlasreceiver/receiver_test.go +++ b/receiver/mongodbatlasreceiver/receiver_test.go @@ -71,3 +71,69 @@ func TestTimeConstraints(t *testing.T) { t.Run(testCase.name, testCase.run) } } + +func TestShouldProcessCluster(t *testing.T) { + tests := []struct { + name string + projectCfg *ProjectConfig + clusterName string + want bool + }{ + { + name: "included cluster should be processed", + projectCfg: &ProjectConfig{ + IncludeClusters: []string{"Cluster1"}, + }, + clusterName: "Cluster1", + want: true, + }, + { + name: "cluster not included should not be processed", + projectCfg: &ProjectConfig{ + IncludeClusters: []string{"Cluster1"}, + }, + clusterName: "Cluster2", + want: false, + }, + { + name: "excluded cluster should not be processed", + projectCfg: &ProjectConfig{ + ExcludeClusters: []string{"Cluster2"}, + }, + clusterName: "Cluster2", + want: false, + }, + { + name: "cluster not excluded should be processed assuming it exists in the project", + projectCfg: &ProjectConfig{ + ExcludeClusters: []string{"Cluster1"}, + }, + clusterName: "Cluster2", + want: true, + }, + { + name: "cluster should be processed when no includes or excludes are set", + projectCfg: &ProjectConfig{}, + clusterName: "Cluster1", + want: true, + }, + { + name: "cluster should be processed when project config is nil", + projectCfg: nil, + clusterName: "Cluster1", + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.projectCfg != nil { + tt.projectCfg.populateIncludesAndExcludes() + } + + if got := shouldProcessCluster(tt.projectCfg, tt.clusterName); got != tt.want { + t.Errorf("shouldProcessCluster() = %v, want %v", 
got, tt.want) + } + }) + } +} From 3a46b68c8ac579d4b75175753182e4b2f98f8e6b Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Thu, 16 Nov 2023 21:25:01 -0800 Subject: [PATCH 24/27] [chore][checkapi] Do not enforce that all packages must have exactly one function (#28886) If no functions are exposed, exit with no error. This change allows to remove `extension/encoding` from the allowlist. --- cmd/checkapi/allowlist.txt | 1 - cmd/checkapi/main.go | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/checkapi/allowlist.txt b/cmd/checkapi/allowlist.txt index b791c2c59028..f63baf5214a7 100644 --- a/cmd/checkapi/allowlist.txt +++ b/cmd/checkapi/allowlist.txt @@ -1,5 +1,4 @@ connector/servicegraphconnector -extension/encoding extension/observer processor/servicegraphprocessor receiver/kafkareceiver diff --git a/cmd/checkapi/main.go b/cmd/checkapi/main.go index 609d1fd55991..50d2bff18a71 100644 --- a/cmd/checkapi/main.go +++ b/cmd/checkapi/main.go @@ -172,12 +172,13 @@ func walkFolder(folder string, componentType string) error { return nil } + if len(result.Functions) == 0 { + return nil + } if len(result.Functions) > 1 { return fmt.Errorf("%s has more than one function: %q", folder, strings.Join(fnNames, ",")) } - if len(result.Functions) == 0 { - return fmt.Errorf("%s has no functions defined", folder) - } + newFactoryFn := result.Functions[0] if newFactoryFn.Name != "NewFactory" { return fmt.Errorf("%s does not define a NewFactory function", folder) From f713ebf60a412bf27faeffeb9f2f71b1db8821b9 Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Fri, 17 Nov 2023 15:49:11 -0500 Subject: [PATCH 25/27] remove unused struct --- .../internal/metrics/func_convert_gauge_to_sum_datapoint.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go index 
adfe9f37f253..dab12e96d094 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum_datapoint.go @@ -13,16 +13,12 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" ) -type convertDatapointGaugeToSumArguments struct { - StringAggTemp string - Monotonic bool -} - func newConvertDatapointGaugeToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { return ottl.NewFactory("convert_gauge_to_sum", &convertGaugeToSumArguments{}, createConvertDatapointGaugeToSumFunction) } func createConvertDatapointGaugeToSumFunction(_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[ottldatapoint.TransformContext], error) { + // use the same args as in metric context args, ok := oArgs.(*convertGaugeToSumArguments) if !ok { From ddec5c39fd60303a0320d29b8207a32d7e90fb5d Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Sat, 18 Nov 2023 08:43:33 -0500 Subject: [PATCH 26/27] crosslink --- processor/transformprocessor/go.mod | 2 ++ 1 file changed, 2 insertions(+) diff --git a/processor/transformprocessor/go.mod b/processor/transformprocessor/go.mod index 9c58dfb31f38..a247d1f37e31 100644 --- a/processor/transformprocessor/go.mod +++ b/processor/transformprocessor/go.mod @@ -67,3 +67,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common From 13c3ce5deb7eefa9ea8727c854d5a318c65fffda Mon Sep 17 00:00:00 2001 From: Gil Raphaelli Date: Mon, 20 Nov 2023 12:46:47 -0500 Subject: [PATCH 27/27] gotidy --- processor/transformprocessor/go.mod | 4 ++-- 
processor/transformprocessor/go.sum | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/processor/transformprocessor/go.mod b/processor/transformprocessor/go.mod index a247d1f37e31..caa7c5b76247 100644 --- a/processor/transformprocessor/go.mod +++ b/processor/transformprocessor/go.mod @@ -3,11 +3,13 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/processor/trans go 1.20 require ( + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.89.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/collector/component v0.89.0 go.opentelemetry.io/collector/confmap v0.89.0 go.opentelemetry.io/collector/consumer v0.89.0 + go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 go.opentelemetry.io/collector/processor v0.89.0 go.uber.org/multierr v1.11.0 @@ -32,13 +34,11 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.89.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.89.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.89.0 // indirect - go.opentelemetry.io/collector/featuregate v1.0.0-rcv0018 // indirect go.opentelemetry.io/otel v1.20.0 // indirect go.opentelemetry.io/otel/metric v1.20.0 // indirect go.opentelemetry.io/otel/trace v1.20.0 // indirect diff --git a/processor/transformprocessor/go.sum b/processor/transformprocessor/go.sum index 80befca5105a..f297157038e5 100644 --- a/processor/transformprocessor/go.sum +++ b/processor/transformprocessor/go.sum @@ -82,8 
+82,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0 h1:stB4V0yU6htEVWxoNOVuiIPDUetbRLlpP4m1Rcn03G8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.89.0/go.mod h1:mrkZwYA2MKZaidETgwMffAyPzsLjOq5fEJB58TIXa0I= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=