From 762e9452c1363baef4211a9fc41ab934e278412d Mon Sep 17 00:00:00 2001 From: Dmitry Date: Sun, 20 Mar 2022 22:19:38 -0700 Subject: [PATCH] Split pdata package by telemetry signal type --- CHANGELOG.md | 3 + client/doc_test.go | 4 +- component/componenttest/nop_exporter_test.go | 10 +- component/componenttest/nop_processor_test.go | 10 +- component/receiver.go | 6 +- .../configgrpc/configgrpc_benchmark_test.go | 16 +- consumer/consumererror/signalerrors.go | 22 +- consumer/consumertest/consumer.go | 10 +- consumer/consumertest/err.go | 10 +- consumer/consumertest/err_test.go | 10 +- consumer/consumertest/nop.go | 10 +- consumer/consumertest/nop_test.go | 10 +- consumer/consumertest/sink.go | 28 +-- consumer/consumertest/sink_test.go | 10 +- consumer/logs.go | 12 +- consumer/logs_test.go | 18 +- consumer/metrics.go | 12 +- consumer/metrics_test.go | 18 +- consumer/traces.go | 12 +- consumer/traces_test.go | 18 +- exporter/exporterhelper/common_test.go | 4 +- .../internal/persistent_queue_test.go | 11 +- .../internal/persistent_storage_test.go | 6 +- exporter/exporterhelper/logs.go | 8 +- exporter/exporterhelper/logs_test.go | 12 +- exporter/exporterhelper/metrics.go | 8 +- exporter/exporterhelper/metrics_test.go | 12 +- exporter/exporterhelper/queued_retry_test.go | 4 +- exporter/exporterhelper/traces.go | 8 +- exporter/exporterhelper/traces_test.go | 14 +- exporter/loggingexporter/logging_exporter.go | 16 +- .../loggingexporter/logging_exporter_test.go | 22 +- exporter/otlpexporter/otlp.go | 10 +- exporter/otlpexporter/otlp_test.go | 26 ++- exporter/otlphttpexporter/otlp.go | 10 +- exporter/otlphttpexporter/otlp_test.go | 19 +- .../err_or_sink_consumer.go | 7 +- internal/otlptext/databuffer.go | 68 +++--- internal/otlptext/databuffer_test.go | 12 +- internal/otlptext/logs.go | 8 +- internal/otlptext/logs_test.go | 6 +- internal/otlptext/metrics.go | 10 +- internal/otlptext/metrics_test.go | 6 +- internal/otlptext/traces.go | 8 +- internal/otlptext/traces_test.go | 6 
+- internal/testcomponents/example_exporter.go | 20 +- .../testcomponents/example_exporter_test.go | 7 +- internal/testdata/common.go | 34 +-- internal/testdata/log.go | 41 ++-- internal/testdata/metric.go | 85 +++---- internal/testdata/resource.go | 8 +- internal/testdata/trace.go | 39 ++-- .../cmd/pdatagen/internal/base_slices.go | 28 ++- .../cmd/pdatagen/internal/base_structs.go | 18 +- model/internal/cmd/pdatagen/internal/files.go | 33 +-- .../cmd/pdatagen/internal/log_structs.go | 2 +- .../cmd/pdatagen/internal/metrics_structs.go | 2 +- .../cmd/pdatagen/internal/trace_structs.go | 2 +- model/internal/cmd/pdatagen/main.go | 14 +- .../{generated_log.go => generated_plog.go} | 0 ...ted_log_test.go => generated_plog_test.go} | 0 ...erated_metrics.go => generated_pmetric.go} | 0 ...rics_test.go => generated_pmetric_test.go} | 0 ...generated_trace.go => generated_ptrace.go} | 0 ...trace_test.go => generated_ptrace_test.go} | 0 model/internal/pdata/metrics.go | 10 +- model/otlp/json_marshaler.go | 16 +- model/otlp/json_test.go | 18 +- model/otlp/json_unmarshaler.go | 22 +- model/otlp/pb_marshaler.go | 34 +-- model/otlp/pb_test.go | 53 ++--- model/otlp/pb_unmarshaler.go | 16 +- model/otlpgrpc/logs.go | 8 +- model/otlpgrpc/metrics.go | 8 +- model/otlpgrpc/metrics_test.go | 4 +- model/otlpgrpc/traces.go | 8 +- model/otlpgrpc/traces_test.go | 4 +- model/pcommon/alias.go | 61 +++++ model/pcommon/generated_common_alias.go | 32 +++ model/pcommon/generated_resource_alias.go | 26 +++ model/pcommon/spanid_alias.go | 26 +++ model/pcommon/timestamp_alias.go | 23 ++ model/pcommon/traceid_alias.go | 26 +++ model/pdata/common_alias.go | 64 ++++-- model/pdata/generated_common_alias.go | 4 + model/pdata/generated_plog_alias.go | 68 ++++++ model/pdata/generated_pmetric_alias.go | 212 ++++++++++++++++++ model/pdata/generated_ptrace_alias.go | 108 +++++++++ model/pdata/generated_resource_alias.go | 2 + model/pdata/logs_alias.go | 104 +++++++-- model/pdata/metrics_alias.go | 72 ++++-- 
model/pdata/spanid_alias.go | 3 + model/pdata/timestamp_alias.go | 2 + model/pdata/traceid_alias.go | 3 + model/pdata/traces_alias.go | 38 +++- model/plog/alias.go | 65 ++++++ .../generated_alias.go} | 2 +- model/pmetric/alias.go | 81 +++++++ .../generated_alias.go} | 2 +- model/ptrace/alias.go | 62 +++++ .../generated_alias.go} | 2 +- processor/batchprocessor/batch_processor.go | 46 ++-- .../batchprocessor/batch_processor_test.go | 42 ++-- processor/batchprocessor/splitlogs.go | 16 +- processor/batchprocessor/splitlogs_test.go | 10 +- processor/batchprocessor/splitmetrics.go | 62 ++--- processor/batchprocessor/splitmetrics_test.go | 34 +-- processor/batchprocessor/splittraces.go | 16 +- processor/batchprocessor/splittraces_test.go | 12 +- .../memorylimiterprocessor/memorylimiter.go | 10 +- .../memorylimiter_test.go | 10 +- processor/processorhelper/logs.go | 6 +- processor/processorhelper/logs_test.go | 10 +- processor/processorhelper/metrics.go | 6 +- processor/processorhelper/metrics_test.go | 10 +- processor/processorhelper/traces.go | 6 +- processor/processorhelper/traces_test.go | 10 +- receiver/otlpreceiver/otlp_test.go | 33 +-- receiver/scraperhelper/scraper.go | 8 +- receiver/scraperhelper/scrapercontroller.go | 4 +- .../scraperhelper/scrapercontroller_test.go | 10 +- .../builder/pipelines_builder_test.go | 4 +- .../builder/receivers_builder_test.go | 4 +- service/internal/fanoutconsumer/logs.go | 6 +- service/internal/fanoutconsumer/metrics.go | 6 +- service/internal/fanoutconsumer/traces.go | 6 +- 126 files changed, 1827 insertions(+), 752 deletions(-) rename model/internal/pdata/{generated_log.go => generated_plog.go} (100%) rename model/internal/pdata/{generated_log_test.go => generated_plog_test.go} (100%) rename model/internal/pdata/{generated_metrics.go => generated_pmetric.go} (100%) rename model/internal/pdata/{generated_metrics_test.go => generated_pmetric_test.go} (100%) rename model/internal/pdata/{generated_trace.go => generated_ptrace.go} 
(100%) rename model/internal/pdata/{generated_trace_test.go => generated_ptrace_test.go} (100%) create mode 100644 model/pcommon/alias.go create mode 100644 model/pcommon/generated_common_alias.go create mode 100644 model/pcommon/generated_resource_alias.go create mode 100644 model/pcommon/spanid_alias.go create mode 100644 model/pcommon/timestamp_alias.go create mode 100644 model/pcommon/traceid_alias.go create mode 100644 model/pdata/generated_plog_alias.go create mode 100644 model/pdata/generated_pmetric_alias.go create mode 100644 model/pdata/generated_ptrace_alias.go create mode 100644 model/plog/alias.go rename model/{pdata/generated_log_alias.go => plog/generated_alias.go} (99%) create mode 100644 model/pmetric/alias.go rename model/{pdata/generated_metrics_alias.go => pmetric/generated_alias.go} (99%) create mode 100644 model/ptrace/alias.go rename model/{pdata/generated_trace_alias.go => ptrace/generated_alias.go} (99%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 138846a725b..354847cbb24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,9 @@ ### 🚩 Deprecations 🚩 +- All public API from model/pdata package is deprecated in favor of packages separated by telemetry + signal type: model/pcommon, model/plog, model/pmetric and model/ptrace (#5087) + ### 💡 Enhancements 💡 - OTLP HTTP receiver will use HTTP/2 over TLS if client supports it (#5190) diff --git a/client/doc_test.go b/client/doc_test.go index ee8f0bf97d1..54a83629369 100644 --- a/client/doc_test.go +++ b/client/doc_test.go @@ -21,7 +21,7 @@ import ( "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) func Example_receiver() { @@ -29,7 +29,7 @@ func Example_receiver() { var next consumer.Traces // You'll convert the incoming data into pipeline data - td := pdata.NewTraces() + td := ptrace.NewTraces() // You probably have a context with client metadata from your listener or 
// scraper diff --git a/component/componenttest/nop_exporter_test.go b/component/componenttest/nop_exporter_test.go index d1182ff2af4..136bb5d0c67 100644 --- a/component/componenttest/nop_exporter_test.go +++ b/component/componenttest/nop_exporter_test.go @@ -22,7 +22,9 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestNewNopExporterFactory(t *testing.T) { @@ -35,18 +37,18 @@ func TestNewNopExporterFactory(t *testing.T) { traces, err := factory.CreateTracesExporter(context.Background(), NewNopExporterCreateSettings(), cfg) require.NoError(t, err) assert.NoError(t, traces.Start(context.Background(), NewNopHost())) - assert.NoError(t, traces.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, traces.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.NoError(t, traces.Shutdown(context.Background())) metrics, err := factory.CreateMetricsExporter(context.Background(), NewNopExporterCreateSettings(), cfg) require.NoError(t, err) assert.NoError(t, metrics.Start(context.Background(), NewNopHost())) - assert.NoError(t, metrics.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, metrics.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.NoError(t, metrics.Shutdown(context.Background())) logs, err := factory.CreateLogsExporter(context.Background(), NewNopExporterCreateSettings(), cfg) require.NoError(t, err) assert.NoError(t, logs.Start(context.Background(), NewNopHost())) - assert.NoError(t, logs.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, logs.ConsumeLogs(context.Background(), plog.NewLogs())) assert.NoError(t, logs.Shutdown(context.Background())) } diff --git a/component/componenttest/nop_processor_test.go 
b/component/componenttest/nop_processor_test.go index b82b1c344e2..35841fdb6d1 100644 --- a/component/componenttest/nop_processor_test.go +++ b/component/componenttest/nop_processor_test.go @@ -24,7 +24,9 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestNewNopProcessorFactory(t *testing.T) { @@ -38,20 +40,20 @@ func TestNewNopProcessorFactory(t *testing.T) { require.NoError(t, err) assert.Equal(t, consumer.Capabilities{MutatesData: false}, traces.Capabilities()) assert.NoError(t, traces.Start(context.Background(), NewNopHost())) - assert.NoError(t, traces.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, traces.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.NoError(t, traces.Shutdown(context.Background())) metrics, err := factory.CreateMetricsProcessor(context.Background(), NewNopProcessorCreateSettings(), cfg, consumertest.NewNop()) require.NoError(t, err) assert.Equal(t, consumer.Capabilities{MutatesData: false}, metrics.Capabilities()) assert.NoError(t, metrics.Start(context.Background(), NewNopHost())) - assert.NoError(t, metrics.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, metrics.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.NoError(t, metrics.Shutdown(context.Background())) logs, err := factory.CreateLogsProcessor(context.Background(), NewNopProcessorCreateSettings(), cfg, consumertest.NewNop()) require.NoError(t, err) assert.Equal(t, consumer.Capabilities{MutatesData: false}, logs.Capabilities()) assert.NoError(t, logs.Start(context.Background(), NewNopHost())) - assert.NoError(t, logs.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, 
logs.ConsumeLogs(context.Background(), plog.NewLogs())) assert.NoError(t, logs.Shutdown(context.Background())) } diff --git a/component/receiver.go b/component/receiver.go index 6ed6c44217e..93111f26248 100644 --- a/component/receiver.go +++ b/component/receiver.go @@ -67,7 +67,7 @@ type Receiver interface { // Its purpose is to translate data from any format to the collector's internal trace format. // TracesReceiver feeds a consumer.Traces with data. // -// For example it could be Zipkin data source which translates Zipkin spans into pdata.Traces. +// For example it could be Zipkin data source which translates Zipkin spans into ptrace.Traces. type TracesReceiver interface { Receiver } @@ -76,7 +76,7 @@ type TracesReceiver interface { // Its purpose is to translate data from any format to the collector's internal metrics format. // MetricsReceiver feeds a consumer.Metrics with data. // -// For example it could be Prometheus data source which translates Prometheus metrics into pdata.Metrics. +// For example it could be Prometheus data source which translates Prometheus metrics into pmetric.Metrics. type MetricsReceiver interface { Receiver } @@ -85,7 +85,7 @@ type MetricsReceiver interface { // Its purpose is to translate data from any format to the collector's internal logs data format. // LogsReceiver feeds a consumer.Logs with data. // -// For example a LogsReceiver can read syslogs and convert them into pdata.Logs. +// For example a LogsReceiver can read syslogs and convert them into plog.Logs. 
type LogsReceiver interface { Receiver } diff --git a/config/configgrpc/configgrpc_benchmark_test.go b/config/configgrpc/configgrpc_benchmark_test.go index a96446e2eb7..ebe6e6d3f42 100644 --- a/config/configgrpc/configgrpc_benchmark_test.go +++ b/config/configgrpc/configgrpc_benchmark_test.go @@ -32,7 +32,9 @@ import ( "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func BenchmarkCompressors(b *testing.B) { @@ -106,27 +108,27 @@ type marshaler interface { } type logMarshaler struct { - pdata.LogsMarshaler + plog.LogsMarshaler } func (m *logMarshaler) marshal(e interface{}) ([]byte, error) { - return m.MarshalLogs(e.(pdata.Logs)) + return m.MarshalLogs(e.(plog.Logs)) } type traceMarshaler struct { - pdata.TracesMarshaler + ptrace.TracesMarshaler } func (m *traceMarshaler) marshal(e interface{}) ([]byte, error) { - return m.MarshalTraces(e.(pdata.Traces)) + return m.MarshalTraces(e.(ptrace.Traces)) } type metricsMarshaler struct { - pdata.MetricsMarshaler + pmetric.MetricsMarshaler } func (m *metricsMarshaler) marshal(e interface{}) ([]byte, error) { - return m.MarshalMetrics(e.(pdata.Metrics)) + return m.MarshalMetrics(e.(pmetric.Metrics)) } func setupTestPayloads() []testPayload { diff --git a/consumer/consumererror/signalerrors.go b/consumer/consumererror/signalerrors.go index f8c7e5bd53d..1a3e5fbfae2 100644 --- a/consumer/consumererror/signalerrors.go +++ b/consumer/consumererror/signalerrors.go @@ -15,18 +15,20 @@ package consumererror // import "go.opentelemetry.io/collector/consumer/consumererror" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) // Traces is an error that may carry 
associated Trace data for a subset of received data // that failed to be processed or sent. type Traces struct { error - failed pdata.Traces + failed ptrace.Traces } // NewTraces creates a Traces that can encapsulate received data that failed to be processed or sent. -func NewTraces(err error, failed pdata.Traces) error { +func NewTraces(err error, failed ptrace.Traces) error { return Traces{ error: err, failed: failed, @@ -34,7 +36,7 @@ func NewTraces(err error, failed pdata.Traces) error { } // GetTraces returns failed traces from the associated error. -func (err Traces) GetTraces() pdata.Traces { +func (err Traces) GetTraces() ptrace.Traces { return err.failed } @@ -47,11 +49,11 @@ func (err Traces) Unwrap() error { // that failed to be processed or sent. type Logs struct { error - failed pdata.Logs + failed plog.Logs } // NewLogs creates a Logs that can encapsulate received data that failed to be processed or sent. -func NewLogs(err error, failed pdata.Logs) error { +func NewLogs(err error, failed plog.Logs) error { return Logs{ error: err, failed: failed, @@ -59,7 +61,7 @@ func NewLogs(err error, failed pdata.Logs) error { } // GetLogs returns failed logs from the associated error. -func (err Logs) GetLogs() pdata.Logs { +func (err Logs) GetLogs() plog.Logs { return err.failed } @@ -72,11 +74,11 @@ func (err Logs) Unwrap() error { // that failed to be processed or sent. type Metrics struct { error - failed pdata.Metrics + failed pmetric.Metrics } // NewMetrics creates a Metrics that can encapsulate received data that failed to be processed or sent. -func NewMetrics(err error, failed pdata.Metrics) error { +func NewMetrics(err error, failed pmetric.Metrics) error { return Metrics{ error: err, failed: failed, @@ -84,7 +86,7 @@ func NewMetrics(err error, failed pdata.Metrics) error { } // GetMetrics returns failed metrics from the associated error. 
-func (err Metrics) GetMetrics() pdata.Metrics { +func (err Metrics) GetMetrics() pmetric.Metrics { return err.failed } diff --git a/consumer/consumertest/consumer.go b/consumer/consumertest/consumer.go index a9cee267436..9502a1849d1 100644 --- a/consumer/consumertest/consumer.go +++ b/consumer/consumertest/consumer.go @@ -18,7 +18,9 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) // Consumer is a convenience interface that implements all consumer interfaces. @@ -29,11 +31,11 @@ type Consumer interface { // Capabilities to implement the base consumer functionality. Capabilities() consumer.Capabilities // ConsumeTraces to implement the consumer.Traces. - ConsumeTraces(context.Context, pdata.Traces) error + ConsumeTraces(context.Context, ptrace.Traces) error // ConsumeMetrics to implement the consumer.Metrics. - ConsumeMetrics(context.Context, pdata.Metrics) error + ConsumeMetrics(context.Context, pmetric.Metrics) error // ConsumeLogs to implement the consumer.Logs. 
- ConsumeLogs(context.Context, pdata.Logs) error + ConsumeLogs(context.Context, plog.Logs) error unexported() } diff --git a/consumer/consumertest/err.go b/consumer/consumertest/err.go index dc0cf989cc1..b93d44b738a 100644 --- a/consumer/consumertest/err.go +++ b/consumer/consumertest/err.go @@ -17,7 +17,9 @@ package consumertest // import "go.opentelemetry.io/collector/consumer/consumert import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type errConsumer struct { @@ -27,15 +29,15 @@ type errConsumer struct { func (er *errConsumer) unexported() {} -func (er *errConsumer) ConsumeTraces(context.Context, pdata.Traces) error { +func (er *errConsumer) ConsumeTraces(context.Context, ptrace.Traces) error { return er.err } -func (er *errConsumer) ConsumeMetrics(context.Context, pdata.Metrics) error { +func (er *errConsumer) ConsumeMetrics(context.Context, pmetric.Metrics) error { return er.err } -func (er *errConsumer) ConsumeLogs(context.Context, pdata.Logs) error { +func (er *errConsumer) ConsumeLogs(context.Context, plog.Logs) error { return er.err } diff --git a/consumer/consumertest/err_test.go b/consumer/consumertest/err_test.go index 382b3809086..8831a86433d 100644 --- a/consumer/consumertest/err_test.go +++ b/consumer/consumertest/err_test.go @@ -22,7 +22,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestErr(t *testing.T) { @@ -30,7 +32,7 @@ func TestErr(t *testing.T) { ec := NewErr(err) require.NotNil(t, ec) assert.NotPanics(t, ec.unexported) - assert.Equal(t, err, ec.ConsumeLogs(context.Background(), pdata.NewLogs())) - assert.Equal(t, err, 
ec.ConsumeMetrics(context.Background(), pdata.NewMetrics())) - assert.Equal(t, err, ec.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.Equal(t, err, ec.ConsumeLogs(context.Background(), plog.NewLogs())) + assert.Equal(t, err, ec.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) + assert.Equal(t, err, ec.ConsumeTraces(context.Background(), ptrace.NewTraces())) } diff --git a/consumer/consumertest/nop.go b/consumer/consumertest/nop.go index f48123677ca..2ed855117e2 100644 --- a/consumer/consumertest/nop.go +++ b/consumer/consumertest/nop.go @@ -17,7 +17,9 @@ package consumertest // import "go.opentelemetry.io/collector/consumer/consumert import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) var ( @@ -30,15 +32,15 @@ type nopConsumer struct { func (nc *nopConsumer) unexported() {} -func (nc *nopConsumer) ConsumeTraces(context.Context, pdata.Traces) error { +func (nc *nopConsumer) ConsumeTraces(context.Context, ptrace.Traces) error { return nil } -func (nc *nopConsumer) ConsumeMetrics(context.Context, pdata.Metrics) error { +func (nc *nopConsumer) ConsumeMetrics(context.Context, pmetric.Metrics) error { return nil } -func (nc *nopConsumer) ConsumeLogs(context.Context, pdata.Logs) error { +func (nc *nopConsumer) ConsumeLogs(context.Context, plog.Logs) error { return nil } diff --git a/consumer/consumertest/nop_test.go b/consumer/consumertest/nop_test.go index a16b129808a..bdae9f30179 100644 --- a/consumer/consumertest/nop_test.go +++ b/consumer/consumertest/nop_test.go @@ -21,14 +21,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestNop(t *testing.T) { nc := 
NewNop() require.NotNil(t, nc) assert.NotPanics(t, nc.unexported) - assert.NoError(t, nc.ConsumeLogs(context.Background(), pdata.NewLogs())) - assert.NoError(t, nc.ConsumeMetrics(context.Background(), pdata.NewMetrics())) - assert.NoError(t, nc.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, nc.ConsumeLogs(context.Background(), plog.NewLogs())) + assert.NoError(t, nc.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) + assert.NoError(t, nc.ConsumeTraces(context.Background(), ptrace.NewTraces())) } diff --git a/consumer/consumertest/sink.go b/consumer/consumertest/sink.go index d9a23602222..aa7e5d2c74f 100644 --- a/consumer/consumertest/sink.go +++ b/consumer/consumertest/sink.go @@ -19,7 +19,9 @@ import ( "sync" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) // TracesSink is a consumer.Traces that acts like a sink that @@ -27,14 +29,14 @@ import ( type TracesSink struct { nonMutatingConsumer mu sync.Mutex - traces []pdata.Traces + traces []ptrace.Traces spanCount int } var _ consumer.Traces = (*TracesSink)(nil) // ConsumeTraces stores traces to this sink. -func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error { +func (ste *TracesSink) ConsumeTraces(_ context.Context, td ptrace.Traces) error { ste.mu.Lock() defer ste.mu.Unlock() @@ -45,11 +47,11 @@ func (ste *TracesSink) ConsumeTraces(_ context.Context, td pdata.Traces) error { } // AllTraces returns the traces stored by this sink since last Reset. 
-func (ste *TracesSink) AllTraces() []pdata.Traces { +func (ste *TracesSink) AllTraces() []ptrace.Traces { ste.mu.Lock() defer ste.mu.Unlock() - copyTraces := make([]pdata.Traces, len(ste.traces)) + copyTraces := make([]ptrace.Traces, len(ste.traces)) copy(copyTraces, ste.traces) return copyTraces } @@ -75,14 +77,14 @@ func (ste *TracesSink) Reset() { type MetricsSink struct { nonMutatingConsumer mu sync.Mutex - metrics []pdata.Metrics + metrics []pmetric.Metrics dataPointCount int } var _ consumer.Metrics = (*MetricsSink)(nil) // ConsumeMetrics stores metrics to this sink. -func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { sme.mu.Lock() defer sme.mu.Unlock() @@ -93,11 +95,11 @@ func (sme *MetricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) erro } // AllMetrics returns the metrics stored by this sink since last Reset. -func (sme *MetricsSink) AllMetrics() []pdata.Metrics { +func (sme *MetricsSink) AllMetrics() []pmetric.Metrics { sme.mu.Lock() defer sme.mu.Unlock() - copyMetrics := make([]pdata.Metrics, len(sme.metrics)) + copyMetrics := make([]pmetric.Metrics, len(sme.metrics)) copy(copyMetrics, sme.metrics) return copyMetrics } @@ -123,14 +125,14 @@ func (sme *MetricsSink) Reset() { type LogsSink struct { nonMutatingConsumer mu sync.Mutex - logs []pdata.Logs + logs []plog.Logs logRecordCount int } var _ consumer.Logs = (*LogsSink)(nil) // ConsumeLogs stores logs to this sink. -func (sle *LogsSink) ConsumeLogs(_ context.Context, ld pdata.Logs) error { +func (sle *LogsSink) ConsumeLogs(_ context.Context, ld plog.Logs) error { sle.mu.Lock() defer sle.mu.Unlock() @@ -141,11 +143,11 @@ func (sle *LogsSink) ConsumeLogs(_ context.Context, ld pdata.Logs) error { } // AllLogs returns the logs stored by this sink since last Reset. 
-func (sle *LogsSink) AllLogs() []pdata.Logs { +func (sle *LogsSink) AllLogs() []plog.Logs { sle.mu.Lock() defer sle.mu.Unlock() - copyLogs := make([]pdata.Logs, len(sle.logs)) + copyLogs := make([]plog.Logs, len(sle.logs)) copy(copyLogs, sle.logs) return copyLogs } diff --git a/consumer/consumertest/sink_test.go b/consumer/consumertest/sink_test.go index 2098019471f..ca8ed787b9c 100644 --- a/consumer/consumertest/sink_test.go +++ b/consumer/consumertest/sink_test.go @@ -22,13 +22,15 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestTracesSink(t *testing.T) { sink := new(TracesSink) td := testdata.GenerateTracesOneSpan() - want := make([]pdata.Traces, 0, 7) + want := make([]ptrace.Traces, 0, 7) for i := 0; i < 7; i++ { require.NoError(t, sink.ConsumeTraces(context.Background(), td)) want = append(want, td) @@ -43,7 +45,7 @@ func TestTracesSink(t *testing.T) { func TestMetricsSink(t *testing.T) { sink := new(MetricsSink) md := testdata.GenerateMetricsOneMetric() - want := make([]pdata.Metrics, 0, 7) + want := make([]pmetric.Metrics, 0, 7) for i := 0; i < 7; i++ { require.NoError(t, sink.ConsumeMetrics(context.Background(), md)) want = append(want, md) @@ -58,7 +60,7 @@ func TestMetricsSink(t *testing.T) { func TestLogsSink(t *testing.T) { sink := new(LogsSink) md := testdata.GenerateLogsOneLogRecord() - want := make([]pdata.Logs, 0, 7) + want := make([]plog.Logs, 0, 7) for i := 0; i < 7; i++ { require.NoError(t, sink.ConsumeLogs(context.Background(), md)) want = append(want, md) diff --git a/consumer/logs.go b/consumer/logs.go index 2876c14978e..12beff95af1 100644 --- a/consumer/logs.go +++ b/consumer/logs.go @@ -17,22 +17,22 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) -// Logs is an interface that receives pdata.Logs, processes it +// Logs is an interface that receives plog.Logs, processes it // as needed, and sends it to the next processing node if any or to the destination. type Logs interface { baseConsumer - // ConsumeLogs receives pdata.Logs for consumption. - ConsumeLogs(ctx context.Context, ld pdata.Logs) error + // ConsumeLogs receives plog.Logs for consumption. + ConsumeLogs(ctx context.Context, ld plog.Logs) error } // ConsumeLogsFunc is a helper function that is similar to ConsumeLogs. -type ConsumeLogsFunc func(ctx context.Context, ld pdata.Logs) error +type ConsumeLogsFunc func(ctx context.Context, ld plog.Logs) error // ConsumeLogs calls f(ctx, ld). -func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld plog.Logs) error { return f(ctx, ld) } diff --git a/consumer/logs_test.go b/consumer/logs_test.go index d3815937d96..b112de2c029 100644 --- a/consumer/logs_test.go +++ b/consumer/logs_test.go @@ -21,13 +21,13 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) func TestDefaultLogs(t *testing.T) { - cp, err := NewLogs(func(context.Context, pdata.Logs) error { return nil }) + cp, err := NewLogs(func(context.Context, plog.Logs) error { return nil }) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, cp.ConsumeLogs(context.Background(), plog.NewLogs())) assert.Equal(t, Capabilities{MutatesData: false}, cp.Capabilities()) } @@ -38,24 +38,24 @@ func TestNilFuncLogs(t *testing.T) { func TestWithCapabilitiesLogs(t *testing.T) { cp, err := NewLogs( - func(context.Context, pdata.Logs) error { return nil }, + func(context.Context, plog.Logs) error { return nil }, 
WithCapabilities(Capabilities{MutatesData: true})) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, cp.ConsumeLogs(context.Background(), plog.NewLogs())) assert.Equal(t, Capabilities{MutatesData: true}, cp.Capabilities()) } func TestConsumeLogs(t *testing.T) { consumeCalled := false - cp, err := NewLogs(func(context.Context, pdata.Logs) error { consumeCalled = true; return nil }) + cp, err := NewLogs(func(context.Context, plog.Logs) error { consumeCalled = true; return nil }) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, cp.ConsumeLogs(context.Background(), plog.NewLogs())) assert.True(t, consumeCalled) } func TestConsumeLogs_ReturnError(t *testing.T) { want := errors.New("my_error") - cp, err := NewLogs(func(context.Context, pdata.Logs) error { return want }) + cp, err := NewLogs(func(context.Context, plog.Logs) error { return want }) assert.NoError(t, err) - assert.Equal(t, want, cp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.Equal(t, want, cp.ConsumeLogs(context.Background(), plog.NewLogs())) } diff --git a/consumer/metrics.go b/consumer/metrics.go index 43102cee7f1..95c9aab8234 100644 --- a/consumer/metrics.go +++ b/consumer/metrics.go @@ -17,22 +17,22 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) -// Metrics is the new metrics consumer interface that receives pdata.Metrics, processes it +// Metrics is the new metrics consumer interface that receives pmetric.Metrics, processes it // as needed, and sends it to the next processing node if any or to the destination. type Metrics interface { baseConsumer - // ConsumeMetrics receives pdata.Metrics for consumption. 
- ConsumeMetrics(ctx context.Context, md pdata.Metrics) error + // ConsumeMetrics receives pmetric.Metrics for consumption. + ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error } // ConsumeMetricsFunc is a helper function that is similar to ConsumeMetrics. -type ConsumeMetricsFunc func(ctx context.Context, ld pdata.Metrics) error +type ConsumeMetricsFunc func(ctx context.Context, ld pmetric.Metrics) error // ConsumeMetrics calls f(ctx, ld). -func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, ld pdata.Metrics) error { +func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, ld pmetric.Metrics) error { return f(ctx, ld) } diff --git a/consumer/metrics_test.go b/consumer/metrics_test.go index f6b88568869..f7884acc01b 100644 --- a/consumer/metrics_test.go +++ b/consumer/metrics_test.go @@ -21,13 +21,13 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) func TestDefaultMetrics(t *testing.T) { - cp, err := NewMetrics(func(context.Context, pdata.Metrics) error { return nil }) + cp, err := NewMetrics(func(context.Context, pmetric.Metrics) error { return nil }) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, cp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.Equal(t, Capabilities{MutatesData: false}, cp.Capabilities()) } @@ -38,24 +38,24 @@ func TestNilFuncMetrics(t *testing.T) { func TestWithCapabilitiesMetrics(t *testing.T) { cp, err := NewMetrics( - func(context.Context, pdata.Metrics) error { return nil }, + func(context.Context, pmetric.Metrics) error { return nil }, WithCapabilities(Capabilities{MutatesData: true})) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, cp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.Equal(t, Capabilities{MutatesData: true}, 
cp.Capabilities()) } func TestConsumeMetrics(t *testing.T) { consumeCalled := false - cp, err := NewMetrics(func(context.Context, pdata.Metrics) error { consumeCalled = true; return nil }) + cp, err := NewMetrics(func(context.Context, pmetric.Metrics) error { consumeCalled = true; return nil }) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, cp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.True(t, consumeCalled) } func TestConsumeMetrics_ReturnError(t *testing.T) { want := errors.New("my_error") - cp, err := NewMetrics(func(context.Context, pdata.Metrics) error { return want }) + cp, err := NewMetrics(func(context.Context, pmetric.Metrics) error { return want }) assert.NoError(t, err) - assert.Equal(t, want, cp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.Equal(t, want, cp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) } diff --git a/consumer/traces.go b/consumer/traces.go index 3852d7d253b..6df24555ee1 100644 --- a/consumer/traces.go +++ b/consumer/traces.go @@ -17,22 +17,22 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) -// Traces is an interface that receives pdata.Traces, processes it +// Traces is an interface that receives ptrace.Traces, processes it // as needed, and sends it to the next processing node if any or to the destination. type Traces interface { baseConsumer - // ConsumeTraces receives pdata.Traces for consumption. - ConsumeTraces(ctx context.Context, td pdata.Traces) error + // ConsumeTraces receives ptrace.Traces for consumption. + ConsumeTraces(ctx context.Context, td ptrace.Traces) error } // ConsumeTracesFunc is a helper function that is similar to ConsumeTraces. 
-type ConsumeTracesFunc func(ctx context.Context, ld pdata.Traces) error +type ConsumeTracesFunc func(ctx context.Context, ld ptrace.Traces) error // ConsumeTraces calls f(ctx, ld). -func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, ld pdata.Traces) error { +func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, ld ptrace.Traces) error { return f(ctx, ld) } diff --git a/consumer/traces_test.go b/consumer/traces_test.go index 41cd269cfe0..b50ba49c771 100644 --- a/consumer/traces_test.go +++ b/consumer/traces_test.go @@ -21,13 +21,13 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) func TestDefaultTraces(t *testing.T) { - cp, err := NewTraces(func(context.Context, pdata.Traces) error { return nil }) + cp, err := NewTraces(func(context.Context, ptrace.Traces) error { return nil }) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, cp.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.Equal(t, Capabilities{MutatesData: false}, cp.Capabilities()) } @@ -38,24 +38,24 @@ func TestNilFuncTraces(t *testing.T) { func TestWithCapabilitiesTraces(t *testing.T) { cp, err := NewTraces( - func(context.Context, pdata.Traces) error { return nil }, + func(context.Context, ptrace.Traces) error { return nil }, WithCapabilities(Capabilities{MutatesData: true})) assert.NoError(t, err) - assert.NoError(t, cp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, cp.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.Equal(t, Capabilities{MutatesData: true}, cp.Capabilities()) } func TestConsumeTraces(t *testing.T) { consumeCalled := false - cp, err := NewTraces(func(context.Context, pdata.Traces) error { consumeCalled = true; return nil }) + cp, err := NewTraces(func(context.Context, ptrace.Traces) error { consumeCalled = true; return nil }) assert.NoError(t, 
err) - assert.NoError(t, cp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, cp.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.True(t, consumeCalled) } func TestConsumeTraces_ReturnError(t *testing.T) { want := errors.New("my_error") - cp, err := NewTraces(func(context.Context, pdata.Traces) error { return want }) + cp, err := NewTraces(func(context.Context, ptrace.Traces) error { return want }) assert.NoError(t, err) - assert.Equal(t, want, cp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.Equal(t, want, cp.ConsumeTraces(context.Background(), ptrace.NewTraces())) } diff --git a/exporter/exporterhelper/common_test.go b/exporter/exporterhelper/common_test.go index 42559e212a2..2e64060d541 100644 --- a/exporter/exporterhelper/common_test.go +++ b/exporter/exporterhelper/common_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) var ( @@ -71,7 +71,7 @@ func checkStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { } func nopTracePusher() consumer.ConsumeTracesFunc { - return func(ctx context.Context, ld pdata.Traces) error { + return func(ctx context.Context, ld ptrace.Traces) error { return nil } } diff --git a/exporter/exporterhelper/internal/persistent_queue_test.go b/exporter/exporterhelper/internal/persistent_queue_test.go index f94a4b35aa4..0854352bad6 100644 --- a/exporter/exporterhelper/internal/persistent_queue_test.go +++ b/exporter/exporterhelper/internal/persistent_queue_test.go @@ -31,7 +31,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/extension/experimental/storage" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + 
"go.opentelemetry.io/collector/model/ptrace" ) func createTestQueue(extension storage.Extension, capacity int) *persistentQueue { @@ -167,20 +168,20 @@ func TestPersistentQueue_ConsumersProducers(t *testing.T) { } } -func newTraces(numTraces int, numSpans int) pdata.Traces { - traces := pdata.NewTraces() +func newTraces(numTraces int, numSpans int) ptrace.Traces { + traces := ptrace.NewTraces() batch := traces.ResourceSpans().AppendEmpty() batch.Resource().Attributes().InsertString("resource-attr", "some-resource") batch.Resource().Attributes().InsertInt("num-traces", int64(numTraces)) batch.Resource().Attributes().InsertInt("num-spans", int64(numSpans)) for i := 0; i < numTraces; i++ { - traceID := pdata.NewTraceID([16]byte{1, 2, 3, byte(i)}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, byte(i)}) ils := batch.ScopeSpans().AppendEmpty() for j := 0; j < numSpans; j++ { span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, byte(j)})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, byte(j)})) span.SetName("should-not-be-changed") span.Attributes().InsertInt("int-attribute", int64(j)) span.Attributes().InsertString("str-attribute-1", "foobar") diff --git a/exporter/exporterhelper/internal/persistent_storage_test.go b/exporter/exporterhelper/internal/persistent_storage_test.go index d0a83ddfc42..feed2e55800 100644 --- a/exporter/exporterhelper/internal/persistent_storage_test.go +++ b/exporter/exporterhelper/internal/persistent_storage_test.go @@ -35,7 +35,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/extension/experimental/storage" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) func createStorageExtension(_ string) storage.Extension { @@ -69,12 +69,12 @@ func createTemporaryDirectory() string { } type fakeTracesRequest struct { - td pdata.Traces + td ptrace.Traces 
processingFinishedCallback func() PersistentRequest } -func newFakeTracesRequest(td pdata.Traces) *fakeTracesRequest { +func newFakeTracesRequest(td ptrace.Traces) *fakeTracesRequest { return &fakeTracesRequest{ td: td, } diff --git a/exporter/exporterhelper/logs.go b/exporter/exporterhelper/logs.go index 3837d9a901c..218e9d1a1e5 100644 --- a/exporter/exporterhelper/logs.go +++ b/exporter/exporterhelper/logs.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) var logsMarshaler = otlp.NewProtobufLogsMarshaler() @@ -32,11 +32,11 @@ var logsUnmarshaler = otlp.NewProtobufLogsUnmarshaler() type logsRequest struct { baseRequest - ld pdata.Logs + ld plog.Logs pusher consumer.ConsumeLogsFunc } -func newLogsRequest(ctx context.Context, ld pdata.Logs, pusher consumer.ConsumeLogsFunc) request { +func newLogsRequest(ctx context.Context, ld plog.Logs, pusher consumer.ConsumeLogsFunc) request { return &logsRequest{ baseRequest: baseRequest{ctx: ctx}, ld: ld, @@ -107,7 +107,7 @@ func NewLogsExporter( } }) - lc, err := consumer.NewLogs(func(ctx context.Context, ld pdata.Logs) error { + lc, err := consumer.NewLogs(func(ctx context.Context, ld plog.Logs) error { req := newLogsRequest(ctx, ld, pusher) err := be.sender.send(req) if errors.Is(err, errSendingQueueIsFull) { diff --git a/exporter/exporterhelper/logs_test.go b/exporter/exporterhelper/logs_test.go index 3bd0dcc9b3f..1b82a12d03f 100644 --- a/exporter/exporterhelper/logs_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -33,7 +33,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/model/plog" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -50,10 +50,10 @@ var ( func TestLogsRequest(t *testing.T) { lr := newLogsRequest(context.Background(), testdata.GenerateLogsOneLogRecord(), nil) - logErr := consumererror.NewLogs(errors.New("some error"), pdata.NewLogs()) + logErr := consumererror.NewLogs(errors.New("some error"), plog.NewLogs()) assert.EqualValues( t, - newLogsRequest(context.Background(), pdata.NewLogs(), nil), + newLogsRequest(context.Background(), plog.NewLogs(), nil), lr.onError(logErr), ) } @@ -77,7 +77,7 @@ func TestLogsExporter_NilPushLogsData(t *testing.T) { } func TestLogsExporter_Default(t *testing.T) { - ld := pdata.NewLogs() + ld := plog.NewLogs() le, err := NewLogsExporter(&fakeLogsExporterConfig, componenttest.NewNopExporterCreateSettings(), newPushLogsData(nil)) assert.NotNil(t, le) assert.NoError(t, err) @@ -98,7 +98,7 @@ func TestLogsExporter_WithCapabilities(t *testing.T) { } func TestLogsExporter_Default_ReturnError(t *testing.T) { - ld := pdata.NewLogs() + ld := plog.NewLogs() want := errors.New("my_error") le, err := NewLogsExporter(&fakeLogsExporterConfig, componenttest.NewNopExporterCreateSettings(), newPushLogsData(want)) require.NoError(t, err) @@ -199,7 +199,7 @@ func TestLogsExporter_WithShutdown_ReturnError(t *testing.T) { } func newPushLogsData(retError error) consumer.ConsumeLogsFunc { - return func(ctx context.Context, td pdata.Logs) error { + return func(ctx context.Context, td plog.Logs) error { return retError } } diff --git a/exporter/exporterhelper/metrics.go b/exporter/exporterhelper/metrics.go index f17eefee955..4cbf78e4730 100644 --- a/exporter/exporterhelper/metrics.go +++ b/exporter/exporterhelper/metrics.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/model/pmetric" ) var metricsMarshaler = otlp.NewProtobufMetricsMarshaler() @@ -32,11 +32,11 @@ var metricsUnmarshaler = otlp.NewProtobufMetricsUnmarshaler() type metricsRequest struct { baseRequest - md pdata.Metrics + md pmetric.Metrics pusher consumer.ConsumeMetricsFunc } -func newMetricsRequest(ctx context.Context, md pdata.Metrics, pusher consumer.ConsumeMetricsFunc) request { +func newMetricsRequest(ctx context.Context, md pmetric.Metrics, pusher consumer.ConsumeMetricsFunc) request { return &metricsRequest{ baseRequest: baseRequest{ctx: ctx}, md: md, @@ -108,7 +108,7 @@ func NewMetricsExporter( } }) - mc, err := consumer.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + mc, err := consumer.NewMetrics(func(ctx context.Context, md pmetric.Metrics) error { req := newMetricsRequest(ctx, md, pusher) err := be.sender.send(req) if errors.Is(err, errSendingQueueIsFull) { diff --git a/exporter/exporterhelper/metrics_test.go b/exporter/exporterhelper/metrics_test.go index 0dfc62db76a..f423e626e0e 100644 --- a/exporter/exporterhelper/metrics_test.go +++ b/exporter/exporterhelper/metrics_test.go @@ -33,7 +33,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -49,10 +49,10 @@ var ( func TestMetricsRequest(t *testing.T) { mr := newMetricsRequest(context.Background(), testdata.GenerateMetricsOneMetric(), nil) - metricsErr := consumererror.NewMetrics(errors.New("some error"), pdata.NewMetrics()) + metricsErr := consumererror.NewMetrics(errors.New("some error"), pmetric.NewMetrics()) assert.EqualValues( t, - newMetricsRequest(context.Background(), pdata.NewMetrics(), nil), + newMetricsRequest(context.Background(), pmetric.NewMetrics(), nil), 
mr.onError(metricsErr), ) } @@ -76,7 +76,7 @@ func TestMetricsExporter_NilPushMetricsData(t *testing.T) { } func TestMetricsExporter_Default(t *testing.T) { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() me, err := NewMetricsExporter(&fakeMetricsExporterConfig, componenttest.NewNopExporterCreateSettings(), newPushMetricsData(nil)) assert.NoError(t, err) assert.NotNil(t, me) @@ -97,7 +97,7 @@ func TestMetricsExporter_WithCapabilities(t *testing.T) { } func TestMetricsExporter_Default_ReturnError(t *testing.T) { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() want := errors.New("my_error") me, err := NewMetricsExporter(&fakeMetricsExporterConfig, componenttest.NewNopExporterCreateSettings(), newPushMetricsData(want)) require.NoError(t, err) @@ -200,7 +200,7 @@ func TestMetricsExporter_WithShutdown_ReturnError(t *testing.T) { } func newPushMetricsData(retError error) consumer.ConsumeMetricsFunc { - return func(ctx context.Context, td pdata.Metrics) error { + return func(ctx context.Context, td pmetric.Metrics) error { return retError } } diff --git a/exporter/exporterhelper/queued_retry_test.go b/exporter/exporterhelper/queued_retry_test.go index 34c9b77daed..1b5d979bba9 100644 --- a/exporter/exporterhelper/queued_retry_test.go +++ b/exporter/exporterhelper/queued_retry_test.go @@ -34,7 +34,7 @@ import ( "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -428,7 +428,7 @@ func (m *mockRequest) export(ctx context.Context) error { } func (m *mockRequest) Marshal() ([]byte, error) { - return otlp.NewProtobufTracesMarshaler().MarshalTraces(pdata.NewTraces()) + return otlp.NewProtobufTracesMarshaler().MarshalTraces(ptrace.NewTraces()) } func (m *mockRequest) onError(error) request { diff --git 
a/exporter/exporterhelper/traces.go b/exporter/exporterhelper/traces.go index 1a10fb4c9c9..fee06861ced 100644 --- a/exporter/exporterhelper/traces.go +++ b/exporter/exporterhelper/traces.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) var tracesMarshaler = otlp.NewProtobufTracesMarshaler() @@ -32,11 +32,11 @@ var tracesUnmarshaler = otlp.NewProtobufTracesUnmarshaler() type tracesRequest struct { baseRequest - td pdata.Traces + td ptrace.Traces pusher consumer.ConsumeTracesFunc } -func newTracesRequest(ctx context.Context, td pdata.Traces, pusher consumer.ConsumeTracesFunc) request { +func newTracesRequest(ctx context.Context, td ptrace.Traces, pusher consumer.ConsumeTracesFunc) request { return &tracesRequest{ baseRequest: baseRequest{ctx: ctx}, td: td, @@ -109,7 +109,7 @@ func NewTracesExporter( } }) - tc, err := consumer.NewTraces(func(ctx context.Context, td pdata.Traces) error { + tc, err := consumer.NewTraces(func(ctx context.Context, td ptrace.Traces) error { req := newTracesRequest(ctx, td, pusher) err := be.sender.send(req) if errors.Is(err, errSendingQueueIsFull) { diff --git a/exporter/exporterhelper/traces_test.go b/exporter/exporterhelper/traces_test.go index efa81c908c9..7dbc7e31e97 100644 --- a/exporter/exporterhelper/traces_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -33,7 +33,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -49,8 +49,8 @@ var ( func TestTracesRequest(t *testing.T) { mr := 
newTracesRequest(context.Background(), testdata.GenerateTracesOneSpan(), nil) - traceErr := consumererror.NewTraces(errors.New("some error"), pdata.NewTraces()) - assert.EqualValues(t, newTracesRequest(context.Background(), pdata.NewTraces(), nil), mr.onError(traceErr)) + traceErr := consumererror.NewTraces(errors.New("some error"), ptrace.NewTraces()) + assert.EqualValues(t, newTracesRequest(context.Background(), ptrace.NewTraces(), nil), mr.onError(traceErr)) } func TestTracesExporter_InvalidName(t *testing.T) { @@ -72,7 +72,7 @@ func TestTracesExporter_NilPushTraceData(t *testing.T) { } func TestTracesExporter_Default(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() te, err := NewTracesExporter(&fakeTracesExporterConfig, componenttest.NewNopExporterCreateSettings(), newTraceDataPusher(nil)) assert.NotNil(t, te) assert.NoError(t, err) @@ -93,7 +93,7 @@ func TestTracesExporter_WithCapabilities(t *testing.T) { } func TestTracesExporter_Default_ReturnError(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() want := errors.New("my_error") te, err := NewTracesExporter(&fakeTracesExporterConfig, componenttest.NewNopExporterCreateSettings(), newTraceDataPusher(want)) require.NoError(t, err) @@ -200,7 +200,7 @@ func TestTracesExporter_WithShutdown_ReturnError(t *testing.T) { } func newTraceDataPusher(retError error) consumer.ConsumeTracesFunc { - return func(ctx context.Context, td pdata.Traces) error { + return func(ctx context.Context, td ptrace.Traces) error { return retError } } @@ -225,7 +225,7 @@ func checkRecordedMetricsForTracesExporter(t *testing.T, te component.TracesExpo } func generateTraceTraffic(t *testing.T, tracer trace.Tracer, te component.TracesExporter, numRequests int, wantError error) { - td := pdata.NewTraces() + td := ptrace.NewTraces() td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() ctx, span := tracer.Start(context.Background(), fakeTraceParentSpanName) defer span.End() diff 
--git a/exporter/loggingexporter/logging_exporter.go b/exporter/loggingexporter/logging_exporter.go index 301e1b372b6..5601bddeb55 100644 --- a/exporter/loggingexporter/logging_exporter.go +++ b/exporter/loggingexporter/logging_exporter.go @@ -26,17 +26,19 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/internal/otlptext" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type loggingExporter struct { logger *zap.Logger - logsMarshaler pdata.LogsMarshaler - metricsMarshaler pdata.MetricsMarshaler - tracesMarshaler pdata.TracesMarshaler + logsMarshaler plog.LogsMarshaler + metricsMarshaler pmetric.MetricsMarshaler + tracesMarshaler ptrace.TracesMarshaler } -func (s *loggingExporter) pushTraces(_ context.Context, td pdata.Traces) error { +func (s *loggingExporter) pushTraces(_ context.Context, td ptrace.Traces) error { s.logger.Info("TracesExporter", zap.Int("#spans", td.SpanCount())) if !s.logger.Core().Enabled(zapcore.DebugLevel) { return nil @@ -50,7 +52,7 @@ func (s *loggingExporter) pushTraces(_ context.Context, td pdata.Traces) error { return nil } -func (s *loggingExporter) pushMetrics(_ context.Context, md pdata.Metrics) error { +func (s *loggingExporter) pushMetrics(_ context.Context, md pmetric.Metrics) error { s.logger.Info("MetricsExporter", zap.Int("#metrics", md.MetricCount())) if !s.logger.Core().Enabled(zapcore.DebugLevel) { @@ -65,7 +67,7 @@ func (s *loggingExporter) pushMetrics(_ context.Context, md pdata.Metrics) error return nil } -func (s *loggingExporter) pushLogs(_ context.Context, ld pdata.Logs) error { +func (s *loggingExporter) pushLogs(_ context.Context, ld plog.Logs) error { s.logger.Info("LogsExporter", zap.Int("#logs", ld.LogRecordCount())) if !s.logger.Core().Enabled(zapcore.DebugLevel) { diff --git 
a/exporter/loggingexporter/logging_exporter_test.go b/exporter/loggingexporter/logging_exporter_test.go index d6ec66b5af5..8050623669f 100644 --- a/exporter/loggingexporter/logging_exporter_test.go +++ b/exporter/loggingexporter/logging_exporter_test.go @@ -26,7 +26,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestLoggingTracesExporterNoErrors(t *testing.T) { @@ -34,7 +36,7 @@ func TestLoggingTracesExporterNoErrors(t *testing.T) { require.NotNil(t, lte) assert.NoError(t, err) - assert.NoError(t, lte.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, lte.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.NoError(t, lte.ConsumeTraces(context.Background(), testdata.GenerateTracesTwoSpansSameResourceOneDifferent())) assert.NoError(t, lte.Shutdown(context.Background())) @@ -45,7 +47,7 @@ func TestLoggingMetricsExporterNoErrors(t *testing.T) { require.NotNil(t, lme) assert.NoError(t, err) - assert.NoError(t, lme.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, lme.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GeneratMetricsAllTypesWithSampleDatapoints())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsAllTypesEmptyDataPoint())) assert.NoError(t, lme.ConsumeMetrics(context.Background(), testdata.GenerateMetricsMetricTypeInvalid())) @@ -58,7 +60,7 @@ func TestLoggingLogsExporterNoErrors(t *testing.T) { require.NotNil(t, lle) assert.NoError(t, err) - assert.NoError(t, lle.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, lle.ConsumeLogs(context.Background(), 
plog.NewLogs())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsOneEmptyResourceLogs())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsNoLogRecords())) assert.NoError(t, lle.ConsumeLogs(context.Background(), testdata.GenerateLogsOneEmptyLogRecord())) @@ -74,23 +76,23 @@ func TestLoggingExporterErrors(t *testing.T) { le.tracesMarshaler = &errMarshaler{err: errWant} le.metricsMarshaler = &errMarshaler{err: errWant} le.logsMarshaler = &errMarshaler{err: errWant} - assert.Equal(t, errWant, le.pushTraces(context.Background(), pdata.NewTraces())) - assert.Equal(t, errWant, le.pushMetrics(context.Background(), pdata.NewMetrics())) - assert.Equal(t, errWant, le.pushLogs(context.Background(), pdata.NewLogs())) + assert.Equal(t, errWant, le.pushTraces(context.Background(), ptrace.NewTraces())) + assert.Equal(t, errWant, le.pushMetrics(context.Background(), pmetric.NewMetrics())) + assert.Equal(t, errWant, le.pushLogs(context.Background(), plog.NewLogs())) } type errMarshaler struct { err error } -func (e errMarshaler) MarshalLogs(pdata.Logs) ([]byte, error) { +func (e errMarshaler) MarshalLogs(plog.Logs) ([]byte, error) { return nil, e.err } -func (e errMarshaler) MarshalMetrics(pdata.Metrics) ([]byte, error) { +func (e errMarshaler) MarshalMetrics(pmetric.Metrics) ([]byte, error) { return nil, e.err } -func (e errMarshaler) MarshalTraces(pdata.Traces) ([]byte, error) { +func (e errMarshaler) MarshalTraces(ptrace.Traces) ([]byte, error) { return nil, e.err } diff --git a/exporter/otlpexporter/otlp.go b/exporter/otlpexporter/otlp.go index e8adbf44417..6516a6aab7b 100644 --- a/exporter/otlpexporter/otlp.go +++ b/exporter/otlpexporter/otlp.go @@ -32,7 +32,9 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + 
"go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type exporter struct { @@ -96,21 +98,21 @@ func (e *exporter) shutdown(context.Context) error { return e.clientConn.Close() } -func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error { +func (e *exporter) pushTraces(ctx context.Context, td ptrace.Traces) error { req := otlpgrpc.NewTracesRequest() req.SetTraces(td) _, err := e.traceExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) return processError(err) } -func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { +func (e *exporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { req := otlpgrpc.NewMetricsRequest() req.SetMetrics(md) _, err := e.metricExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) return processError(err) } -func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error { +func (e *exporter) pushLogs(ctx context.Context, ld plog.Logs) error { req := otlpgrpc.NewLogsRequest() req.SetLogs(ld) _, err := e.logExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) 
diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index 90075c4c075..806295279b8 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -36,7 +36,9 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type mockReceiver struct { @@ -55,7 +57,7 @@ func (r *mockReceiver) GetMetadata() metadata.MD { type mockTracesReceiver struct { mockReceiver - lastRequest pdata.Traces + lastRequest ptrace.Traces } func (r *mockTracesReceiver) Export(ctx context.Context, req otlpgrpc.TracesRequest) (otlpgrpc.TracesResponse, error) { @@ -69,7 +71,7 @@ func (r *mockTracesReceiver) Export(ctx context.Context, req otlpgrpc.TracesRequ return otlpgrpc.NewTracesResponse(), nil } -func (r *mockTracesReceiver) GetLastRequest() pdata.Traces { +func (r *mockTracesReceiver) GetLastRequest() ptrace.Traces { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -108,7 +110,7 @@ func otlpTracesReceiverOnGRPCServer(ln net.Listener, useTLS bool) (*mockTracesRe type mockLogsReceiver struct { mockReceiver - lastRequest pdata.Logs + lastRequest plog.Logs } func (r *mockLogsReceiver) Export(ctx context.Context, req otlpgrpc.LogsRequest) (otlpgrpc.LogsResponse, error) { @@ -122,7 +124,7 @@ func (r *mockLogsReceiver) Export(ctx context.Context, req otlpgrpc.LogsRequest) return otlpgrpc.NewLogsResponse(), nil } -func (r *mockLogsReceiver) GetLastRequest() pdata.Logs { +func (r *mockLogsReceiver) GetLastRequest() plog.Logs { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -146,7 +148,7 @@ func otlpLogsReceiverOnGRPCServer(ln net.Listener) *mockLogsReceiver { type mockMetricsReceiver struct { mockReceiver - lastRequest pdata.Metrics + 
lastRequest pmetric.Metrics } func (r *mockMetricsReceiver) Export(ctx context.Context, req otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { @@ -160,7 +162,7 @@ func (r *mockMetricsReceiver) Export(ctx context.Context, req otlpgrpc.MetricsRe return otlpgrpc.NewMetricsResponse(), nil } -func (r *mockMetricsReceiver) GetLastRequest() pdata.Metrics { +func (r *mockMetricsReceiver) GetLastRequest() pmetric.Metrics { r.mux.Lock() defer r.mux.Unlock() return r.lastRequest @@ -220,7 +222,7 @@ func TestSendTraces(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty trace. - td := pdata.NewTraces() + td := ptrace.NewTraces() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) // Wait until it is received. @@ -314,7 +316,7 @@ func TestSendTracesWhenEndpointHasHttpScheme(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty trace. - td := pdata.NewTraces() + td := ptrace.NewTraces() assert.NoError(t, exp.ConsumeTraces(context.Background(), td)) // Wait until it is received. @@ -366,7 +368,7 @@ func TestSendMetrics(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty metric. - md := pdata.NewMetrics() + md := pmetric.NewMetrics() assert.NoError(t, exp.ConsumeMetrics(context.Background(), md)) // Wait until it is received. @@ -513,7 +515,7 @@ func TestSendTraceDataServerStartWhileRequest(t *testing.T) { cancel() } -func startServerAndMakeRequest(t *testing.T, exp component.TracesExporter, td pdata.Traces, ln net.Listener) { +func startServerAndMakeRequest(t *testing.T, exp component.TracesExporter, td ptrace.Traces, ln net.Listener) { rcv, _ := otlpTracesReceiverOnGRPCServer(ln, false) defer rcv.srv.GracefulStop() // Ensure that initially there is no data in the receiver. @@ -572,7 +574,7 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, 0, atomic.LoadInt32(&rcv.requestCount)) // Send empty request. 
- ld := pdata.NewLogs() + ld := plog.NewLogs() assert.NoError(t, exp.ConsumeLogs(context.Background(), ld)) // Wait until it is received. diff --git a/exporter/otlphttpexporter/otlp.go b/exporter/otlphttpexporter/otlp.go index 717e00e4931..98c60dfb75d 100644 --- a/exporter/otlphttpexporter/otlp.go +++ b/exporter/otlphttpexporter/otlp.go @@ -36,7 +36,9 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type exporter struct { @@ -91,7 +93,7 @@ func (e *exporter) start(_ context.Context, host component.Host) error { return nil } -func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error { +func (e *exporter) pushTraces(ctx context.Context, td ptrace.Traces) error { tr := otlpgrpc.NewTracesRequest() tr.SetTraces(td) request, err := tr.MarshalProto() @@ -102,7 +104,7 @@ func (e *exporter) pushTraces(ctx context.Context, td pdata.Traces) error { return e.export(ctx, e.tracesURL, request) } -func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { +func (e *exporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { tr := otlpgrpc.NewMetricsRequest() tr.SetMetrics(md) request, err := tr.MarshalProto() @@ -112,7 +114,7 @@ func (e *exporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { return e.export(ctx, e.metricsURL, request) } -func (e *exporter) pushLogs(ctx context.Context, ld pdata.Logs) error { +func (e *exporter) pushLogs(ctx context.Context, ld plog.Logs) error { tr := otlpgrpc.NewLogsRequest() tr.SetLogs(ld) request, err := tr.MarshalProto() diff --git a/exporter/otlphttpexporter/otlp_test.go b/exporter/otlphttpexporter/otlp_test.go index d4391d5a1df..178794d88b8 100644 --- 
a/exporter/otlphttpexporter/otlp_test.go +++ b/exporter/otlphttpexporter/otlp_test.go @@ -46,7 +46,10 @@ import ( "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/internal/testutil" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" "go.opentelemetry.io/collector/receiver/otlpreceiver" ) @@ -265,7 +268,7 @@ func TestIssue_4221(t *testing.T) { exp := startTracesExporter(t, "", svr.URL) - md := pdata.NewTraces() + md := ptrace.NewTraces() rms := md.ResourceSpans().AppendEmpty() rms.Resource().Attributes().UpsertString("service.name", "uop.stage-eu-1") rms.Resource().Attributes().UpsertString("outsystems.module.version", "903386") @@ -278,14 +281,14 @@ func TestIssue_4221(t *testing.T) { traceIDBytesSlice, err := hex.DecodeString("4303853f086f4f8c86cf198b6551df84") require.NoError(t, err) copy(traceIDBytes[:], traceIDBytesSlice) - span.SetTraceID(pdata.NewTraceID(traceIDBytes)) + span.SetTraceID(pcommon.NewTraceID(traceIDBytes)) assert.Equal(t, "4303853f086f4f8c86cf198b6551df84", span.TraceID().HexString()) var spanIDBytes [8]byte spanIDBytesSlice, err := hex.DecodeString("e5513c32795c41b9") require.NoError(t, err) copy(spanIDBytes[:], spanIDBytesSlice) - span.SetSpanID(pdata.NewSpanID(spanIDBytes)) + span.SetSpanID(pcommon.NewSpanID(spanIDBytes)) assert.Equal(t, "e5513c32795c41b9", span.SpanID().HexString()) span.SetEndTimestamp(1634684637873000000) @@ -464,7 +467,7 @@ func TestErrorResponses(t *testing.T) { }) // generate traces - traces := pdata.NewTraces() + traces := ptrace.NewTraces() err = exp.ConsumeTraces(context.Background(), traces) assert.Error(t, err) @@ -542,7 +545,7 @@ func TestUserAgent(t *testing.T) { }) // generate data - traces := pdata.NewTraces() + traces := ptrace.NewTraces() err = 
exp.ConsumeTraces(context.Background(), traces) require.NoError(t, err) @@ -587,7 +590,7 @@ func TestUserAgent(t *testing.T) { }) // generate data - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() err = exp.ConsumeMetrics(context.Background(), metrics) require.NoError(t, err) @@ -632,7 +635,7 @@ func TestUserAgent(t *testing.T) { }) // generate data - logs := pdata.NewLogs() + logs := plog.NewLogs() err = exp.ConsumeLogs(context.Background(), logs) require.NoError(t, err) diff --git a/internal/internalconsumertest/err_or_sink_consumer.go b/internal/internalconsumertest/err_or_sink_consumer.go index fb5f8914bf7..856f006481e 100644 --- a/internal/internalconsumertest/err_or_sink_consumer.go +++ b/internal/internalconsumertest/err_or_sink_consumer.go @@ -20,7 +20,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type ErrOrSinkConsumer struct { @@ -42,7 +43,7 @@ func (esc *ErrOrSinkConsumer) Capabilities() consumer.Capabilities { } // ConsumeTraces stores traces to this sink. -func (esc *ErrOrSinkConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (esc *ErrOrSinkConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { esc.mu.Lock() defer esc.mu.Unlock() @@ -54,7 +55,7 @@ func (esc *ErrOrSinkConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces } // ConsumeMetrics stores metrics to this sink. 
-func (esc *ErrOrSinkConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (esc *ErrOrSinkConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { esc.mu.Lock() defer esc.mu.Unlock() diff --git a/internal/otlptext/databuffer.go b/internal/otlptext/databuffer.go index 356e26b91ab..b92cd821996 100644 --- a/internal/otlptext/databuffer.go +++ b/internal/otlptext/databuffer.go @@ -21,7 +21,9 @@ import ( "strconv" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type dataBuffer struct { @@ -37,26 +39,26 @@ func (b *dataBuffer) logAttr(label string, value string) { b.logEntry(" %-15s: %s", label, value) } -func (b *dataBuffer) logAttributes(label string, m pdata.Map) { +func (b *dataBuffer) logAttributes(label string, m pcommon.Map) { if m.Len() == 0 { return } b.logEntry("%s:", label) - m.Range(func(k string, v pdata.Value) bool { + m.Range(func(k string, v pcommon.Value) bool { b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) return true }) } -func (b *dataBuffer) logInstrumentationScope(il pdata.InstrumentationScope) { +func (b *dataBuffer) logInstrumentationScope(il pcommon.InstrumentationScope) { b.logEntry( "InstrumentationScope %s %s", il.Name(), il.Version()) } -func (b *dataBuffer) logMetricDescriptor(md pdata.Metric) { +func (b *dataBuffer) logMetricDescriptor(md pmetric.Metric) { b.logEntry("Descriptor:") b.logEntry(" -> Name: %s", md.Name()) b.logEntry(" -> Description: %s", md.Description()) @@ -64,32 +66,32 @@ func (b *dataBuffer) logMetricDescriptor(md pdata.Metric) { b.logEntry(" -> DataType: %s", md.DataType().String()) } -func (b *dataBuffer) logMetricDataPoints(m pdata.Metric) { +func (b *dataBuffer) logMetricDataPoints(m pmetric.Metric) { switch m.DataType() { - case pdata.MetricDataTypeNone: + case pmetric.MetricDataTypeNone: return 
- case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: b.logNumberDataPoints(m.Gauge().DataPoints()) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: data := m.Sum() b.logEntry(" -> IsMonotonic: %t", data.IsMonotonic()) b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) b.logNumberDataPoints(data.DataPoints()) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: data := m.Histogram() b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) b.logDoubleHistogramDataPoints(data.DataPoints()) - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: data := m.ExponentialHistogram() b.logEntry(" -> AggregationTemporality: %s", data.AggregationTemporality().String()) b.logExponentialHistogramDataPoints(data.DataPoints()) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: data := m.Summary() b.logDoubleSummaryDataPoints(data.DataPoints()) } } -func (b *dataBuffer) logNumberDataPoints(ps pdata.NumberDataPointSlice) { +func (b *dataBuffer) logNumberDataPoints(ps pmetric.NumberDataPointSlice) { for i := 0; i < ps.Len(); i++ { p := ps.At(i) b.logEntry("NumberDataPoints #%d", i) @@ -98,15 +100,15 @@ func (b *dataBuffer) logNumberDataPoints(ps pdata.NumberDataPointSlice) { b.logEntry("StartTimestamp: %s", p.StartTimestamp()) b.logEntry("Timestamp: %s", p.Timestamp()) switch p.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: b.logEntry("Value: %d", p.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: b.logEntry("Value: %f", p.DoubleVal()) } } } -func (b *dataBuffer) logDoubleHistogramDataPoints(ps pdata.HistogramDataPointSlice) { +func (b *dataBuffer) logDoubleHistogramDataPoints(ps pmetric.HistogramDataPointSlice) { for i := 0; i < ps.Len(); i++ { p := ps.At(i) b.logEntry("HistogramDataPoints #%d", i) @@ -133,7 +135,7 @@ func (b 
*dataBuffer) logDoubleHistogramDataPoints(ps pdata.HistogramDataPointSli } } -func (b *dataBuffer) logExponentialHistogramDataPoints(ps pdata.ExponentialHistogramDataPointSlice) { +func (b *dataBuffer) logExponentialHistogramDataPoints(ps pmetric.ExponentialHistogramDataPointSlice) { for i := 0; i < ps.Len(); i++ { p := ps.At(i) b.logEntry("ExponentialHistogramDataPoints #%d", i) @@ -181,7 +183,7 @@ func (b *dataBuffer) logExponentialHistogramDataPoints(ps pdata.ExponentialHisto } } -func (b *dataBuffer) logDoubleSummaryDataPoints(ps pdata.SummaryDataPointSlice) { +func (b *dataBuffer) logDoubleSummaryDataPoints(ps pmetric.SummaryDataPointSlice) { for i := 0; i < ps.Len(); i++ { p := ps.At(i) b.logEntry("SummaryDataPoints #%d", i) @@ -200,11 +202,11 @@ func (b *dataBuffer) logDoubleSummaryDataPoints(ps pdata.SummaryDataPointSlice) } } -func (b *dataBuffer) logDataPointAttributes(labels pdata.Map) { +func (b *dataBuffer) logDataPointAttributes(labels pcommon.Map) { b.logAttributes("Data point attributes", labels) } -func (b *dataBuffer) logEvents(description string, se pdata.SpanEventSlice) { +func (b *dataBuffer) logEvents(description string, se ptrace.SpanEventSlice) { if se.Len() == 0 { return } @@ -221,14 +223,14 @@ func (b *dataBuffer) logEvents(description string, se pdata.SpanEventSlice) { continue } b.logEntry(" -> Attributes:") - e.Attributes().Range(func(k string, v pdata.Value) bool { + e.Attributes().Range(func(k string, v pcommon.Value) bool { b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) return true }) } } -func (b *dataBuffer) logLinks(description string, sl pdata.SpanLinkSlice) { +func (b *dataBuffer) logLinks(description string, sl ptrace.SpanLinkSlice) { if sl.Len() == 0 { return } @@ -246,33 +248,33 @@ func (b *dataBuffer) logLinks(description string, sl pdata.SpanLinkSlice) { continue } b.logEntry(" -> Attributes:") - l.Attributes().Range(func(k string, v pdata.Value) bool { + l.Attributes().Range(func(k string, v 
pcommon.Value) bool { b.logEntry(" -> %s: %s(%s)", k, v.Type().String(), attributeValueToString(v)) return true }) } } -func attributeValueToString(v pdata.Value) string { +func attributeValueToString(v pcommon.Value) string { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return v.StringVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return strconv.FormatBool(v.BoolVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return strconv.FormatInt(v.IntVal(), 10) - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: return sliceToString(v.SliceVal()) - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: return mapToString(v.MapVal()) default: return fmt.Sprintf("", v.Type()) } } -func sliceToString(s pdata.Slice) string { +func sliceToString(s pcommon.Slice) string { var b strings.Builder b.WriteByte('[') for i := 0; i < s.Len(); i++ { @@ -287,11 +289,11 @@ func sliceToString(s pdata.Slice) string { return b.String() } -func mapToString(m pdata.Map) string { +func mapToString(m pcommon.Map) string { var b strings.Builder b.WriteString("{\n") - m.Sort().Range(func(k string, v pdata.Value) bool { + m.Sort().Range(func(k string, v pcommon.Value) bool { fmt.Fprintf(&b, " -> %s: %s(%s)\n", k, v.Type(), v.AsString()) return true }) diff --git a/internal/otlptext/databuffer_test.go b/internal/otlptext/databuffer_test.go index 01b6c23345c..19831e2f954 100644 --- a/internal/otlptext/databuffer_test.go +++ b/internal/otlptext/databuffer_test.go @@ -19,15 +19,15 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" ) func TestNestedArraySerializesCorrectly(t *testing.T) { - ava := pdata.NewValueSlice() + ava := pcommon.NewValueSlice() ava.SliceVal().AppendEmpty().SetStringVal("foo") 
ava.SliceVal().AppendEmpty().SetIntVal(42) - ava2 := pdata.NewValueSlice() + ava2 := pcommon.NewValueSlice() ava2.SliceVal().AppendEmpty().SetStringVal("bar") ava2.CopyTo(ava.SliceVal().AppendEmpty()) @@ -39,11 +39,11 @@ func TestNestedArraySerializesCorrectly(t *testing.T) { } func TestNestedMapSerializesCorrectly(t *testing.T) { - ava := pdata.NewValueMap() + ava := pcommon.NewValueMap() av := ava.MapVal() - av.Insert("foo", pdata.NewValueString("test")) + av.Insert("foo", pcommon.NewValueString("test")) - ava2 := pdata.NewValueMap() + ava2 := pcommon.NewValueMap() av2 := ava2.MapVal() av2.InsertInt("bar", 13) av.Insert("zoo", ava2) diff --git a/internal/otlptext/logs.go b/internal/otlptext/logs.go index f68e3c90a05..855e23d0e51 100644 --- a/internal/otlptext/logs.go +++ b/internal/otlptext/logs.go @@ -15,18 +15,18 @@ package otlptext // import "go.opentelemetry.io/collector/internal/otlptext" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) // NewTextLogsMarshaler returns a serializer.LogsMarshaler to encode to OTLP text bytes. -func NewTextLogsMarshaler() pdata.LogsMarshaler { +func NewTextLogsMarshaler() plog.LogsMarshaler { return textLogsMarshaler{} } type textLogsMarshaler struct{} -// MarshalLogs pdata.Logs to OTLP text. -func (textLogsMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) { +// MarshalLogs plog.Logs to OTLP text. 
+func (textLogsMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { buf := dataBuffer{} rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { diff --git a/internal/otlptext/logs_test.go b/internal/otlptext/logs_test.go index 137e3c73a25..378062b02a3 100644 --- a/internal/otlptext/logs_test.go +++ b/internal/otlptext/logs_test.go @@ -20,19 +20,19 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) func TestLogsText(t *testing.T) { type args struct { - ld pdata.Logs + ld plog.Logs } tests := []struct { name string args args empty bool }{ - {"empty logs", args{pdata.NewLogs()}, true}, + {"empty logs", args{plog.NewLogs()}, true}, {"logs data with empty resource log", args{testdata.GenerateLogsOneEmptyResourceLogs()}, false}, {"logs data with no log records", args{testdata.GenerateLogsNoLogRecords()}, false}, {"logs with one empty log", args{testdata.GenerateLogsOneEmptyLogRecord()}, false}, diff --git a/internal/otlptext/metrics.go b/internal/otlptext/metrics.go index 4e3d5e69b14..adbc4c7bafe 100644 --- a/internal/otlptext/metrics.go +++ b/internal/otlptext/metrics.go @@ -14,19 +14,17 @@ package otlptext // import "go.opentelemetry.io/collector/internal/otlptext" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/model/pmetric" // NewTextMetricsMarshaler returns a serializer.MetricsMarshaler to encode to OTLP text bytes. -func NewTextMetricsMarshaler() pdata.MetricsMarshaler { +func NewTextMetricsMarshaler() pmetric.MetricsMarshaler { return textMetricsMarshaler{} } type textMetricsMarshaler struct{} -// MarshalMetrics pdata.Metrics to OTLP text. -func (textMetricsMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) { +// MarshalMetrics pmetric.Metrics to OTLP text. 
+func (textMetricsMarshaler) MarshalMetrics(md pmetric.Metrics) ([]byte, error) { buf := dataBuffer{} rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { diff --git a/internal/otlptext/metrics_test.go b/internal/otlptext/metrics_test.go index 5213bda8df8..9956d5efa22 100644 --- a/internal/otlptext/metrics_test.go +++ b/internal/otlptext/metrics_test.go @@ -20,19 +20,19 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) func TestMetricsText(t *testing.T) { type args struct { - md pdata.Metrics + md pmetric.Metrics } tests := []struct { name string args args empty bool }{ - {"empty metrics", args{pdata.NewMetrics()}, true}, + {"empty metrics", args{pmetric.NewMetrics()}, true}, {"metrics with all types and datapoints", args{testdata.GeneratMetricsAllTypesWithSampleDatapoints()}, false}, {"metrics with all types without datapoints", args{testdata.GenerateMetricsAllTypesEmptyDataPoint()}, false}, {"metrics with invalid metric types", args{testdata.GenerateMetricsMetricTypeInvalid()}, false}, diff --git a/internal/otlptext/traces.go b/internal/otlptext/traces.go index 7eb86f3c0df..fa2655c032f 100644 --- a/internal/otlptext/traces.go +++ b/internal/otlptext/traces.go @@ -15,18 +15,18 @@ package otlptext // import "go.opentelemetry.io/collector/internal/otlptext" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) // NewTextTracesMarshaler returns a serializer.TracesMarshaler to encode to OTLP text bytes. -func NewTextTracesMarshaler() pdata.TracesMarshaler { +func NewTextTracesMarshaler() ptrace.TracesMarshaler { return textTracesMarshaler{} } type textTracesMarshaler struct{} -// MarshalTraces pdata.Traces to OTLP text. -func (textTracesMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { +// MarshalTraces ptrace.Traces to OTLP text. 
+func (textTracesMarshaler) MarshalTraces(td ptrace.Traces) ([]byte, error) { buf := dataBuffer{} rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { diff --git a/internal/otlptext/traces_test.go b/internal/otlptext/traces_test.go index 875dfb3227a..f5101c369a3 100644 --- a/internal/otlptext/traces_test.go +++ b/internal/otlptext/traces_test.go @@ -20,19 +20,19 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) func TestTracesText(t *testing.T) { type args struct { - td pdata.Traces + td ptrace.Traces } tests := []struct { name string args args empty bool }{ - {"empty traces", args{pdata.NewTraces()}, true}, + {"empty traces", args{ptrace.NewTraces()}, true}, {"traces with two spans", args{testdata.GenerateTracesTwoSpansSameResource()}, false}, } for _, tt := range tests { diff --git a/internal/testcomponents/example_exporter.go b/internal/testcomponents/example_exporter.go index 984a3fb7dd3..fb6c37548ec 100644 --- a/internal/testcomponents/example_exporter.go +++ b/internal/testcomponents/example_exporter.go @@ -20,7 +20,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) var _ config.Unmarshallable = (*ExampleExporter)(nil) @@ -74,9 +76,9 @@ func createLogsExporter(context.Context, component.ExporterCreateSettings, confi // ExampleExporterConsumer stores consumed traces and metrics for testing purposes. 
type ExampleExporterConsumer struct { - Traces []pdata.Traces - Metrics []pdata.Metrics - Logs []pdata.Logs + Traces []ptrace.Traces + Metrics []pmetric.Metrics + Logs []plog.Logs ExporterStarted bool ExporterShutdown bool } @@ -89,8 +91,8 @@ func (exp *ExampleExporterConsumer) Start(_ context.Context, _ component.Host) e return nil } -// ConsumeTraces receives pdata.Traces for processing by the consumer.Traces. -func (exp *ExampleExporterConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error { +// ConsumeTraces receives ptrace.Traces for processing by the consumer.Traces. +func (exp *ExampleExporterConsumer) ConsumeTraces(_ context.Context, td ptrace.Traces) error { exp.Traces = append(exp.Traces, td) return nil } @@ -99,13 +101,13 @@ func (exp *ExampleExporterConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -// ConsumeMetrics receives pdata.Metrics for processing by the Metrics. -func (exp *ExampleExporterConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +// ConsumeMetrics receives pmetric.Metrics for processing by the Metrics. 
+func (exp *ExampleExporterConsumer) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { exp.Metrics = append(exp.Metrics, md) return nil } -func (exp *ExampleExporterConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error { +func (exp *ExampleExporterConsumer) ConsumeLogs(_ context.Context, ld plog.Logs) error { exp.Logs = append(exp.Logs, ld) return nil } diff --git a/internal/testcomponents/example_exporter_test.go b/internal/testcomponents/example_exporter_test.go index 48ae7907b34..3f026a4fd60 100644 --- a/internal/testcomponents/example_exporter_test.go +++ b/internal/testcomponents/example_exporter_test.go @@ -21,7 +21,8 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestExampleExporterConsumer(t *testing.T) { @@ -33,12 +34,12 @@ func TestExampleExporterConsumer(t *testing.T) { assert.True(t, exp.ExporterStarted) assert.Equal(t, 0, len(exp.Traces)) - err = exp.ConsumeTraces(context.Background(), pdata.Traces{}) + err = exp.ConsumeTraces(context.Background(), ptrace.Traces{}) assert.NoError(t, err) assert.Equal(t, 1, len(exp.Traces)) assert.Equal(t, 0, len(exp.Metrics)) - err = exp.ConsumeMetrics(context.Background(), pdata.Metrics{}) + err = exp.ConsumeMetrics(context.Background(), pmetric.Metrics{}) assert.NoError(t, err) assert.Equal(t, 1, len(exp.Metrics)) diff --git a/internal/testdata/common.go b/internal/testdata/common.go index 73b454ae7ce..bbfe0552b61 100644 --- a/internal/testdata/common.go +++ b/internal/testdata/common.go @@ -15,16 +15,16 @@ package testdata import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" ) var ( - resourceAttributes1 = pdata.NewMapFromRaw(map[string]interface{}{"resource-attr": "resource-attr-val-1"}) - resourceAttributes2 = 
pdata.NewMapFromRaw(map[string]interface{}{"resource-attr": "resource-attr-val-2"}) - spanEventAttributes = pdata.NewMapFromRaw(map[string]interface{}{"span-event-attr": "span-event-attr-val"}) - spanLinkAttributes = pdata.NewMapFromRaw(map[string]interface{}{"span-link-attr": "span-link-attr-val"}) - spanAttributes = pdata.NewMapFromRaw(map[string]interface{}{"span-attr": "span-attr-val"}) - metricAttachment = pdata.NewMapFromRaw(map[string]interface{}{"exemplar-attachment": "exemplar-attachment-value"}) + resourceAttributes1 = pcommon.NewMapFromRaw(map[string]interface{}{"resource-attr": "resource-attr-val-1"}) + resourceAttributes2 = pcommon.NewMapFromRaw(map[string]interface{}{"resource-attr": "resource-attr-val-2"}) + spanEventAttributes = pcommon.NewMapFromRaw(map[string]interface{}{"span-event-attr": "span-event-attr-val"}) + spanLinkAttributes = pcommon.NewMapFromRaw(map[string]interface{}{"span-link-attr": "span-link-attr-val"}) + spanAttributes = pcommon.NewMapFromRaw(map[string]interface{}{"span-attr": "span-attr-val"}) + metricAttachment = pcommon.NewMapFromRaw(map[string]interface{}{"exemplar-attachment": "exemplar-attachment-value"}) ) const ( @@ -36,52 +36,52 @@ const ( TestLabelValue3 = "label-value-3" ) -func initResourceAttributes1(dest pdata.Map) { +func initResourceAttributes1(dest pcommon.Map) { dest.Clear() resourceAttributes1.CopyTo(dest) } -func initResourceAttributes2(dest pdata.Map) { +func initResourceAttributes2(dest pcommon.Map) { dest.Clear() resourceAttributes2.CopyTo(dest) } -func initSpanAttributes(dest pdata.Map) { +func initSpanAttributes(dest pcommon.Map) { dest.Clear() spanAttributes.CopyTo(dest) } -func initSpanEventAttributes(dest pdata.Map) { +func initSpanEventAttributes(dest pcommon.Map) { dest.Clear() spanEventAttributes.CopyTo(dest) } -func initSpanLinkAttributes(dest pdata.Map) { +func initSpanLinkAttributes(dest pcommon.Map) { dest.Clear() spanLinkAttributes.CopyTo(dest) } -func initMetricAttachment(dest pdata.Map) { 
+func initMetricAttachment(dest pcommon.Map) { dest.Clear() metricAttachment.CopyTo(dest) } -func initMetricAttributes1(dest pdata.Map) { +func initMetricAttributes1(dest pcommon.Map) { dest.Clear() dest.InsertString(TestLabelKey1, TestLabelValue1) } -func initMetricAttributes12(dest pdata.Map) { +func initMetricAttributes12(dest pcommon.Map) { initMetricAttributes1(dest) dest.InsertString(TestLabelKey2, TestLabelValue2) } -func initMetricAttributes13(dest pdata.Map) { +func initMetricAttributes13(dest pcommon.Map) { initMetricAttributes1(dest) dest.InsertString(TestLabelKey3, TestLabelValue3) } -func initMetricAttributes2(dest pdata.Map) { +func initMetricAttributes2(dest pcommon.Map) { dest.Clear() dest.InsertString(TestLabelKey2, TestLabelValue2) } diff --git a/internal/testdata/log.go b/internal/testdata/log.go index 01167385829..087747a051c 100644 --- a/internal/testdata/log.go +++ b/internal/testdata/log.go @@ -17,40 +17,41 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/plog" ) var ( TestLogTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestLogTimestamp = pdata.NewTimestampFromTime(TestLogTime) + TestLogTimestamp = pcommon.NewTimestampFromTime(TestLogTime) ) -func GenerateLogsOneEmptyResourceLogs() pdata.Logs { - ld := pdata.NewLogs() +func GenerateLogsOneEmptyResourceLogs() plog.Logs { + ld := plog.NewLogs() ld.ResourceLogs().AppendEmpty() return ld } -func GenerateLogsNoLogRecords() pdata.Logs { +func GenerateLogsNoLogRecords() plog.Logs { ld := GenerateLogsOneEmptyResourceLogs() initResource1(ld.ResourceLogs().At(0).Resource()) return ld } -func GenerateLogsOneEmptyLogRecord() pdata.Logs { +func GenerateLogsOneEmptyLogRecord() plog.Logs { ld := GenerateLogsNoLogRecords() rs0 := ld.ResourceLogs().At(0) rs0.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() return ld } -func GenerateLogsOneLogRecord() pdata.Logs { +func 
GenerateLogsOneLogRecord() plog.Logs { ld := GenerateLogsOneEmptyLogRecord() fillLogOne(ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0)) return ld } -func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { +func GenerateLogsTwoLogRecordsSameResource() plog.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords() fillLogOne(logs.At(0)) @@ -58,8 +59,8 @@ func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { return ld } -func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { - ld := pdata.NewLogs() +func GenerateLogsTwoLogRecordsSameResourceOneDifferent() plog.Logs { + ld := plog.NewLogs() rl0 := ld.ResourceLogs().AppendEmpty() initResource1(rl0.Resource()) logs := rl0.ScopeLogs().AppendEmpty().LogRecords() @@ -70,13 +71,13 @@ func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { fillLogThree(rl1.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) return ld } -func fillLogOne(log pdata.LogRecord) { +func fillLogOne(log plog.LogRecord) { log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityNumber(plog.SeverityNumberINFO) log.SetSeverityText("Info") - log.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) - log.SetTraceID(pdata.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) + log.SetSpanID(pcommon.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) + log.SetTraceID(pcommon.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) attrs := log.Attributes() attrs.InsertString("app", "server") @@ -85,10 +86,10 @@ func fillLogOne(log pdata.LogRecord) { log.Body().SetStringVal("This is a log message") } -func fillLogTwo(log pdata.LogRecord) { +func fillLogTwo(log plog.LogRecord) { log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityNumber(plog.SeverityNumberINFO) log.SetSeverityText("Info") attrs := log.Attributes() @@ 
-98,21 +99,21 @@ func fillLogTwo(log pdata.LogRecord) { log.Body().SetStringVal("something happened") } -func fillLogThree(log pdata.LogRecord) { +func fillLogThree(log plog.LogRecord) { log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberWARN) + log.SetSeverityNumber(plog.SeverityNumberWARN) log.SetSeverityText("Warning") log.Body().SetStringVal("something else happened") } -func GenerateLogsManyLogRecordsSameResource(count int) pdata.Logs { +func GenerateLogsManyLogRecordsSameResource(count int) plog.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords() logs.EnsureCapacity(count) for i := 0; i < count; i++ { - var l pdata.LogRecord + var l plog.LogRecord if i < logs.Len() { l = logs.At(i) } else { diff --git a/internal/testdata/metric.go b/internal/testdata/metric.go index 4fc2fe1e4cb..003daf9081c 100644 --- a/internal/testdata/metric.go +++ b/internal/testdata/metric.go @@ -17,18 +17,19 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/pmetric" ) var ( TestMetricStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) - TestMetricStartTimestamp = pdata.NewTimestampFromTime(TestMetricStartTime) + TestMetricStartTimestamp = pcommon.NewTimestampFromTime(TestMetricStartTime) TestMetricExemplarTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) - TestMetricExemplarTimestamp = pdata.NewTimestampFromTime(TestMetricExemplarTime) + TestMetricExemplarTimestamp = pcommon.NewTimestampFromTime(TestMetricExemplarTime) TestMetricTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestMetricTimestamp = pdata.NewTimestampFromTime(TestMetricTime) + TestMetricTimestamp = pcommon.NewTimestampFromTime(TestMetricTime) ) const ( @@ -41,33 +42,33 @@ const ( TestSummaryMetricName = "summary" ) -func GenerateMetricsOneEmptyResourceMetrics() 
pdata.Metrics { - md := pdata.NewMetrics() +func GenerateMetricsOneEmptyResourceMetrics() pmetric.Metrics { + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty() return md } -func GenerateMetricsNoLibraries() pdata.Metrics { +func GenerateMetricsNoLibraries() pmetric.Metrics { md := GenerateMetricsOneEmptyResourceMetrics() ms0 := md.ResourceMetrics().At(0) initResource1(ms0.Resource()) return md } -func GenerateMetricsOneEmptyInstrumentationScope() pdata.Metrics { +func GenerateMetricsOneEmptyInstrumentationScope() pmetric.Metrics { md := GenerateMetricsNoLibraries() md.ResourceMetrics().At(0).ScopeMetrics().AppendEmpty() return md } -func GenerateMetricsOneMetric() pdata.Metrics { +func GenerateMetricsOneMetric() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() rm0ils0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) return md } -func GenerateMetricsTwoMetrics() pdata.Metrics { +func GenerateMetricsTwoMetrics() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() rm0ils0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) @@ -75,40 +76,40 @@ func GenerateMetricsTwoMetrics() pdata.Metrics { return md } -func GenerateMetricsAllTypesEmptyDataPoint() pdata.Metrics { +func GenerateMetricsAllTypesEmptyDataPoint() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) ms := ilm0.Metrics() doubleGauge := ms.AppendEmpty() - initMetric(doubleGauge, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + initMetric(doubleGauge, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) doubleGauge.Gauge().DataPoints().AppendEmpty() intGauge := ms.AppendEmpty() - initMetric(intGauge, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) + initMetric(intGauge, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) intGauge.Gauge().DataPoints().AppendEmpty() doubleSum 
:= ms.AppendEmpty() - initMetric(doubleSum, TestSumDoubleMetricName, pdata.MetricDataTypeSum) + initMetric(doubleSum, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) doubleSum.Sum().DataPoints().AppendEmpty() intSum := ms.AppendEmpty() - initMetric(intSum, TestSumIntMetricName, pdata.MetricDataTypeSum) + initMetric(intSum, TestSumIntMetricName, pmetric.MetricDataTypeSum) intSum.Sum().DataPoints().AppendEmpty() histogram := ms.AppendEmpty() - initMetric(histogram, TestHistogramMetricName, pdata.MetricDataTypeHistogram) + initMetric(histogram, TestHistogramMetricName, pmetric.MetricDataTypeHistogram) histogram.Histogram().DataPoints().AppendEmpty() summary := ms.AppendEmpty() - initMetric(summary, TestSummaryMetricName, pdata.MetricDataTypeSummary) + initMetric(summary, TestSummaryMetricName, pmetric.MetricDataTypeSummary) summary.Summary().DataPoints().AppendEmpty() return md } -func GenerateMetricsMetricTypeInvalid() pdata.Metrics { +func GenerateMetricsMetricTypeInvalid() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) - initMetric(ilm0.Metrics().AppendEmpty(), TestSumIntMetricName, pdata.MetricDataTypeNone) + initMetric(ilm0.Metrics().AppendEmpty(), TestSumIntMetricName, pmetric.MetricDataTypeNone) return md } -func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { +func GeneratMetricsAllTypesWithSampleDatapoints() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() ilm := md.ResourceMetrics().At(0).ScopeMetrics().At(0) @@ -123,8 +124,8 @@ func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { return md } -func initGaugeIntMetric(im pdata.Metric) { - initMetric(im, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) +func initGaugeIntMetric(im pmetric.Metric) { + initMetric(im, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) idps := im.Gauge().DataPoints() idp0 := idps.AppendEmpty() @@ -139,8 +140,8 @@ func initGaugeIntMetric(im 
pdata.Metric) { idp1.SetIntVal(456) } -func initGaugeDoubleMetric(im pdata.Metric) { - initMetric(im, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) +func initGaugeDoubleMetric(im pmetric.Metric) { + initMetric(im, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) idps := im.Gauge().DataPoints() idp0 := idps.AppendEmpty() @@ -155,8 +156,8 @@ func initGaugeDoubleMetric(im pdata.Metric) { idp1.SetDoubleVal(4.56) } -func initSumIntMetric(im pdata.Metric) { - initMetric(im, TestSumIntMetricName, pdata.MetricDataTypeSum) +func initSumIntMetric(im pmetric.Metric) { + initMetric(im, TestSumIntMetricName, pmetric.MetricDataTypeSum) idps := im.Sum().DataPoints() idp0 := idps.AppendEmpty() @@ -171,8 +172,8 @@ func initSumIntMetric(im pdata.Metric) { idp1.SetIntVal(456) } -func initSumDoubleMetric(dm pdata.Metric) { - initMetric(dm, TestSumDoubleMetricName, pdata.MetricDataTypeSum) +func initSumDoubleMetric(dm pmetric.Metric) { + initMetric(dm, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) ddps := dm.Sum().DataPoints() ddp0 := ddps.AppendEmpty() @@ -188,8 +189,8 @@ func initSumDoubleMetric(dm pdata.Metric) { ddp1.SetDoubleVal(4.56) } -func initHistogramMetric(hm pdata.Metric) { - initMetric(hm, TestHistogramMetricName, pdata.MetricDataTypeHistogram) +func initHistogramMetric(hm pmetric.Metric) { + initMetric(hm, TestHistogramMetricName, pmetric.MetricDataTypeHistogram) hdps := hm.Histogram().DataPoints() hdp0 := hdps.AppendEmpty() @@ -213,8 +214,8 @@ func initHistogramMetric(hm pdata.Metric) { hdp1.SetExplicitBounds([]float64{1}) } -func initExponentialHistogramMetric(hm pdata.Metric) { - initMetric(hm, TestExponentialHistogramMetricName, pdata.MetricDataTypeExponentialHistogram) +func initExponentialHistogramMetric(hm pmetric.Metric) { + initMetric(hm, TestExponentialHistogramMetricName, pmetric.MetricDataTypeExponentialHistogram) hdps := hm.ExponentialHistogram().DataPoints() hdp0 := hdps.AppendEmpty() @@ -264,8 +265,8 @@ func 
initExponentialHistogramMetric(hm pdata.Metric) { initMetricAttachment(exemplar.FilteredAttributes()) } -func initSummaryMetric(sm pdata.Metric) { - initMetric(sm, TestSummaryMetricName, pdata.MetricDataTypeSummary) +func initSummaryMetric(sm pmetric.Metric) { + initMetric(sm, TestSummaryMetricName, pmetric.MetricDataTypeSummary) sdps := sm.Summary().DataPoints() sdp0 := sdps.AppendEmpty() @@ -287,26 +288,26 @@ func initSummaryMetric(sm pdata.Metric) { quantile.SetValue(15) } -func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { +func initMetric(m pmetric.Metric, name string, ty pmetric.MetricDataType) { m.SetName(name) m.SetDescription("") m.SetUnit("1") m.SetDataType(ty) switch ty { - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: sum := m.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - case pdata.MetricDataTypeHistogram: + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + case pmetric.MetricDataTypeHistogram: histo := m.Histogram() - histo.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - case pdata.MetricDataTypeExponentialHistogram: + histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + case pmetric.MetricDataTypeExponentialHistogram: histo := m.ExponentialHistogram() - histo.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) } } -func GenerateMetricsManyMetricsSameResource(metricsCount int) pdata.Metrics { +func GenerateMetricsManyMetricsSameResource(metricsCount int) pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationScope() rs0ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) rs0ilm0.Metrics().EnsureCapacity(metricsCount) diff --git a/internal/testdata/resource.go b/internal/testdata/resource.go index f5d2b8f726d..f64ec7f05c5 100644 --- a/internal/testdata/resource.go 
+++ b/internal/testdata/resource.go @@ -14,14 +14,12 @@ package testdata -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/model/pcommon" -func initResource1(r pdata.Resource) { +func initResource1(r pcommon.Resource) { initResourceAttributes1(r.Attributes()) } -func initResource2(r pdata.Resource) { +func initResource2(r pcommon.Resource) { initResourceAttributes2(r.Attributes()) } diff --git a/internal/testdata/trace.go b/internal/testdata/trace.go index e96207c6d63..baa23617ce2 100644 --- a/internal/testdata/trace.go +++ b/internal/testdata/trace.go @@ -17,47 +17,48 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/ptrace" ) var ( TestSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) - TestSpanStartTimestamp = pdata.NewTimestampFromTime(TestSpanStartTime) + TestSpanStartTimestamp = pcommon.NewTimestampFromTime(TestSpanStartTime) TestSpanEventTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) - TestSpanEventTimestamp = pdata.NewTimestampFromTime(TestSpanEventTime) + TestSpanEventTimestamp = pcommon.NewTimestampFromTime(TestSpanEventTime) TestSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestSpanEndTimestamp = pdata.NewTimestampFromTime(TestSpanEndTime) + TestSpanEndTimestamp = pcommon.NewTimestampFromTime(TestSpanEndTime) ) -func GenerateTracesOneEmptyResourceSpans() pdata.Traces { - td := pdata.NewTraces() +func GenerateTracesOneEmptyResourceSpans() ptrace.Traces { + td := ptrace.NewTraces() td.ResourceSpans().AppendEmpty() return td } -func GenerateTracesNoLibraries() pdata.Traces { +func GenerateTracesNoLibraries() ptrace.Traces { td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) initResource1(rs0.Resource()) return td } -func GenerateTracesOneEmptyInstrumentationScope() pdata.Traces { +func 
GenerateTracesOneEmptyInstrumentationScope() ptrace.Traces { td := GenerateTracesNoLibraries() td.ResourceSpans().At(0).ScopeSpans().AppendEmpty() return td } -func GenerateTracesOneSpan() pdata.Traces { +func GenerateTracesOneSpan() ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationScope() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) fillSpanOne(rs0ils0.Spans().AppendEmpty()) return td } -func GenerateTracesTwoSpansSameResource() pdata.Traces { +func GenerateTracesTwoSpansSameResource() ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationScope() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) fillSpanOne(rs0ils0.Spans().AppendEmpty()) @@ -65,8 +66,8 @@ func GenerateTracesTwoSpansSameResource() pdata.Traces { return td } -func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { - td := pdata.NewTraces() +func GenerateTracesTwoSpansSameResourceOneDifferent() ptrace.Traces { + td := ptrace.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() initResource1(rs0.Resource()) rs0ils0 := rs0.ScopeSpans().AppendEmpty() @@ -79,7 +80,7 @@ func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { return td } -func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { +func GenerateTracesManySpansSameResource(spanCount int) ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationScope() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) rs0ils0.Spans().EnsureCapacity(spanCount) @@ -89,13 +90,13 @@ func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { return td } -func fillSpanOne(span pdata.Span) { +func fillSpanOne(span ptrace.Span) { span.SetName("operationA") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) span.SetDroppedAttributesCount(1) - span.SetTraceID(pdata.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10})) - span.SetSpanID(pdata.NewSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 
0x15, 0x16, 0x17, 0x18})) + span.SetTraceID(pcommon.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18})) evs := span.Events() ev0 := evs.AppendEmpty() ev0.SetTimestamp(TestSpanEventTimestamp) @@ -108,11 +109,11 @@ func fillSpanOne(span pdata.Span) { ev1.SetDroppedAttributesCount(2) span.SetDroppedEventsCount(1) status := span.Status() - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) status.SetMessage("status-cancelled") } -func fillSpanTwo(span pdata.Span) { +func fillSpanTwo(span ptrace.Span) { span.SetName("operationB") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) @@ -124,7 +125,7 @@ func fillSpanTwo(span pdata.Span) { span.SetDroppedLinksCount(3) } -func fillSpanThree(span pdata.Span) { +func fillSpanThree(span ptrace.Span) { span.SetName("operationC") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) diff --git a/model/internal/cmd/pdatagen/internal/base_slices.go b/model/internal/cmd/pdatagen/internal/base_slices.go index ca0a25ab828..6acd7c4cb35 100644 --- a/model/internal/cmd/pdatagen/internal/base_slices.go +++ b/model/internal/cmd/pdatagen/internal/base_slices.go @@ -112,10 +112,10 @@ func fillTest${structName}(tv ${structName}) { }` const commonSliceAliasTemplate = `// ${structName} is an alias for pdata.${structName} struct. -type ${structName} = pdata.${structName} +${extraStructComment}type ${structName} = pdata.${structName} // New${structName} is an alias for a function to create ${structName}. -var New${structName} = pdata.New${structName}` +${extraNewComment}var New${structName} = pdata.New${structName}` const slicePtrTemplate = `// ${structName} logically represents a slice of ${elementName}. 
// @@ -477,11 +477,21 @@ func (ss *sliceOfPtrs) templateFields() func(name string) string { } } -func (ss *sliceOfPtrs) generateAlias(sb *strings.Builder) { +func (ss *sliceOfPtrs) generateAlias(sb *strings.Builder, deprecatedInFavor string) { sb.WriteString(os.Expand(commonSliceAliasTemplate, func(name string) string { switch name { case "structName": return ss.structName + case "extraStructComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + "." + ss.structName + " instead.\n" + } + return "" + case "extraNewComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + ".New" + ss.structName + " instead.\n" + } + return "" default: panic(name) } @@ -530,11 +540,21 @@ func (ss *sliceOfValues) templateFields() func(name string) string { } } -func (ss *sliceOfValues) generateAlias(sb *strings.Builder) { +func (ss *sliceOfValues) generateAlias(sb *strings.Builder, deprecatedInFavor string) { sb.WriteString(os.Expand(commonSliceAliasTemplate, func(name string) string { switch name { case "structName": return ss.structName + case "extraStructComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + "." 
+ ss.structName + " instead.\n" + } + return "" + case "extraNewComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + ".New" + ss.structName + " instead.\n" + } + return "" default: panic(name) } diff --git a/model/internal/cmd/pdatagen/internal/base_structs.go b/model/internal/cmd/pdatagen/internal/base_structs.go index aed6e312dfe..878623dfddb 100644 --- a/model/internal/cmd/pdatagen/internal/base_structs.go +++ b/model/internal/cmd/pdatagen/internal/base_structs.go @@ -79,10 +79,10 @@ const messageValueFillTestHeaderTemplate = `func fillTest${structName}(tv ${stru const messageValueFillTestFooterTemplate = `}` const messageValueAliasTemplate = `// ${structName} is an alias for pdata.${structName} struct. -type ${structName} = pdata.${structName} +${extraStructComment}type ${structName} = pdata.${structName} // New${structName} is an alias for a function to create a new empty ${structName}. -var New${structName} = pdata.New${structName}` +${extraNewComment}var New${structName} = pdata.New${structName}` const newLine = "\n" @@ -97,7 +97,7 @@ type baseStruct interface { } type aliasGenerator interface { - generateAlias(sb *strings.Builder) + generateAlias(sb *strings.Builder, deprecatedInFavor string) } type messageValueStruct struct { @@ -196,11 +196,21 @@ func (ms *messageValueStruct) generateTestValueHelpers(sb *strings.Builder) { })) } -func (ms *messageValueStruct) generateAlias(sb *strings.Builder) { +func (ms *messageValueStruct) generateAlias(sb *strings.Builder, deprecatedInFavor string) { sb.WriteString(os.Expand(messageValueAliasTemplate, func(name string) string { switch name { case "structName": return ms.structName + case "extraStructComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + "." 
+ ms.structName + " instead.\n" + } + return "" + case "extraNewComment": + if deprecatedInFavor != "" { + return "// Deprecated: [v0.49.0] Use " + deprecatedInFavor + ".New" + ms.structName + " instead.\n" + } + return "" default: panic(name) } diff --git a/model/internal/cmd/pdatagen/internal/files.go b/model/internal/cmd/pdatagen/internal/files.go index ec851363073..e9729dc1c91 100644 --- a/model/internal/cmd/pdatagen/internal/files.go +++ b/model/internal/cmd/pdatagen/internal/files.go @@ -31,9 +31,7 @@ const header = `// Copyright The OpenTelemetry Authors // limitations under the License. // Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. -// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". - -package pdata` +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go".` // AllFiles is a list of all files that needs to be generated. var AllFiles = []*File{ @@ -57,9 +55,8 @@ type File struct { func (f *File) GenerateFile() string { var sb strings.Builder - // Write headers - sb.WriteString(header) - sb.WriteString(newLine + newLine) + generateHeader(&sb, "pdata") + // Add imports sb.WriteString("import (" + newLine) for _, imp := range f.imports { @@ -83,9 +80,8 @@ func (f *File) GenerateFile() string { func (f *File) GenerateTestFile() string { var sb strings.Builder - // Write headers - sb.WriteString(header) - sb.WriteString(newLine + newLine) + generateHeader(&sb, "pdata") + // Add imports sb.WriteString("import (" + newLine) for _, imp := range f.testImports { @@ -111,12 +107,10 @@ func (f *File) GenerateTestFile() string { } // GenerateFile generates the aliases for data structures for this File. 
-func (f *File) GenerateAliasFile() string { +func (f *File) GenerateAliasFile(packageName string, deprecatedInFavor string) string { var sb strings.Builder - // Write headers - sb.WriteString(header) - sb.WriteString(newLine + newLine) + generateHeader(&sb, packageName) // Add import sb.WriteString("import \"go.opentelemetry.io/collector/model/internal/pdata\"" + newLine + newLine) @@ -124,9 +118,20 @@ func (f *File) GenerateAliasFile() string { // Write all types and funcs for _, s := range f.structs { if ag, ok := s.(aliasGenerator); ok { - ag.generateAlias(&sb) + ag.generateAlias(&sb, deprecatedInFavor) } } sb.WriteString(newLine) return sb.String() } + +func (f *File) IsCommon() bool { + return f.Name == "resource" || f.Name == "common" +} + +func generateHeader(sb *strings.Builder, packageName string) { + sb.WriteString(header) + sb.WriteString(newLine + newLine) + sb.WriteString("package " + packageName) + sb.WriteString(newLine + newLine) +} diff --git a/model/internal/cmd/pdatagen/internal/log_structs.go b/model/internal/cmd/pdatagen/internal/log_structs.go index d743cbcaa59..b4acc24d0c2 100644 --- a/model/internal/cmd/pdatagen/internal/log_structs.go +++ b/model/internal/cmd/pdatagen/internal/log_structs.go @@ -15,7 +15,7 @@ package internal // import "go.opentelemetry.io/collector/model/internal/cmd/pdatagen/internal" var logFile = &File{ - Name: "log", + Name: "plog", imports: []string{ `"sort"`, ``, diff --git a/model/internal/cmd/pdatagen/internal/metrics_structs.go b/model/internal/cmd/pdatagen/internal/metrics_structs.go index 37c3be39f09..4794276e421 100644 --- a/model/internal/cmd/pdatagen/internal/metrics_structs.go +++ b/model/internal/cmd/pdatagen/internal/metrics_structs.go @@ -15,7 +15,7 @@ package internal // import "go.opentelemetry.io/collector/model/internal/cmd/pdatagen/internal" var metricsFile = &File{ - Name: "metrics", + Name: "pmetric", imports: []string{ `"sort"`, ``, diff --git 
a/model/internal/cmd/pdatagen/internal/trace_structs.go b/model/internal/cmd/pdatagen/internal/trace_structs.go index b88fe7a8e5d..4b794eedeed 100644 --- a/model/internal/cmd/pdatagen/internal/trace_structs.go +++ b/model/internal/cmd/pdatagen/internal/trace_structs.go @@ -15,7 +15,7 @@ package internal // import "go.opentelemetry.io/collector/model/internal/cmd/pdatagen/internal" var traceFile = &File{ - Name: "trace", + Name: "ptrace", imports: []string{ `"sort"`, ``, diff --git a/model/internal/cmd/pdatagen/main.go b/model/internal/cmd/pdatagen/main.go index b65b9e4077d..d552db765ec 100644 --- a/model/internal/cmd/pdatagen/main.go +++ b/model/internal/cmd/pdatagen/main.go @@ -16,6 +16,7 @@ package main import ( "os" + "path/filepath" "go.opentelemetry.io/collector/model/internal/cmd/pdatagen/internal" ) @@ -40,7 +41,18 @@ func main() { check(f.Close()) f, err = os.Create("./model/pdata/generated_" + fp.Name + "_alias.go") check(err) - _, err = f.WriteString(fp.GenerateAliasFile()) + fileName := "generated_alias.go" + packageName := fp.Name + if fp.IsCommon() { + fileName = "generated_" + fp.Name + "_alias.go" + packageName = "pcommon" + } + _, err = f.WriteString(fp.GenerateAliasFile("pdata", packageName)) + check(err) + check(f.Close()) + f, err = os.Create(filepath.Clean("./model/" + packageName + "/" + fileName)) + check(err) + _, err = f.WriteString(fp.GenerateAliasFile(packageName, "")) check(err) check(f.Close()) } diff --git a/model/internal/pdata/generated_log.go b/model/internal/pdata/generated_plog.go similarity index 100% rename from model/internal/pdata/generated_log.go rename to model/internal/pdata/generated_plog.go diff --git a/model/internal/pdata/generated_log_test.go b/model/internal/pdata/generated_plog_test.go similarity index 100% rename from model/internal/pdata/generated_log_test.go rename to model/internal/pdata/generated_plog_test.go diff --git a/model/internal/pdata/generated_metrics.go b/model/internal/pdata/generated_pmetric.go 
similarity index 100% rename from model/internal/pdata/generated_metrics.go rename to model/internal/pdata/generated_pmetric.go diff --git a/model/internal/pdata/generated_metrics_test.go b/model/internal/pdata/generated_pmetric_test.go similarity index 100% rename from model/internal/pdata/generated_metrics_test.go rename to model/internal/pdata/generated_pmetric_test.go diff --git a/model/internal/pdata/generated_trace.go b/model/internal/pdata/generated_ptrace.go similarity index 100% rename from model/internal/pdata/generated_trace.go rename to model/internal/pdata/generated_ptrace.go diff --git a/model/internal/pdata/generated_trace_test.go b/model/internal/pdata/generated_ptrace_test.go similarity index 100% rename from model/internal/pdata/generated_trace_test.go rename to model/internal/pdata/generated_ptrace_test.go diff --git a/model/internal/pdata/metrics.go b/model/internal/pdata/metrics.go index dd857195194..197f14327ce 100644 --- a/model/internal/pdata/metrics.go +++ b/model/internal/pdata/metrics.go @@ -18,17 +18,17 @@ import ( otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1" ) -// MetricsMarshaler marshals pdata.Metrics into bytes. +// MetricsMarshaler marshals pmetric.Metrics into bytes. type MetricsMarshaler interface { - // MarshalMetrics the given pdata.Metrics into bytes. + // MarshalMetrics the given pmetric.Metrics into bytes. // If the error is not nil, the returned bytes slice cannot be used. MarshalMetrics(md Metrics) ([]byte, error) } -// MetricsUnmarshaler unmarshalls bytes into pdata.Metrics. +// MetricsUnmarshaler unmarshalls bytes into pmetric.Metrics. type MetricsUnmarshaler interface { - // UnmarshalMetrics the given bytes into pdata.Metrics. - // If the error is not nil, the returned pdata.Metrics cannot be used. + // UnmarshalMetrics the given bytes into pmetric.Metrics. + // If the error is not nil, the returned pmetric.Metrics cannot be used. 
UnmarshalMetrics(buf []byte) (Metrics, error) } diff --git a/model/otlp/json_marshaler.go b/model/otlp/json_marshaler.go index f85c7f8bd6e..dd9f54344d9 100644 --- a/model/otlp/json_marshaler.go +++ b/model/otlp/json_marshaler.go @@ -20,21 +20,23 @@ import ( "github.com/gogo/protobuf/jsonpb" ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) // NewJSONTracesMarshaler returns a model.TracesMarshaler. Marshals to OTLP json bytes. -func NewJSONTracesMarshaler() pdata.TracesMarshaler { +func NewJSONTracesMarshaler() ptrace.TracesMarshaler { return newJSONMarshaler() } // NewJSONMetricsMarshaler returns a model.MetricsMarshaler. Marshals to OTLP json bytes. -func NewJSONMetricsMarshaler() pdata.MetricsMarshaler { +func NewJSONMetricsMarshaler() pmetric.MetricsMarshaler { return newJSONMarshaler() } // NewJSONLogsMarshaler returns a model.LogsMarshaler. Marshals to OTLP json bytes. 
-func NewJSONLogsMarshaler() pdata.LogsMarshaler { +func NewJSONLogsMarshaler() plog.LogsMarshaler { return newJSONMarshaler() } @@ -46,19 +48,19 @@ func newJSONMarshaler() *jsonMarshaler { return &jsonMarshaler{delegate: jsonpb.Marshaler{}} } -func (e *jsonMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) { +func (e *jsonMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { buf := bytes.Buffer{} err := e.delegate.Marshal(&buf, ipdata.LogsToOtlp(ld)) return buf.Bytes(), err } -func (e *jsonMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) { +func (e *jsonMarshaler) MarshalMetrics(md pmetric.Metrics) ([]byte, error) { buf := bytes.Buffer{} err := e.delegate.Marshal(&buf, ipdata.MetricsToOtlp(md)) return buf.Bytes(), err } -func (e *jsonMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { +func (e *jsonMarshaler) MarshalTraces(td ptrace.Traces) ([]byte, error) { buf := bytes.Buffer{} err := e.delegate.Marshal(&buf, ipdata.TracesToOtlp(td)) return buf.Bytes(), err diff --git a/model/otlp/json_test.go b/model/otlp/json_test.go index 20438ab3443..0a9f99158f5 100644 --- a/model/otlp/json_test.go +++ b/model/otlp/json_test.go @@ -19,11 +19,13 @@ import ( "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) -var tracesOTLP = func() pdata.Traces { - td := pdata.NewTraces() +var tracesOTLP = func() ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().UpsertString("host.name", "testHost") il := rs.ScopeSpans().AppendEmpty() @@ -35,8 +37,8 @@ var tracesOTLP = func() pdata.Traces { var tracesJSON = 
`{"resourceSpans":[{"resource":{"attributes":[{"key":"host.name","value":{"stringValue":"testHost"}}]},"scopeSpans":[{"scope":{"name":"name","version":"version"},"spans":[{"traceId":"","spanId":"","parentSpanId":"","name":"testSpan","status":{}}]}]}]}` -var metricsOTLP = func() pdata.Metrics { - md := pdata.NewMetrics() +var metricsOTLP = func() pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().UpsertString("host.name", "testHost") il := rm.ScopeMetrics().AppendEmpty() @@ -48,8 +50,8 @@ var metricsOTLP = func() pdata.Metrics { var metricsJSON = `{"resourceMetrics":[{"resource":{"attributes":[{"key":"host.name","value":{"stringValue":"testHost"}}]},"scopeMetrics":[{"scope":{"name":"name","version":"version"},"metrics":[{"name":"testMetric"}]}]}]}` -var logsOTLP = func() pdata.Logs { - ld := pdata.NewLogs() +var logsOTLP = func() plog.Logs { + ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() rl.Resource().Attributes().UpsertString("host.name", "testHost") il := rl.ScopeLogs().AppendEmpty() @@ -233,5 +235,5 @@ func TestMetricsNil(t *testing.T) { got, err := decoder.UnmarshalMetrics([]byte(jsonBuf)) assert.Error(t, err) - assert.EqualValues(t, pdata.Metrics{}, got) + assert.EqualValues(t, pmetric.Metrics{}, got) } diff --git a/model/otlp/json_unmarshaler.go b/model/otlp/json_unmarshaler.go index b1db47d684b..fce518bf6ba 100644 --- a/model/otlp/json_unmarshaler.go +++ b/model/otlp/json_unmarshaler.go @@ -24,7 +24,9 @@ import ( otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1" ipdata "go.opentelemetry.io/collector/model/internal/pdata" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type jsonUnmarshaler struct { @@ -32,17 +34,17 @@ type jsonUnmarshaler struct { } // 
NewJSONTracesUnmarshaler returns a model.TracesUnmarshaler. Unmarshals from OTLP json bytes. -func NewJSONTracesUnmarshaler() pdata.TracesUnmarshaler { +func NewJSONTracesUnmarshaler() ptrace.TracesUnmarshaler { return newJSONUnmarshaler() } // NewJSONMetricsUnmarshaler returns a model.MetricsUnmarshaler. Unmarshals from OTLP json bytes. -func NewJSONMetricsUnmarshaler() pdata.MetricsUnmarshaler { +func NewJSONMetricsUnmarshaler() pmetric.MetricsUnmarshaler { return newJSONUnmarshaler() } // NewJSONLogsUnmarshaler returns a model.LogsUnmarshaler. Unmarshals from OTLP json bytes. -func NewJSONLogsUnmarshaler() pdata.LogsUnmarshaler { +func NewJSONLogsUnmarshaler() plog.LogsUnmarshaler { return newJSONUnmarshaler() } @@ -50,28 +52,28 @@ func newJSONUnmarshaler() *jsonUnmarshaler { return &jsonUnmarshaler{delegate: jsonpb.Unmarshaler{}} } -func (d *jsonUnmarshaler) UnmarshalLogs(buf []byte) (pdata.Logs, error) { +func (d *jsonUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { ld := &otlplogs.LogsData{} if err := d.delegate.Unmarshal(bytes.NewReader(buf), ld); err != nil { - return pdata.Logs{}, err + return plog.Logs{}, err } otlpgrpc.InstrumentationLibraryLogsToScope(ld.ResourceLogs) return ipdata.LogsFromOtlp(ld), nil } -func (d *jsonUnmarshaler) UnmarshalMetrics(buf []byte) (pdata.Metrics, error) { +func (d *jsonUnmarshaler) UnmarshalMetrics(buf []byte) (pmetric.Metrics, error) { md := &otlpmetrics.MetricsData{} if err := d.delegate.Unmarshal(bytes.NewReader(buf), md); err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } otlpgrpc.InstrumentationLibraryMetricsToScope(md.ResourceMetrics) return ipdata.MetricsFromOtlp(md), nil } -func (d *jsonUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (d *jsonUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { td := &otlptrace.TracesData{} if err := d.delegate.Unmarshal(bytes.NewReader(buf), td); err != nil { - return pdata.Traces{}, err + return 
ptrace.Traces{}, err } otlpgrpc.InstrumentationLibrarySpansToScope(td.ResourceSpans) return ipdata.TracesFromOtlp(td), nil diff --git a/model/otlp/pb_marshaler.go b/model/otlp/pb_marshaler.go index 4c596723a98..a82dae547c2 100644 --- a/model/otlp/pb_marshaler.go +++ b/model/otlp/pb_marshaler.go @@ -16,21 +16,23 @@ package otlp // import "go.opentelemetry.io/collector/model/otlp" import ( ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) -// NewProtobufTracesMarshaler returns a pdata.TracesMarshaler. Marshals to OTLP binary protobuf bytes. -func NewProtobufTracesMarshaler() pdata.TracesMarshaler { +// NewProtobufTracesMarshaler returns a ptrace.TracesMarshaler. Marshals to OTLP binary protobuf bytes. +func NewProtobufTracesMarshaler() ptrace.TracesMarshaler { return newPbMarshaler() } -// NewProtobufMetricsMarshaler returns a pdata.MetricsMarshaler. Marshals to OTLP binary protobuf bytes. -func NewProtobufMetricsMarshaler() pdata.MetricsMarshaler { +// NewProtobufMetricsMarshaler returns a pmetric.MetricsMarshaler. Marshals to OTLP binary protobuf bytes. +func NewProtobufMetricsMarshaler() pmetric.MetricsMarshaler { return newPbMarshaler() } -// NewProtobufLogsMarshaler returns a pdata.LogsMarshaler. Marshals to OTLP binary protobuf bytes. -func NewProtobufLogsMarshaler() pdata.LogsMarshaler { +// NewProtobufLogsMarshaler returns a plog.LogsMarshaler. Marshals to OTLP binary protobuf bytes. 
+func NewProtobufLogsMarshaler() plog.LogsMarshaler { return newPbMarshaler() } @@ -41,30 +43,30 @@ func newPbMarshaler() *pbMarshaler { return &pbMarshaler{} } -var _ pdata.TracesSizer = (*pbMarshaler)(nil) -var _ pdata.MetricsSizer = (*pbMarshaler)(nil) -var _ pdata.LogsSizer = (*pbMarshaler)(nil) +var _ ptrace.TracesSizer = (*pbMarshaler)(nil) +var _ pmetric.MetricsSizer = (*pbMarshaler)(nil) +var _ plog.LogsSizer = (*pbMarshaler)(nil) -func (e *pbMarshaler) MarshalLogs(ld pdata.Logs) ([]byte, error) { +func (e *pbMarshaler) MarshalLogs(ld plog.Logs) ([]byte, error) { return ipdata.LogsToOtlp(ld).Marshal() } -func (e *pbMarshaler) MarshalMetrics(md pdata.Metrics) ([]byte, error) { +func (e *pbMarshaler) MarshalMetrics(md pmetric.Metrics) ([]byte, error) { return ipdata.MetricsToOtlp(md).Marshal() } -func (e *pbMarshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { +func (e *pbMarshaler) MarshalTraces(td ptrace.Traces) ([]byte, error) { return ipdata.TracesToOtlp(td).Marshal() } -func (e *pbMarshaler) TracesSize(td pdata.Traces) int { +func (e *pbMarshaler) TracesSize(td ptrace.Traces) int { return ipdata.TracesToOtlp(td).Size() } -func (e *pbMarshaler) MetricsSize(md pdata.Metrics) int { +func (e *pbMarshaler) MetricsSize(md pmetric.Metrics) int { return ipdata.MetricsToOtlp(md).Size() } -func (e *pbMarshaler) LogsSize(ld pdata.Logs) int { +func (e *pbMarshaler) LogsSize(ld plog.Logs) int { return ipdata.LogsToOtlp(ld).Size() } diff --git a/model/otlp/pb_test.go b/model/otlp/pb_test.go index 3053f23fdd5..9a659380435 100644 --- a/model/otlp/pb_test.go +++ b/model/otlp/pb_test.go @@ -21,7 +21,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestProtobufLogsUnmarshaler_error(t 
*testing.T) { @@ -43,9 +46,9 @@ func TestProtobufTracesUnmarshaler_error(t *testing.T) { } func TestProtobufTracesSizer(t *testing.T) { - sizer := NewProtobufTracesMarshaler().(pdata.TracesSizer) + sizer := NewProtobufTracesMarshaler().(ptrace.TracesSizer) marshaler := NewProtobufTracesMarshaler() - td := pdata.NewTraces() + td := ptrace.NewTraces() rms := td.ResourceSpans() rms.AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("foo") @@ -57,15 +60,15 @@ func TestProtobufTracesSizer(t *testing.T) { } func TestProtobufTracesSizer_withNil(t *testing.T) { - sizer := NewProtobufTracesMarshaler().(pdata.TracesSizer) + sizer := NewProtobufTracesMarshaler().(ptrace.TracesSizer) - assert.Equal(t, 0, sizer.TracesSize(pdata.NewTraces())) + assert.Equal(t, 0, sizer.TracesSize(ptrace.NewTraces())) } func TestProtobufMetricsSizer(t *testing.T) { - sizer := NewProtobufMetricsMarshaler().(pdata.MetricsSizer) + sizer := NewProtobufMetricsMarshaler().(pmetric.MetricsSizer) marshaler := NewProtobufMetricsMarshaler() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("foo") size := sizer.MetricsSize(md) @@ -76,15 +79,15 @@ func TestProtobufMetricsSizer(t *testing.T) { } func TestProtobufMetricsSizer_withNil(t *testing.T) { - sizer := NewProtobufMetricsMarshaler().(pdata.MetricsSizer) + sizer := NewProtobufMetricsMarshaler().(pmetric.MetricsSizer) - assert.Equal(t, 0, sizer.MetricsSize(pdata.NewMetrics())) + assert.Equal(t, 0, sizer.MetricsSize(pmetric.NewMetrics())) } func TestProtobufLogsSizer(t *testing.T) { - sizer := NewProtobufLogsMarshaler().(pdata.LogsSizer) + sizer := NewProtobufLogsMarshaler().(plog.LogsSizer) marshaler := NewProtobufLogsMarshaler() - ld := pdata.NewLogs() + ld := plog.NewLogs() ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().SetSeverityText("error") size := sizer.LogsSize(ld) @@ -96,9 +99,9 @@ func 
TestProtobufLogsSizer(t *testing.T) { } func TestProtobufLogsSizer_withNil(t *testing.T) { - sizer := NewProtobufLogsMarshaler().(pdata.LogsSizer) + sizer := NewProtobufLogsMarshaler().(plog.LogsSizer) - assert.Equal(t, 0, sizer.LogsSize(pdata.NewLogs())) + assert.Equal(t, 0, sizer.LogsSize(plog.NewLogs())) } func BenchmarkLogsToProtobuf(b *testing.B) { @@ -182,10 +185,10 @@ func BenchmarkTracesFromProtobuf(b *testing.B) { } } -func generateBenchmarkLogs(logsCount int) pdata.Logs { - endTime := pdata.NewTimestampFromTime(time.Now()) +func generateBenchmarkLogs(logsCount int) plog.Logs { + endTime := pcommon.NewTimestampFromTime(time.Now()) - md := pdata.NewLogs() + md := plog.NewLogs() ilm := md.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() ilm.LogRecords().EnsureCapacity(logsCount) for i := 0; i < logsCount; i++ { @@ -195,18 +198,18 @@ func generateBenchmarkLogs(logsCount int) pdata.Logs { return md } -func generateBenchmarkMetrics(metricsCount int) pdata.Metrics { +func generateBenchmarkMetrics(metricsCount int) pmetric.Metrics { now := time.Now() - startTime := pdata.NewTimestampFromTime(now.Add(-10 * time.Second)) - endTime := pdata.NewTimestampFromTime(now) + startTime := pcommon.NewTimestampFromTime(now.Add(-10 * time.Second)) + endTime := pcommon.NewTimestampFromTime(now) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Metrics().EnsureCapacity(metricsCount) for i := 0; i < metricsCount; i++ { im := ilm.Metrics().AppendEmpty() im.SetName("test_name") - im.SetDataType(pdata.MetricDataTypeSum) + im.SetDataType(pmetric.MetricDataTypeSum) idp := im.Sum().DataPoints().AppendEmpty() idp.SetStartTimestamp(startTime) idp.SetTimestamp(endTime) @@ -215,12 +218,12 @@ func generateBenchmarkMetrics(metricsCount int) pdata.Metrics { return md } -func generateBenchmarkTraces(metricsCount int) pdata.Traces { +func generateBenchmarkTraces(metricsCount int) ptrace.Traces { now := 
time.Now() - startTime := pdata.NewTimestampFromTime(now.Add(-10 * time.Second)) - endTime := pdata.NewTimestampFromTime(now) + startTime := pcommon.NewTimestampFromTime(now.Add(-10 * time.Second)) + endTime := pcommon.NewTimestampFromTime(now) - md := pdata.NewTraces() + md := ptrace.NewTraces() ilm := md.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty() ilm.Spans().EnsureCapacity(metricsCount) for i := 0; i < metricsCount; i++ { diff --git a/model/otlp/pb_unmarshaler.go b/model/otlp/pb_unmarshaler.go index 1140a092e10..5276086c7bf 100644 --- a/model/otlp/pb_unmarshaler.go +++ b/model/otlp/pb_unmarshaler.go @@ -19,23 +19,25 @@ import ( otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1" otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1" ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) type pbUnmarshaler struct{} // NewProtobufTracesUnmarshaler returns a model.TracesUnmarshaler. Unmarshals from OTLP binary protobuf bytes. -func NewProtobufTracesUnmarshaler() pdata.TracesUnmarshaler { +func NewProtobufTracesUnmarshaler() ptrace.TracesUnmarshaler { return newPbUnmarshaler() } // NewProtobufMetricsUnmarshaler returns a model.MetricsUnmarshaler. Unmarshals from OTLP binary protobuf bytes. -func NewProtobufMetricsUnmarshaler() pdata.MetricsUnmarshaler { +func NewProtobufMetricsUnmarshaler() pmetric.MetricsUnmarshaler { return newPbUnmarshaler() } // NewProtobufLogsUnmarshaler returns a model.LogsUnmarshaler. Unmarshals from OTLP binary protobuf bytes. 
-func NewProtobufLogsUnmarshaler() pdata.LogsUnmarshaler { +func NewProtobufLogsUnmarshaler() plog.LogsUnmarshaler { return newPbUnmarshaler() } @@ -43,19 +45,19 @@ func newPbUnmarshaler() *pbUnmarshaler { return &pbUnmarshaler{} } -func (d *pbUnmarshaler) UnmarshalLogs(buf []byte) (pdata.Logs, error) { +func (d *pbUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { ld := &otlplogs.LogsData{} err := ld.Unmarshal(buf) return ipdata.LogsFromOtlp(ld), err } -func (d *pbUnmarshaler) UnmarshalMetrics(buf []byte) (pdata.Metrics, error) { +func (d *pbUnmarshaler) UnmarshalMetrics(buf []byte) (pmetric.Metrics, error) { md := &otlpmetrics.MetricsData{} err := md.Unmarshal(buf) return ipdata.MetricsFromOtlp(md), err } -func (d *pbUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (d *pbUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { td := &otlptrace.TracesData{} err := td.Unmarshal(buf) return ipdata.TracesFromOtlp(td), err diff --git a/model/otlpgrpc/logs.go b/model/otlpgrpc/logs.go index f40c60358d3..36bf379935f 100644 --- a/model/otlpgrpc/logs.go +++ b/model/otlpgrpc/logs.go @@ -25,7 +25,7 @@ import ( v1 "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1" otlplogs "go.opentelemetry.io/collector/model/internal/data/protogen/logs/v1" ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) var jsonMarshaler = &jsonpb.Marshaler{} @@ -107,11 +107,11 @@ func (lr LogsRequest) UnmarshalJSON(data []byte) error { return nil } -func (lr LogsRequest) SetLogs(ld pdata.Logs) { +func (lr LogsRequest) SetLogs(ld plog.Logs) { lr.orig.ResourceLogs = ipdata.LogsToOtlp(ld).ResourceLogs } -func (lr LogsRequest) Logs() pdata.Logs { +func (lr LogsRequest) Logs() plog.Logs { return ipdata.LogsFromOtlp(&otlplogs.LogsData{ResourceLogs: lr.orig.ResourceLogs}) } @@ -119,7 +119,7 @@ func (lr LogsRequest) Logs() pdata.Logs { // // For 
semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LogsClient interface { - // Export pdata.Logs to the server. + // Export plog.Logs to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. diff --git a/model/otlpgrpc/metrics.go b/model/otlpgrpc/metrics.go index f0cccc06bad..a97e0fdf09a 100644 --- a/model/otlpgrpc/metrics.go +++ b/model/otlpgrpc/metrics.go @@ -24,7 +24,7 @@ import ( v1 "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1" otlpmetrics "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1" ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) // MetricsResponse represents the response for gRPC client/server. @@ -99,11 +99,11 @@ func (mr MetricsRequest) UnmarshalJSON(data []byte) error { return nil } -func (mr MetricsRequest) SetMetrics(ld pdata.Metrics) { +func (mr MetricsRequest) SetMetrics(ld pmetric.Metrics) { mr.orig.ResourceMetrics = ipdata.MetricsToOtlp(ld).ResourceMetrics } -func (mr MetricsRequest) Metrics() pdata.Metrics { +func (mr MetricsRequest) Metrics() pmetric.Metrics { return ipdata.MetricsFromOtlp(&otlpmetrics.MetricsData{ResourceMetrics: mr.orig.ResourceMetrics}) } @@ -111,7 +111,7 @@ func (mr MetricsRequest) Metrics() pdata.Metrics { // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MetricsClient interface { - // Export pdata.Metrics to the server. + // Export pmetric.Metrics to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. 
diff --git a/model/otlpgrpc/metrics_test.go b/model/otlpgrpc/metrics_test.go index f406f6fb12c..b173238ef1c 100644 --- a/model/otlpgrpc/metrics_test.go +++ b/model/otlpgrpc/metrics_test.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/test/bufconn" v1 "go.opentelemetry.io/collector/model/internal/data/protogen/metrics/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) var _ json.Unmarshaler = MetricsResponse{} @@ -246,7 +246,7 @@ func (f fakeMetricsServer) Export(_ context.Context, request MetricsRequest) (Me } func generateMetricsRequest() MetricsRequest { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("test_metric") mr := NewMetricsRequest() diff --git a/model/otlpgrpc/traces.go b/model/otlpgrpc/traces.go index 9bae9bed045..e004a5f638f 100644 --- a/model/otlpgrpc/traces.go +++ b/model/otlpgrpc/traces.go @@ -24,7 +24,7 @@ import ( v1 "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1" otlptrace "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1" ipdata "go.opentelemetry.io/collector/model/internal/pdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) // TracesResponse represents the response for gRPC client/server. 
@@ -103,11 +103,11 @@ func (tr TracesRequest) UnmarshalJSON(data []byte) error { return nil } -func (tr TracesRequest) SetTraces(td pdata.Traces) { +func (tr TracesRequest) SetTraces(td ptrace.Traces) { tr.orig.ResourceSpans = ipdata.TracesToOtlp(td).ResourceSpans } -func (tr TracesRequest) Traces() pdata.Traces { +func (tr TracesRequest) Traces() ptrace.Traces { return ipdata.TracesFromOtlp(&otlptrace.TracesData{ResourceSpans: tr.orig.ResourceSpans}) } @@ -115,7 +115,7 @@ func (tr TracesRequest) Traces() pdata.Traces { // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TracesClient interface { - // Export pdata.Traces to the server. + // Export ptrace.Traces to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. diff --git a/model/otlpgrpc/traces_test.go b/model/otlpgrpc/traces_test.go index e8509dad402..685cad3f07b 100644 --- a/model/otlpgrpc/traces_test.go +++ b/model/otlpgrpc/traces_test.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/test/bufconn" v1 "go.opentelemetry.io/collector/model/internal/data/protogen/trace/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) var _ json.Unmarshaler = TracesResponse{} @@ -262,7 +262,7 @@ func (f fakeTracesServer) Export(_ context.Context, request TracesRequest) (Trac } func generateTracesRequest() TracesRequest { - td := pdata.NewTraces() + td := ptrace.NewTraces() td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("test_span") tr := NewTracesRequest() diff --git a/model/pcommon/alias.go b/model/pcommon/alias.go new file mode 100644 index 00000000000..1caff3712fe --- /dev/null +++ b/model/pcommon/alias.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pcommon // import "go.opentelemetry.io/collector/model/pcommon" + +// This file contains aliases to data structures that are common for all +// signal types, such as timestamps, attributes, etc. + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// ValueType is an alias for pdata.ValueType type. +type ValueType = pdata.ValueType + +const ( + ValueTypeEmpty = pdata.ValueTypeEmpty + ValueTypeString = pdata.ValueTypeString + ValueTypeInt = pdata.ValueTypeInt + ValueTypeDouble = pdata.ValueTypeDouble + ValueTypeBool = pdata.ValueTypeBool + ValueTypeMap = pdata.ValueTypeMap + ValueTypeSlice = pdata.ValueTypeSlice + ValueTypeBytes = pdata.ValueTypeBytes +) + +// Value is an alias for pdata.Value struct. +type Value = pdata.Value + +// Aliases for functions to create pdata.Value. +var ( + NewValueEmpty = pdata.NewValueEmpty + NewValueString = pdata.NewValueString + NewValueInt = pdata.NewValueInt + NewValueDouble = pdata.NewValueDouble + NewValueBool = pdata.NewValueBool + NewValueMap = pdata.NewValueMap + NewValueSlice = pdata.NewValueSlice + NewValueBytes = pdata.NewValueBytes +) + +// Map is an alias for pdata.Map struct. +type Map = pdata.Map + +// Deprecated: [v0.48.0] Use Map instead. +type AttributeMap = pdata.Map + +// Aliases for functions to create pdata.Map. 
+var ( + NewMap = pdata.NewMap + NewMapFromRaw = pdata.NewMapFromRaw +) diff --git a/model/pcommon/generated_common_alias.go b/model/pcommon/generated_common_alias.go new file mode 100644 index 00000000000..cd8ac8a6b2b --- /dev/null +++ b/model/pcommon/generated_common_alias.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". + +package pcommon + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// InstrumentationScope is an alias for pdata.InstrumentationScope struct. +type InstrumentationScope = pdata.InstrumentationScope + +// NewInstrumentationScope is an alias for a function to create a new empty InstrumentationScope. +var NewInstrumentationScope = pdata.NewInstrumentationScope + +// Slice is an alias for pdata.Slice struct. +type Slice = pdata.Slice + +// NewSlice is an alias for a function to create Slice. 
+var NewSlice = pdata.NewSlice diff --git a/model/pcommon/generated_resource_alias.go b/model/pcommon/generated_resource_alias.go new file mode 100644 index 00000000000..80f69b456f2 --- /dev/null +++ b/model/pcommon/generated_resource_alias.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". + +package pcommon + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// Resource is an alias for pdata.Resource struct. +type Resource = pdata.Resource + +// NewResource is an alias for a function to create a new empty Resource. +var NewResource = pdata.NewResource diff --git a/model/pcommon/spanid_alias.go b/model/pcommon/spanid_alias.go new file mode 100644 index 00000000000..c35dce3c7d5 --- /dev/null +++ b/model/pcommon/spanid_alias.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pcommon // import "go.opentelemetry.io/collector/model/pcommon" + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// SpanID is an alias for pdata.SpanID struct. +type SpanID = pdata.SpanID + +// InvalidSpanID is an alias for pdata.InvalidSpanID function. +var InvalidSpanID = pdata.InvalidSpanID + +// NewSpanID is an alias for a function to create new SpanID. +var NewSpanID = pdata.NewSpanID diff --git a/model/pcommon/timestamp_alias.go b/model/pcommon/timestamp_alias.go new file mode 100644 index 00000000000..d6549d8b412 --- /dev/null +++ b/model/pcommon/timestamp_alias.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pcommon // import "go.opentelemetry.io/collector/model/pcommon" + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// Timestamp is a an alias for pdata.Timestamp. +type Timestamp = pdata.Timestamp + +// NewTimestampFromTime is an alias for pdata.NewTimestampFromTime function. 
+var NewTimestampFromTime = pdata.NewTimestampFromTime diff --git a/model/pcommon/traceid_alias.go b/model/pcommon/traceid_alias.go new file mode 100644 index 00000000000..de1c890b3cc --- /dev/null +++ b/model/pcommon/traceid_alias.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pcommon // import "go.opentelemetry.io/collector/model/pcommon" + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// TraceID is an alias for pdata.TraceID struct. +type TraceID = pdata.TraceID + +// InvalidTraceID is an alias for pdata.InvalidTraceID function. +var InvalidTraceID = pdata.InvalidTraceID + +// NewTraceID is an alias for a function to create new TraceID. +var NewTraceID = pdata.NewTraceID diff --git a/model/pdata/common_alias.go b/model/pdata/common_alias.go index f32781eafdb..4864b8bcab7 100644 --- a/model/pdata/common_alias.go +++ b/model/pdata/common_alias.go @@ -20,6 +20,7 @@ package pdata // import "go.opentelemetry.io/collector/model/pdata" import "go.opentelemetry.io/collector/model/internal/pdata" // ValueType is an alias for pdata.ValueType type. +// Deprecated: [v0.49.0] Use pcommon.ValueType instead. type ValueType = pdata.ValueType // AttributeValueType is an alias for pdata.ValueType type. 
@@ -27,14 +28,29 @@ type ValueType = pdata.ValueType type AttributeValueType = pdata.ValueType const ( - ValueTypeEmpty = pdata.ValueTypeEmpty + // Deprecated: [v0.49.0] Use pcommon.ValueTypeEmpty instead. + ValueTypeEmpty = pdata.ValueTypeEmpty + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeString instead. ValueTypeString = pdata.ValueTypeString - ValueTypeInt = pdata.ValueTypeInt + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeInt instead. + ValueTypeInt = pdata.ValueTypeInt + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeDouble instead. ValueTypeDouble = pdata.ValueTypeDouble - ValueTypeBool = pdata.ValueTypeBool - ValueTypeMap = pdata.ValueTypeMap - ValueTypeSlice = pdata.ValueTypeSlice - ValueTypeBytes = pdata.ValueTypeBytes + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeBool instead. + ValueTypeBool = pdata.ValueTypeBool + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeMap instead. + ValueTypeMap = pdata.ValueTypeMap + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeSlice instead. + ValueTypeSlice = pdata.ValueTypeSlice + + // Deprecated: [v0.49.0] Use pcommon.ValueTypeBytes instead. + ValueTypeBytes = pdata.ValueTypeBytes // Deprecated: [v0.48.0] Use ValueTypeEmpty instead. AttributeValueTypeEmpty = pdata.ValueTypeEmpty @@ -62,6 +78,7 @@ const ( ) // Value is an alias for pdata.Value struct. +// Deprecated: [v0.49.0] Use pcommon.Value instead. type Value = pdata.Value // Deprecated: [v0.48.0] Use Value instead. @@ -69,14 +86,30 @@ type AttributeValue = pdata.Value // Aliases for functions to create pdata.Value. var ( - NewValueEmpty = pdata.NewValueEmpty + + // Deprecated: [v0.49.0] Use pcommon.NewValueEmpty instead. + NewValueEmpty = pdata.NewValueEmpty + + // Deprecated: [v0.49.0] Use pcommon.NewValueString instead. NewValueString = pdata.NewValueString - NewValueInt = pdata.NewValueInt + + // Deprecated: [v0.49.0] Use pcommon.NewValueInt instead. + NewValueInt = pdata.NewValueInt + + // Deprecated: [v0.49.0] Use pcommon.NewValueDouble instead. 
NewValueDouble = pdata.NewValueDouble - NewValueBool = pdata.NewValueBool - NewValueMap = pdata.NewValueMap - NewValueSlice = pdata.NewValueSlice - NewValueBytes = pdata.NewValueBytes + + // Deprecated: [v0.49.0] Use pcommon.NewValueBool instead. + NewValueBool = pdata.NewValueBool + + // Deprecated: [v0.49.0] Use pcommon.NewValueMap instead. + NewValueMap = pdata.NewValueMap + + // Deprecated: [v0.49.0] Use pcommon.NewValueSlice instead. + NewValueSlice = pdata.NewValueSlice + + // Deprecated: [v0.49.0] Use pcommon.NewValueBytes instead. + NewValueBytes = pdata.NewValueBytes // Deprecated: [v0.48.0] Use NewValueEmpty instead. NewAttributeValueEmpty = pdata.NewValueEmpty @@ -104,6 +137,7 @@ var ( ) // Map is an alias for pdata.Map struct. +// Deprecated: [v0.49.0] Use pcommon.Map instead. type Map = pdata.Map // Deprecated: [v0.48.0] Use Map instead. @@ -111,7 +145,11 @@ type AttributeMap = pdata.Map // Aliases for functions to create pdata.Map. var ( - NewMap = pdata.NewMap + + // Deprecated: [v0.49.0] Use pcommon.NewMap instead. + NewMap = pdata.NewMap + + // Deprecated: [v0.49.0] Use pcommon.NewMapFromRaw instead. NewMapFromRaw = pdata.NewMapFromRaw ) diff --git a/model/pdata/generated_common_alias.go b/model/pdata/generated_common_alias.go index 3d3e64a6878..38364940dcf 100644 --- a/model/pdata/generated_common_alias.go +++ b/model/pdata/generated_common_alias.go @@ -20,13 +20,17 @@ package pdata import "go.opentelemetry.io/collector/model/internal/pdata" // InstrumentationScope is an alias for pdata.InstrumentationScope struct. +// Deprecated: [v0.49.0] Use pcommon.InstrumentationScope instead. type InstrumentationScope = pdata.InstrumentationScope // NewInstrumentationScope is an alias for a function to create a new empty InstrumentationScope. +// Deprecated: [v0.49.0] Use pcommon.NewInstrumentationScope instead. var NewInstrumentationScope = pdata.NewInstrumentationScope // Slice is an alias for pdata.Slice struct. 
+// Deprecated: [v0.49.0] Use pcommon.Slice instead. type Slice = pdata.Slice // NewSlice is an alias for a function to create Slice. +// Deprecated: [v0.49.0] Use pcommon.NewSlice instead. var NewSlice = pdata.NewSlice diff --git a/model/pdata/generated_plog_alias.go b/model/pdata/generated_plog_alias.go new file mode 100644 index 00000000000..cd399038406 --- /dev/null +++ b/model/pdata/generated_plog_alias.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". + +package pdata + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// ResourceLogsSlice is an alias for pdata.ResourceLogsSlice struct. +// Deprecated: [v0.49.0] Use plog.ResourceLogsSlice instead. +type ResourceLogsSlice = pdata.ResourceLogsSlice + +// NewResourceLogsSlice is an alias for a function to create ResourceLogsSlice. +// Deprecated: [v0.49.0] Use plog.NewResourceLogsSlice instead. +var NewResourceLogsSlice = pdata.NewResourceLogsSlice + +// ResourceLogs is an alias for pdata.ResourceLogs struct. +// Deprecated: [v0.49.0] Use plog.ResourceLogs instead. +type ResourceLogs = pdata.ResourceLogs + +// NewResourceLogs is an alias for a function to create a new empty ResourceLogs. +// Deprecated: [v0.49.0] Use plog.NewResourceLogs instead. 
+var NewResourceLogs = pdata.NewResourceLogs + +// ScopeLogsSlice is an alias for pdata.ScopeLogsSlice struct. +// Deprecated: [v0.49.0] Use plog.ScopeLogsSlice instead. +type ScopeLogsSlice = pdata.ScopeLogsSlice + +// NewScopeLogsSlice is an alias for a function to create ScopeLogsSlice. +// Deprecated: [v0.49.0] Use plog.NewScopeLogsSlice instead. +var NewScopeLogsSlice = pdata.NewScopeLogsSlice + +// ScopeLogs is an alias for pdata.ScopeLogs struct. +// Deprecated: [v0.49.0] Use plog.ScopeLogs instead. +type ScopeLogs = pdata.ScopeLogs + +// NewScopeLogs is an alias for a function to create a new empty ScopeLogs. +// Deprecated: [v0.49.0] Use plog.NewScopeLogs instead. +var NewScopeLogs = pdata.NewScopeLogs + +// LogRecordSlice is an alias for pdata.LogRecordSlice struct. +// Deprecated: [v0.49.0] Use plog.LogRecordSlice instead. +type LogRecordSlice = pdata.LogRecordSlice + +// NewLogRecordSlice is an alias for a function to create LogRecordSlice. +// Deprecated: [v0.49.0] Use plog.NewLogRecordSlice instead. +var NewLogRecordSlice = pdata.NewLogRecordSlice + +// LogRecord is an alias for pdata.LogRecord struct. +// Deprecated: [v0.49.0] Use plog.LogRecord instead. +type LogRecord = pdata.LogRecord + +// NewLogRecord is an alias for a function to create a new empty LogRecord. +// Deprecated: [v0.49.0] Use plog.NewLogRecord instead. +var NewLogRecord = pdata.NewLogRecord diff --git a/model/pdata/generated_pmetric_alias.go b/model/pdata/generated_pmetric_alias.go new file mode 100644 index 00000000000..a8620d6de2c --- /dev/null +++ b/model/pdata/generated_pmetric_alias.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". + +package pdata + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// ResourceMetricsSlice is an alias for pdata.ResourceMetricsSlice struct. +// Deprecated: [v0.49.0] Use pmetric.ResourceMetricsSlice instead. +type ResourceMetricsSlice = pdata.ResourceMetricsSlice + +// NewResourceMetricsSlice is an alias for a function to create ResourceMetricsSlice. +// Deprecated: [v0.49.0] Use pmetric.NewResourceMetricsSlice instead. +var NewResourceMetricsSlice = pdata.NewResourceMetricsSlice + +// ResourceMetrics is an alias for pdata.ResourceMetrics struct. +// Deprecated: [v0.49.0] Use pmetric.ResourceMetrics instead. +type ResourceMetrics = pdata.ResourceMetrics + +// NewResourceMetrics is an alias for a function to create a new empty ResourceMetrics. +// Deprecated: [v0.49.0] Use pmetric.NewResourceMetrics instead. +var NewResourceMetrics = pdata.NewResourceMetrics + +// ScopeMetricsSlice is an alias for pdata.ScopeMetricsSlice struct. +// Deprecated: [v0.49.0] Use pmetric.ScopeMetricsSlice instead. +type ScopeMetricsSlice = pdata.ScopeMetricsSlice + +// NewScopeMetricsSlice is an alias for a function to create ScopeMetricsSlice. +// Deprecated: [v0.49.0] Use pmetric.NewScopeMetricsSlice instead. +var NewScopeMetricsSlice = pdata.NewScopeMetricsSlice + +// ScopeMetrics is an alias for pdata.ScopeMetrics struct. +// Deprecated: [v0.49.0] Use pmetric.ScopeMetrics instead. 
+type ScopeMetrics = pdata.ScopeMetrics + +// NewScopeMetrics is an alias for a function to create a new empty ScopeMetrics. +// Deprecated: [v0.49.0] Use pmetric.NewScopeMetrics instead. +var NewScopeMetrics = pdata.NewScopeMetrics + +// MetricSlice is an alias for pdata.MetricSlice struct. +// Deprecated: [v0.49.0] Use pmetric.MetricSlice instead. +type MetricSlice = pdata.MetricSlice + +// NewMetricSlice is an alias for a function to create MetricSlice. +// Deprecated: [v0.49.0] Use pmetric.NewMetricSlice instead. +var NewMetricSlice = pdata.NewMetricSlice + +// Metric is an alias for pdata.Metric struct. +// Deprecated: [v0.49.0] Use pmetric.Metric instead. +type Metric = pdata.Metric + +// NewMetric is an alias for a function to create a new empty Metric. +// Deprecated: [v0.49.0] Use pmetric.NewMetric instead. +var NewMetric = pdata.NewMetric + +// Gauge is an alias for pdata.Gauge struct. +// Deprecated: [v0.49.0] Use pmetric.Gauge instead. +type Gauge = pdata.Gauge + +// NewGauge is an alias for a function to create a new empty Gauge. +// Deprecated: [v0.49.0] Use pmetric.NewGauge instead. +var NewGauge = pdata.NewGauge + +// Sum is an alias for pdata.Sum struct. +// Deprecated: [v0.49.0] Use pmetric.Sum instead. +type Sum = pdata.Sum + +// NewSum is an alias for a function to create a new empty Sum. +// Deprecated: [v0.49.0] Use pmetric.NewSum instead. +var NewSum = pdata.NewSum + +// Histogram is an alias for pdata.Histogram struct. +// Deprecated: [v0.49.0] Use pmetric.Histogram instead. +type Histogram = pdata.Histogram + +// NewHistogram is an alias for a function to create a new empty Histogram. +// Deprecated: [v0.49.0] Use pmetric.NewHistogram instead. +var NewHistogram = pdata.NewHistogram + +// ExponentialHistogram is an alias for pdata.ExponentialHistogram struct. +// Deprecated: [v0.49.0] Use pmetric.ExponentialHistogram instead. 
+type ExponentialHistogram = pdata.ExponentialHistogram + +// NewExponentialHistogram is an alias for a function to create a new empty ExponentialHistogram. +// Deprecated: [v0.49.0] Use pmetric.NewExponentialHistogram instead. +var NewExponentialHistogram = pdata.NewExponentialHistogram + +// Summary is an alias for pdata.Summary struct. +// Deprecated: [v0.49.0] Use pmetric.Summary instead. +type Summary = pdata.Summary + +// NewSummary is an alias for a function to create a new empty Summary. +// Deprecated: [v0.49.0] Use pmetric.NewSummary instead. +var NewSummary = pdata.NewSummary + +// NumberDataPointSlice is an alias for pdata.NumberDataPointSlice struct. +// Deprecated: [v0.49.0] Use pmetric.NumberDataPointSlice instead. +type NumberDataPointSlice = pdata.NumberDataPointSlice + +// NewNumberDataPointSlice is an alias for a function to create NumberDataPointSlice. +// Deprecated: [v0.49.0] Use pmetric.NewNumberDataPointSlice instead. +var NewNumberDataPointSlice = pdata.NewNumberDataPointSlice + +// NumberDataPoint is an alias for pdata.NumberDataPoint struct. +// Deprecated: [v0.49.0] Use pmetric.NumberDataPoint instead. +type NumberDataPoint = pdata.NumberDataPoint + +// NewNumberDataPoint is an alias for a function to create a new empty NumberDataPoint. +// Deprecated: [v0.49.0] Use pmetric.NewNumberDataPoint instead. +var NewNumberDataPoint = pdata.NewNumberDataPoint + +// HistogramDataPointSlice is an alias for pdata.HistogramDataPointSlice struct. +// Deprecated: [v0.49.0] Use pmetric.HistogramDataPointSlice instead. +type HistogramDataPointSlice = pdata.HistogramDataPointSlice + +// NewHistogramDataPointSlice is an alias for a function to create HistogramDataPointSlice. +// Deprecated: [v0.49.0] Use pmetric.NewHistogramDataPointSlice instead. +var NewHistogramDataPointSlice = pdata.NewHistogramDataPointSlice + +// HistogramDataPoint is an alias for pdata.HistogramDataPoint struct. +// Deprecated: [v0.49.0] Use pmetric.HistogramDataPoint instead. 
+type HistogramDataPoint = pdata.HistogramDataPoint + +// NewHistogramDataPoint is an alias for a function to create a new empty HistogramDataPoint. +// Deprecated: [v0.49.0] Use pmetric.NewHistogramDataPoint instead. +var NewHistogramDataPoint = pdata.NewHistogramDataPoint + +// ExponentialHistogramDataPointSlice is an alias for pdata.ExponentialHistogramDataPointSlice struct. +// Deprecated: [v0.49.0] Use pmetric.ExponentialHistogramDataPointSlice instead. +type ExponentialHistogramDataPointSlice = pdata.ExponentialHistogramDataPointSlice + +// NewExponentialHistogramDataPointSlice is an alias for a function to create ExponentialHistogramDataPointSlice. +// Deprecated: [v0.49.0] Use pmetric.NewExponentialHistogramDataPointSlice instead. +var NewExponentialHistogramDataPointSlice = pdata.NewExponentialHistogramDataPointSlice + +// ExponentialHistogramDataPoint is an alias for pdata.ExponentialHistogramDataPoint struct. +// Deprecated: [v0.49.0] Use pmetric.ExponentialHistogramDataPoint instead. +type ExponentialHistogramDataPoint = pdata.ExponentialHistogramDataPoint + +// NewExponentialHistogramDataPoint is an alias for a function to create a new empty ExponentialHistogramDataPoint. +// Deprecated: [v0.49.0] Use pmetric.NewExponentialHistogramDataPoint instead. +var NewExponentialHistogramDataPoint = pdata.NewExponentialHistogramDataPoint + +// Buckets is an alias for pdata.Buckets struct. +// Deprecated: [v0.49.0] Use pmetric.Buckets instead. +type Buckets = pdata.Buckets + +// NewBuckets is an alias for a function to create a new empty Buckets. +// Deprecated: [v0.49.0] Use pmetric.NewBuckets instead. +var NewBuckets = pdata.NewBuckets + +// SummaryDataPointSlice is an alias for pdata.SummaryDataPointSlice struct. +// Deprecated: [v0.49.0] Use pmetric.SummaryDataPointSlice instead. +type SummaryDataPointSlice = pdata.SummaryDataPointSlice + +// NewSummaryDataPointSlice is an alias for a function to create SummaryDataPointSlice. 
+// Deprecated: [v0.49.0] Use pmetric.NewSummaryDataPointSlice instead. +var NewSummaryDataPointSlice = pdata.NewSummaryDataPointSlice + +// SummaryDataPoint is an alias for pdata.SummaryDataPoint struct. +// Deprecated: [v0.49.0] Use pmetric.SummaryDataPoint instead. +type SummaryDataPoint = pdata.SummaryDataPoint + +// NewSummaryDataPoint is an alias for a function to create a new empty SummaryDataPoint. +// Deprecated: [v0.49.0] Use pmetric.NewSummaryDataPoint instead. +var NewSummaryDataPoint = pdata.NewSummaryDataPoint + +// ValueAtQuantileSlice is an alias for pdata.ValueAtQuantileSlice struct. +// Deprecated: [v0.49.0] Use pmetric.ValueAtQuantileSlice instead. +type ValueAtQuantileSlice = pdata.ValueAtQuantileSlice + +// NewValueAtQuantileSlice is an alias for a function to create ValueAtQuantileSlice. +// Deprecated: [v0.49.0] Use pmetric.NewValueAtQuantileSlice instead. +var NewValueAtQuantileSlice = pdata.NewValueAtQuantileSlice + +// ValueAtQuantile is an alias for pdata.ValueAtQuantile struct. +// Deprecated: [v0.49.0] Use pmetric.ValueAtQuantile instead. +type ValueAtQuantile = pdata.ValueAtQuantile + +// NewValueAtQuantile is an alias for a function to create a new empty ValueAtQuantile. +// Deprecated: [v0.49.0] Use pmetric.NewValueAtQuantile instead. +var NewValueAtQuantile = pdata.NewValueAtQuantile + +// ExemplarSlice is an alias for pdata.ExemplarSlice struct. +// Deprecated: [v0.49.0] Use pmetric.ExemplarSlice instead. +type ExemplarSlice = pdata.ExemplarSlice + +// NewExemplarSlice is an alias for a function to create ExemplarSlice. +// Deprecated: [v0.49.0] Use pmetric.NewExemplarSlice instead. +var NewExemplarSlice = pdata.NewExemplarSlice + +// Exemplar is an alias for pdata.Exemplar struct. +// Deprecated: [v0.49.0] Use pmetric.Exemplar instead. +type Exemplar = pdata.Exemplar + +// NewExemplar is an alias for a function to create a new empty Exemplar. +// Deprecated: [v0.49.0] Use pmetric.NewExemplar instead. 
+var NewExemplar = pdata.NewExemplar diff --git a/model/pdata/generated_ptrace_alias.go b/model/pdata/generated_ptrace_alias.go new file mode 100644 index 00000000000..3503fbe1b87 --- /dev/null +++ b/model/pdata/generated_ptrace_alias.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". + +package pdata + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// ResourceSpansSlice is an alias for pdata.ResourceSpansSlice struct. +// Deprecated: [v0.49.0] Use ptrace.ResourceSpansSlice instead. +type ResourceSpansSlice = pdata.ResourceSpansSlice + +// NewResourceSpansSlice is an alias for a function to create ResourceSpansSlice. +// Deprecated: [v0.49.0] Use ptrace.NewResourceSpansSlice instead. +var NewResourceSpansSlice = pdata.NewResourceSpansSlice + +// ResourceSpans is an alias for pdata.ResourceSpans struct. +// Deprecated: [v0.49.0] Use ptrace.ResourceSpans instead. +type ResourceSpans = pdata.ResourceSpans + +// NewResourceSpans is an alias for a function to create a new empty ResourceSpans. +// Deprecated: [v0.49.0] Use ptrace.NewResourceSpans instead. +var NewResourceSpans = pdata.NewResourceSpans + +// ScopeSpansSlice is an alias for pdata.ScopeSpansSlice struct. +// Deprecated: [v0.49.0] Use ptrace.ScopeSpansSlice instead. 
+type ScopeSpansSlice = pdata.ScopeSpansSlice + +// NewScopeSpansSlice is an alias for a function to create ScopeSpansSlice. +// Deprecated: [v0.49.0] Use ptrace.NewScopeSpansSlice instead. +var NewScopeSpansSlice = pdata.NewScopeSpansSlice + +// ScopeSpans is an alias for pdata.ScopeSpans struct. +// Deprecated: [v0.49.0] Use ptrace.ScopeSpans instead. +type ScopeSpans = pdata.ScopeSpans + +// NewScopeSpans is an alias for a function to create a new empty ScopeSpans. +// Deprecated: [v0.49.0] Use ptrace.NewScopeSpans instead. +var NewScopeSpans = pdata.NewScopeSpans + +// SpanSlice is an alias for pdata.SpanSlice struct. +// Deprecated: [v0.49.0] Use ptrace.SpanSlice instead. +type SpanSlice = pdata.SpanSlice + +// NewSpanSlice is an alias for a function to create SpanSlice. +// Deprecated: [v0.49.0] Use ptrace.NewSpanSlice instead. +var NewSpanSlice = pdata.NewSpanSlice + +// Span is an alias for pdata.Span struct. +// Deprecated: [v0.49.0] Use ptrace.Span instead. +type Span = pdata.Span + +// NewSpan is an alias for a function to create a new empty Span. +// Deprecated: [v0.49.0] Use ptrace.NewSpan instead. +var NewSpan = pdata.NewSpan + +// SpanEventSlice is an alias for pdata.SpanEventSlice struct. +// Deprecated: [v0.49.0] Use ptrace.SpanEventSlice instead. +type SpanEventSlice = pdata.SpanEventSlice + +// NewSpanEventSlice is an alias for a function to create SpanEventSlice. +// Deprecated: [v0.49.0] Use ptrace.NewSpanEventSlice instead. +var NewSpanEventSlice = pdata.NewSpanEventSlice + +// SpanEvent is an alias for pdata.SpanEvent struct. +// Deprecated: [v0.49.0] Use ptrace.SpanEvent instead. +type SpanEvent = pdata.SpanEvent + +// NewSpanEvent is an alias for a function to create a new empty SpanEvent. +// Deprecated: [v0.49.0] Use ptrace.NewSpanEvent instead. +var NewSpanEvent = pdata.NewSpanEvent + +// SpanLinkSlice is an alias for pdata.SpanLinkSlice struct. +// Deprecated: [v0.49.0] Use ptrace.SpanLinkSlice instead. 
+type SpanLinkSlice = pdata.SpanLinkSlice + +// NewSpanLinkSlice is an alias for a function to create SpanLinkSlice. +// Deprecated: [v0.49.0] Use ptrace.NewSpanLinkSlice instead. +var NewSpanLinkSlice = pdata.NewSpanLinkSlice + +// SpanLink is an alias for pdata.SpanLink struct. +// Deprecated: [v0.49.0] Use ptrace.SpanLink instead. +type SpanLink = pdata.SpanLink + +// NewSpanLink is an alias for a function to create a new empty SpanLink. +// Deprecated: [v0.49.0] Use ptrace.NewSpanLink instead. +var NewSpanLink = pdata.NewSpanLink + +// SpanStatus is an alias for pdata.SpanStatus struct. +// Deprecated: [v0.49.0] Use ptrace.SpanStatus instead. +type SpanStatus = pdata.SpanStatus + +// NewSpanStatus is an alias for a function to create a new empty SpanStatus. +// Deprecated: [v0.49.0] Use ptrace.NewSpanStatus instead. +var NewSpanStatus = pdata.NewSpanStatus diff --git a/model/pdata/generated_resource_alias.go b/model/pdata/generated_resource_alias.go index 4c4d32a87b0..58c8a2d61a9 100644 --- a/model/pdata/generated_resource_alias.go +++ b/model/pdata/generated_resource_alias.go @@ -20,7 +20,9 @@ package pdata import "go.opentelemetry.io/collector/model/internal/pdata" // Resource is an alias for pdata.Resource struct. +// Deprecated: [v0.49.0] Use pcommon.Resource instead. type Resource = pdata.Resource // NewResource is an alias for a function to create a new empty Resource. +// Deprecated: [v0.49.0] Use pcommon.NewResource instead. var NewResource = pdata.NewResource diff --git a/model/pdata/logs_alias.go b/model/pdata/logs_alias.go index 878606b92ba..95b93128c16 100644 --- a/model/pdata/logs_alias.go +++ b/model/pdata/logs_alias.go @@ -21,49 +21,105 @@ import ( ) // LogsMarshaler is an alias for pdata.LogsMarshaler interface. +// Deprecated: [v0.49.0] Use plog.LogsMarshaler instead. type LogsMarshaler = pdata.LogsMarshaler // LogsUnmarshaler is an alias for pdata.LogsUnmarshaler interface. +// Deprecated: [v0.49.0] Use plog.LogsUnmarshaler instead. 
type LogsUnmarshaler = pdata.LogsUnmarshaler // LogsSizer is an alias for pdata.LogsSizer interface. +// Deprecated: [v0.49.0] Use plog.LogsSizer instead. type LogsSizer = pdata.LogsSizer // Logs is an alias for pdata.Logs struct. +// Deprecated: [v0.49.0] Use plog.Logs instead. type Logs = pdata.Logs // NewLogs is an alias for a function to create new Logs. +// Deprecated: [v0.49.0] Use plog.NewLogs instead. var NewLogs = pdata.NewLogs // SeverityNumber is an alias for pdata.SeverityNumber type. +// Deprecated: [v0.49.0] Use plog.SeverityNumber instead. type SeverityNumber = pdata.SeverityNumber const ( + + // Deprecated: [v0.49.0] Use plog.SeverityNumberUNDEFINED instead. SeverityNumberUNDEFINED = pdata.SeverityNumberUNDEFINED - SeverityNumberTRACE = pdata.SeverityNumberTRACE - SeverityNumberTRACE2 = pdata.SeverityNumberTRACE2 - SeverityNumberTRACE3 = pdata.SeverityNumberTRACE3 - SeverityNumberTRACE4 = pdata.SeverityNumberTRACE4 - SeverityNumberDEBUG = pdata.SeverityNumberDEBUG - SeverityNumberDEBUG2 = pdata.SeverityNumberDEBUG2 - SeverityNumberDEBUG3 = pdata.SeverityNumberDEBUG3 - SeverityNumberDEBUG4 = pdata.SeverityNumberDEBUG4 - SeverityNumberINFO = pdata.SeverityNumberINFO - SeverityNumberINFO2 = pdata.SeverityNumberINFO2 - SeverityNumberINFO3 = pdata.SeverityNumberINFO3 - SeverityNumberINFO4 = pdata.SeverityNumberINFO4 - SeverityNumberWARN = pdata.SeverityNumberWARN - SeverityNumberWARN2 = pdata.SeverityNumberWARN2 - SeverityNumberWARN3 = pdata.SeverityNumberWARN3 - SeverityNumberWARN4 = pdata.SeverityNumberWARN4 - SeverityNumberERROR = pdata.SeverityNumberERROR - SeverityNumberERROR2 = pdata.SeverityNumberERROR2 - SeverityNumberERROR3 = pdata.SeverityNumberERROR3 - SeverityNumberERROR4 = pdata.SeverityNumberERROR4 - SeverityNumberFATAL = pdata.SeverityNumberFATAL - SeverityNumberFATAL2 = pdata.SeverityNumberFATAL2 - SeverityNumberFATAL3 = pdata.SeverityNumberFATAL3 - SeverityNumberFATAL4 = pdata.SeverityNumberFATAL4 + + // Deprecated: [v0.49.0] Use 
plog.SeverityNumberTRACE instead. + SeverityNumberTRACE = pdata.SeverityNumberTRACE + + // Deprecated: [v0.49.0] Use plog.SeverityNumberTRACE2 instead. + SeverityNumberTRACE2 = pdata.SeverityNumberTRACE2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberTRACE3 instead. + SeverityNumberTRACE3 = pdata.SeverityNumberTRACE3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberTRACE4 instead. + SeverityNumberTRACE4 = pdata.SeverityNumberTRACE4 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberDEBUG instead. + SeverityNumberDEBUG = pdata.SeverityNumberDEBUG + + // Deprecated: [v0.49.0] Use plog.SeverityNumberDEBUG2 instead. + SeverityNumberDEBUG2 = pdata.SeverityNumberDEBUG2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberDEBUG3 instead. + SeverityNumberDEBUG3 = pdata.SeverityNumberDEBUG3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberDEBUG4 instead. + SeverityNumberDEBUG4 = pdata.SeverityNumberDEBUG4 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberINFO instead. + SeverityNumberINFO = pdata.SeverityNumberINFO + + // Deprecated: [v0.49.0] Use plog.SeverityNumberINFO2 instead. + SeverityNumberINFO2 = pdata.SeverityNumberINFO2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberINFO3 instead. + SeverityNumberINFO3 = pdata.SeverityNumberINFO3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberINFO4 instead. + SeverityNumberINFO4 = pdata.SeverityNumberINFO4 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberWARN instead. + SeverityNumberWARN = pdata.SeverityNumberWARN + + // Deprecated: [v0.49.0] Use plog.SeverityNumberWARN2 instead. + SeverityNumberWARN2 = pdata.SeverityNumberWARN2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberWARN3 instead. + SeverityNumberWARN3 = pdata.SeverityNumberWARN3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberWARN4 instead. + SeverityNumberWARN4 = pdata.SeverityNumberWARN4 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberERROR instead. 
+ SeverityNumberERROR = pdata.SeverityNumberERROR + + // Deprecated: [v0.49.0] Use plog.SeverityNumberERROR2 instead. + SeverityNumberERROR2 = pdata.SeverityNumberERROR2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberERROR3 instead. + SeverityNumberERROR3 = pdata.SeverityNumberERROR3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberERROR4 instead. + SeverityNumberERROR4 = pdata.SeverityNumberERROR4 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberFATAL instead. + SeverityNumberFATAL = pdata.SeverityNumberFATAL + + // Deprecated: [v0.49.0] Use plog.SeverityNumberFATAL2 instead. + SeverityNumberFATAL2 = pdata.SeverityNumberFATAL2 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberFATAL3 instead. + SeverityNumberFATAL3 = pdata.SeverityNumberFATAL3 + + // Deprecated: [v0.49.0] Use plog.SeverityNumberFATAL4 instead. + SeverityNumberFATAL4 = pdata.SeverityNumberFATAL4 ) // Deprecated: [v0.48.0] Use ScopeLogsSlice instead. diff --git a/model/pdata/metrics_alias.go b/model/pdata/metrics_alias.go index 707d2147a3f..b460c9576ae 100644 --- a/model/pdata/metrics_alias.go +++ b/model/pdata/metrics_alias.go @@ -18,67 +18,105 @@ package pdata // import "go.opentelemetry.io/collector/model/pdata" import ( "go.opentelemetry.io/collector/model/internal/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) -// MetricsMarshaler is an alias for pdata.MetricsMarshaler interface. -type MetricsMarshaler = pdata.MetricsMarshaler +// MetricsMarshaler is an alias for pmetric.MetricsMarshaler interface. +// Deprecated: [v0.49.0] Use pmetric.MetricsMarshaler instead. +type MetricsMarshaler = pmetric.MetricsMarshaler -// MetricsUnmarshaler is an alias for pdata.MetricsUnmarshaler interface. -type MetricsUnmarshaler = pdata.MetricsUnmarshaler +// MetricsUnmarshaler is an alias for pmetric.MetricsUnmarshaler interface. +// Deprecated: [v0.49.0] Use pmetric.MetricsUnmarshaler instead. 
+type MetricsUnmarshaler = pmetric.MetricsUnmarshaler -// MetricsSizer is an alias for pdata.MetricsSizer interface. -type MetricsSizer = pdata.MetricsSizer +// MetricsSizer is an alias for pmetric.MetricsSizer interface. +// Deprecated: [v0.49.0] Use pmetric.MetricsSizer instead. +type MetricsSizer = pmetric.MetricsSizer -// Metrics is an alias for pdata.Metrics structure. -type Metrics = pdata.Metrics +// Metrics is an alias for pmetric.Metrics structure. +// Deprecated: [v0.49.0] Use pmetric.Metrics instead. +type Metrics = pmetric.Metrics // NewMetrics is an alias for a function to create new Metrics. +// Deprecated: [v0.49.0] Use pmetric.NewMetrics instead. var NewMetrics = pdata.NewMetrics // MetricDataType is an alias for pdata.MetricDataType type. +// Deprecated: [v0.49.0] Use pmetric.MetricDataType instead. type MetricDataType = pdata.MetricDataType const ( - MetricDataTypeNone = pdata.MetricDataTypeNone - MetricDataTypeGauge = pdata.MetricDataTypeGauge - MetricDataTypeSum = pdata.MetricDataTypeSum - MetricDataTypeHistogram = pdata.MetricDataTypeHistogram + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeNone instead. + MetricDataTypeNone = pdata.MetricDataTypeNone + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeGauge instead. + MetricDataTypeGauge = pdata.MetricDataTypeGauge + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeSum instead. + MetricDataTypeSum = pdata.MetricDataTypeSum + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeHistogram instead. + MetricDataTypeHistogram = pdata.MetricDataTypeHistogram + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeExponentialHistogram instead. MetricDataTypeExponentialHistogram = pdata.MetricDataTypeExponentialHistogram - MetricDataTypeSummary = pdata.MetricDataTypeSummary + + // Deprecated: [v0.49.0] Use pmetric.MetricDataTypeSummary instead. + MetricDataTypeSummary = pdata.MetricDataTypeSummary ) // MetricAggregationTemporality is an alias for pdata.MetricAggregationTemporality type. 
+// Deprecated: [v0.49.0] Use pmetric.MetricAggregationTemporality instead. type MetricAggregationTemporality = pdata.MetricAggregationTemporality const ( + + // Deprecated: [v0.49.0] Use pmetric.MetricAggregationTemporalityUnspecified instead. MetricAggregationTemporalityUnspecified = pdata.MetricAggregationTemporalityUnspecified - MetricAggregationTemporalityDelta = pdata.MetricAggregationTemporalityDelta - MetricAggregationTemporalityCumulative = pdata.MetricAggregationTemporalityCumulative + + // Deprecated: [v0.49.0] Use pmetric.MetricAggregationTemporalityDelta instead. + MetricAggregationTemporalityDelta = pdata.MetricAggregationTemporalityDelta + + // Deprecated: [v0.49.0] Use pmetric.MetricAggregationTemporalityCumulative instead. + MetricAggregationTemporalityCumulative = pdata.MetricAggregationTemporalityCumulative ) // MetricDataPointFlags is an alias for pdata.MetricDataPointFlags type. +// Deprecated: [v0.49.0] Use pmetric.MetricDataPointFlags instead. type MetricDataPointFlags = pdata.MetricDataPointFlags const ( + // Deprecated: [v0.49.0] Use pmetric.MetricDataPointFlagsNone instead. MetricDataPointFlagsNone = pdata.MetricDataPointFlagsNone ) // NewMetricDataPointFlags is an alias for a function to create new MetricDataPointFlags. +// Deprecated: [v0.49.0] Use pmetric.NewMetricDataPointFlags instead. var NewMetricDataPointFlags = pdata.NewMetricDataPointFlags // MetricDataPointFlag is an alias for pdata.MetricDataPointFlag type. +// Deprecated: [v0.49.0] Use pmetric.MetricDataPointFlag instead. type MetricDataPointFlag = pdata.MetricDataPointFlag const ( + // Deprecated: [v0.49.0] Use pmetric.MetricDataPointFlagNoRecordedValue instead. MetricDataPointFlagNoRecordedValue = pdata.MetricDataPointFlagNoRecordedValue ) // MetricValueType is an alias for pdata.MetricValueType type. +// Deprecated: [v0.49.0] Use pmetric.MetricValueType instead. 
type MetricValueType = pdata.MetricValueType const ( - MetricValueTypeNone = pdata.MetricValueTypeNone - MetricValueTypeInt = pdata.MetricValueTypeInt + + // Deprecated: [v0.49.0] Use pmetric.MetricValueTypeNone instead. + MetricValueTypeNone = pdata.MetricValueTypeNone + + // Deprecated: [v0.49.0] Use pmetric.MetricValueTypeInt instead. + MetricValueTypeInt = pdata.MetricValueTypeInt + + // Deprecated: [v0.49.0] Use pmetric.MetricValueTypeDouble instead. MetricValueTypeDouble = pdata.MetricValueTypeDouble ) diff --git a/model/pdata/spanid_alias.go b/model/pdata/spanid_alias.go index 1d6477c0ebd..d01f03ac9c7 100644 --- a/model/pdata/spanid_alias.go +++ b/model/pdata/spanid_alias.go @@ -17,10 +17,13 @@ package pdata // import "go.opentelemetry.io/collector/model/pdata" import "go.opentelemetry.io/collector/model/internal/pdata" // SpanID is an alias for pdata.SpanID struct. +// Deprecated: [v0.49.0] Use pcommon.SpanID instead. type SpanID = pdata.SpanID // InvalidSpanID is an alias for pdata.InvalidSpanID function. +// Deprecated: [v0.49.0] Use pcommon.InvalidSpanID instead. var InvalidSpanID = pdata.InvalidSpanID // NewSpanID is an alias for a function to create new SpanID. +// Deprecated: [v0.49.0] Use pcommon.NewSpanID instead. var NewSpanID = pdata.NewSpanID diff --git a/model/pdata/timestamp_alias.go b/model/pdata/timestamp_alias.go index 7719a2ff6a7..e4752fa3019 100644 --- a/model/pdata/timestamp_alias.go +++ b/model/pdata/timestamp_alias.go @@ -17,7 +17,9 @@ package pdata // import "go.opentelemetry.io/collector/model/pdata" import "go.opentelemetry.io/collector/model/internal/pdata" // Timestamp is a an alias for pdata.Timestamp. +// Deprecated: [v0.49.0] Use pcommon.Timestamp instead. type Timestamp = pdata.Timestamp // NewTimestampFromTime is an alias for pdata.NewTimestampFromTime function. +// Deprecated: [v0.49.0] Use pcommon.NewTimestampFromTime instead. 
var NewTimestampFromTime = pdata.NewTimestampFromTime diff --git a/model/pdata/traceid_alias.go b/model/pdata/traceid_alias.go index 4c655b3cf07..85d7db617cd 100644 --- a/model/pdata/traceid_alias.go +++ b/model/pdata/traceid_alias.go @@ -17,10 +17,13 @@ package pdata // import "go.opentelemetry.io/collector/model/pdata" import "go.opentelemetry.io/collector/model/internal/pdata" // TraceID is an alias for pdata.TraceID struct. +// Deprecated: [v0.49.0] Use pcommon.TraceID instead. type TraceID = pdata.TraceID // InvalidTraceID is an alias for pdata.InvalidTraceID function. +// Deprecated: [v0.49.0] Use pcommon.InvalidTraceID instead. var InvalidTraceID = pdata.InvalidTraceID // NewTraceID is an alias for a function to create new TraceID. +// Deprecated: [v0.49.0] Use pcommon.NewTraceID instead. var NewTraceID = pdata.NewTraceID diff --git a/model/pdata/traces_alias.go b/model/pdata/traces_alias.go index db34296db4e..105c67dec77 100644 --- a/model/pdata/traces_alias.go +++ b/model/pdata/traces_alias.go @@ -21,21 +21,27 @@ import ( ) // TracesMarshaler is an alias for pdata.TracesMarshaler interface. +// Deprecated: [v0.49.0] Use ptrace.TracesMarshaler instead. type TracesMarshaler = pdata.TracesMarshaler // TracesUnmarshaler is an alias for pdata.TracesUnmarshaler interface. +// Deprecated: [v0.49.0] Use ptrace.TracesUnmarshaler instead. type TracesUnmarshaler = pdata.TracesUnmarshaler // TracesSizer is an alias for pdata.TracesSizer interface. +// Deprecated: [v0.49.0] Use ptrace.TracesSizer instead. type TracesSizer = pdata.TracesSizer // Traces is an alias for pdata.Traces struct. +// Deprecated: [v0.49.0] Use ptrace.Traces instead. type Traces = pdata.Traces // NewTraces is an alias for a function to create new Traces. +// Deprecated: [v0.49.0] Use ptrace.NewTraces instead. var NewTraces = pdata.NewTraces // TraceState is an alias for pdata.TraceState type. +// Deprecated: [v0.49.0] Use ptrace.TraceState instead. 
type TraceState = pdata.TraceState const ( @@ -43,23 +49,43 @@ const ( ) // SpanKind is an alias for pdata.SpanKind type. +// Deprecated: [v0.49.0] Use ptrace.SpanKind instead. type SpanKind = pdata.SpanKind const ( + + // Deprecated: [v0.49.0] Use ptrace.SpanKindUnspecified instead. SpanKindUnspecified = pdata.SpanKindUnspecified - SpanKindInternal = pdata.SpanKindInternal - SpanKindServer = pdata.SpanKindServer - SpanKindClient = pdata.SpanKindClient - SpanKindProducer = pdata.SpanKindProducer - SpanKindConsumer = pdata.SpanKindConsumer + + // Deprecated: [v0.49.0] Use ptrace.SpanKindInternal instead. + SpanKindInternal = pdata.SpanKindInternal + + // Deprecated: [v0.49.0] Use ptrace.SpanKindServer instead. + SpanKindServer = pdata.SpanKindServer + + // Deprecated: [v0.49.0] Use ptrace.SpanKindClient instead. + SpanKindClient = pdata.SpanKindClient + + // Deprecated: [v0.49.0] Use ptrace.SpanKindProducer instead. + SpanKindProducer = pdata.SpanKindProducer + + // Deprecated: [v0.49.0] Use ptrace.SpanKindConsumer instead. + SpanKindConsumer = pdata.SpanKindConsumer ) // StatusCode is an alias for pdata.StatusCode type. +// Deprecated: [v0.49.0] Use ptrace.StatusCode instead. type StatusCode = pdata.StatusCode const ( + + // Deprecated: [v0.49.0] Use ptrace.StatusCodeUnset instead. StatusCodeUnset = pdata.StatusCodeUnset - StatusCodeOk = pdata.StatusCodeOk + + // Deprecated: [v0.49.0] Use ptrace.StatusCodeOk instead. + StatusCodeOk = pdata.StatusCodeOk + + // Deprecated: [v0.49.0] Use ptrace.StatusCodeError instead. StatusCodeError = pdata.StatusCodeError ) diff --git a/model/plog/alias.go b/model/plog/alias.go new file mode 100644 index 00000000000..ffc1c04945b --- /dev/null +++ b/model/plog/alias.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package plog // import "go.opentelemetry.io/collector/model/plog"
+
+// This file contains aliases for logs data structures.
+
+import "go.opentelemetry.io/collector/model/internal/pdata"
+
+// LogsMarshaler is an alias for pdata.LogsMarshaler interface.
+type LogsMarshaler = pdata.LogsMarshaler
+
+// LogsUnmarshaler is an alias for pdata.LogsUnmarshaler interface.
+type LogsUnmarshaler = pdata.LogsUnmarshaler
+
+// LogsSizer is an alias for pdata.LogsSizer interface.
+type LogsSizer = pdata.LogsSizer
+
+// Logs is an alias for pdata.Logs struct.
+type Logs = pdata.Logs
+
+// NewLogs is an alias for a function to create new Logs.
+var NewLogs = pdata.NewLogs
+
+// SeverityNumber is an alias for pdata.SeverityNumber type.
+type SeverityNumber = pdata.SeverityNumber + +const ( + SeverityNumberUNDEFINED = pdata.SeverityNumberUNDEFINED + SeverityNumberTRACE = pdata.SeverityNumberTRACE + SeverityNumberTRACE2 = pdata.SeverityNumberTRACE2 + SeverityNumberTRACE3 = pdata.SeverityNumberTRACE3 + SeverityNumberTRACE4 = pdata.SeverityNumberTRACE4 + SeverityNumberDEBUG = pdata.SeverityNumberDEBUG + SeverityNumberDEBUG2 = pdata.SeverityNumberDEBUG2 + SeverityNumberDEBUG3 = pdata.SeverityNumberDEBUG3 + SeverityNumberDEBUG4 = pdata.SeverityNumberDEBUG4 + SeverityNumberINFO = pdata.SeverityNumberINFO + SeverityNumberINFO2 = pdata.SeverityNumberINFO2 + SeverityNumberINFO3 = pdata.SeverityNumberINFO3 + SeverityNumberINFO4 = pdata.SeverityNumberINFO4 + SeverityNumberWARN = pdata.SeverityNumberWARN + SeverityNumberWARN2 = pdata.SeverityNumberWARN2 + SeverityNumberWARN3 = pdata.SeverityNumberWARN3 + SeverityNumberWARN4 = pdata.SeverityNumberWARN4 + SeverityNumberERROR = pdata.SeverityNumberERROR + SeverityNumberERROR2 = pdata.SeverityNumberERROR2 + SeverityNumberERROR3 = pdata.SeverityNumberERROR3 + SeverityNumberERROR4 = pdata.SeverityNumberERROR4 + SeverityNumberFATAL = pdata.SeverityNumberFATAL + SeverityNumberFATAL2 = pdata.SeverityNumberFATAL2 + SeverityNumberFATAL3 = pdata.SeverityNumberFATAL3 + SeverityNumberFATAL4 = pdata.SeverityNumberFATAL4 +) diff --git a/model/pdata/generated_log_alias.go b/model/plog/generated_alias.go similarity index 99% rename from model/pdata/generated_log_alias.go rename to model/plog/generated_alias.go index bafbbff59d4..1eb9e84de66 100644 --- a/model/pdata/generated_log_alias.go +++ b/model/plog/generated_alias.go @@ -15,7 +15,7 @@ // Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". 
-package pdata +package plog import "go.opentelemetry.io/collector/model/internal/pdata" diff --git a/model/pmetric/alias.go b/model/pmetric/alias.go new file mode 100644 index 00000000000..4a9a7a36c05 --- /dev/null +++ b/model/pmetric/alias.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pmetric // import "go.opentelemetry.io/collector/model/pmetric" + +// This file contains aliases for metric data structures. + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// MetricsMarshaler is an alias for pdata.MetricsMarshaler interface. +type MetricsMarshaler = pdata.MetricsMarshaler + +// MetricsUnmarshaler is an alias for pdata.MetricsUnmarshaler interface. +type MetricsUnmarshaler = pdata.MetricsUnmarshaler + +// MetricsSizer is an alias for pdata.MetricsSizer interface. +type MetricsSizer = pdata.MetricsSizer + +// Metrics is an alias for pdata.Metrics structure. +type Metrics = pdata.Metrics + +// NewMetrics is an alias for a function to create new Metrics. +var NewMetrics = pdata.NewMetrics + +// MetricDataType is an alias for pdata.MetricDataType type. 
+type MetricDataType = pdata.MetricDataType + +const ( + MetricDataTypeNone = pdata.MetricDataTypeNone + MetricDataTypeGauge = pdata.MetricDataTypeGauge + MetricDataTypeSum = pdata.MetricDataTypeSum + MetricDataTypeHistogram = pdata.MetricDataTypeHistogram + MetricDataTypeExponentialHistogram = pdata.MetricDataTypeExponentialHistogram + MetricDataTypeSummary = pdata.MetricDataTypeSummary +) + +// MetricAggregationTemporality is an alias for pdata.MetricAggregationTemporality type. +type MetricAggregationTemporality = pdata.MetricAggregationTemporality + +const ( + MetricAggregationTemporalityUnspecified = pdata.MetricAggregationTemporalityUnspecified + MetricAggregationTemporalityDelta = pdata.MetricAggregationTemporalityDelta + MetricAggregationTemporalityCumulative = pdata.MetricAggregationTemporalityCumulative +) + +// MetricDataPointFlags is an alias for pdata.MetricDataPointFlags type. +type MetricDataPointFlags = pdata.MetricDataPointFlags + +const ( + MetricDataPointFlagsNone = pdata.MetricDataPointFlagsNone +) + +// NewMetricDataPointFlags is an alias for a function to create new MetricDataPointFlags. +var NewMetricDataPointFlags = pdata.NewMetricDataPointFlags + +// MetricDataPointFlag is an alias for pdata.MetricDataPointFlag type. +type MetricDataPointFlag = pdata.MetricDataPointFlag + +const ( + MetricDataPointFlagNoRecordedValue = pdata.MetricDataPointFlagNoRecordedValue +) + +// MetricValueType is an alias for pdata.MetricValueType type. 
+type MetricValueType = pdata.MetricValueType + +const ( + MetricValueTypeNone = pdata.MetricValueTypeNone + MetricValueTypeInt = pdata.MetricValueTypeInt + MetricValueTypeDouble = pdata.MetricValueTypeDouble +) diff --git a/model/pdata/generated_metrics_alias.go b/model/pmetric/generated_alias.go similarity index 99% rename from model/pdata/generated_metrics_alias.go rename to model/pmetric/generated_alias.go index 282dcc9a22f..73cc75da6fe 100644 --- a/model/pdata/generated_metrics_alias.go +++ b/model/pmetric/generated_alias.go @@ -15,7 +15,7 @@ // Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". -package pdata +package pmetric import "go.opentelemetry.io/collector/model/internal/pdata" diff --git a/model/ptrace/alias.go b/model/ptrace/alias.go new file mode 100644 index 00000000000..fd29f02770b --- /dev/null +++ b/model/ptrace/alias.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ptrace // import "go.opentelemetry.io/collector/model/ptrace" + +// This file contains aliases for trace data structures. + +import "go.opentelemetry.io/collector/model/internal/pdata" + +// TracesMarshaler is an alias for pdata.TracesMarshaler interface. +type TracesMarshaler = pdata.TracesMarshaler + +// TracesUnmarshaler is an alias for pdata.TracesUnmarshaler interface. 
+type TracesUnmarshaler = pdata.TracesUnmarshaler + +// TracesSizer is an alias for pdata.TracesSizer interface. +type TracesSizer = pdata.TracesSizer + +// Traces is an alias for pdata.Traces struct. +type Traces = pdata.Traces + +// NewTraces is an alias for a function to create new Traces. +var NewTraces = pdata.NewTraces + +// TraceState is an alias for pdata.TraceState type. +type TraceState = pdata.TraceState + +const ( + TraceStateEmpty = pdata.TraceStateEmpty +) + +// SpanKind is an alias for pdata.SpanKind type. +type SpanKind = pdata.SpanKind + +const ( + SpanKindUnspecified = pdata.SpanKindUnspecified + SpanKindInternal = pdata.SpanKindInternal + SpanKindServer = pdata.SpanKindServer + SpanKindClient = pdata.SpanKindClient + SpanKindProducer = pdata.SpanKindProducer + SpanKindConsumer = pdata.SpanKindConsumer +) + +// StatusCode is an alias for pdata.StatusCode type. +type StatusCode = pdata.StatusCode + +const ( + StatusCodeUnset = pdata.StatusCodeUnset + StatusCodeOk = pdata.StatusCodeOk + StatusCodeError = pdata.StatusCodeError +) diff --git a/model/pdata/generated_trace_alias.go b/model/ptrace/generated_alias.go similarity index 99% rename from model/pdata/generated_trace_alias.go rename to model/ptrace/generated_alias.go index 4b3bf5d399b..c96c5db5163 100644 --- a/model/pdata/generated_trace_alias.go +++ b/model/ptrace/generated_alias.go @@ -15,7 +15,7 @@ // Code generated by "model/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "go run model/internal/cmd/pdatagen/main.go". 
-package pdata +package ptrace import "go.opentelemetry.io/collector/model/internal/pdata" diff --git a/processor/batchprocessor/batch_processor.go b/processor/batchprocessor/batch_processor.go index c7974f08bdd..72ff4fe2c60 100644 --- a/processor/batchprocessor/batch_processor.go +++ b/processor/batchprocessor/batch_processor.go @@ -28,7 +28,9 @@ import ( "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) // batch_processor is a component that accepts spans and metrics, places them @@ -187,20 +189,20 @@ func (bp *batchProcessor) sendItems(triggerMeasure *stats.Int64Measure) { } // ConsumeTraces implements TracesProcessor -func (bp *batchProcessor) ConsumeTraces(_ context.Context, td pdata.Traces) error { +func (bp *batchProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error { bp.newItem <- td return nil } // ConsumeMetrics implements MetricsProcessor -func (bp *batchProcessor) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (bp *batchProcessor) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { // First thing is convert into a different internal format bp.newItem <- md return nil } // ConsumeLogs implements LogsProcessor -func (bp *batchProcessor) ConsumeLogs(_ context.Context, ld pdata.Logs) error { +func (bp *batchProcessor) ConsumeLogs(_ context.Context, ld plog.Logs) error { bp.newItem <- ld return nil } @@ -222,18 +224,18 @@ func newBatchLogsProcessor(set component.ProcessorCreateSettings, next consumer. 
type batchTraces struct { nextConsumer consumer.Traces - traceData pdata.Traces + traceData ptrace.Traces spanCount int - sizer pdata.TracesSizer + sizer ptrace.TracesSizer } func newBatchTraces(nextConsumer consumer.Traces) *batchTraces { - return &batchTraces{nextConsumer: nextConsumer, traceData: pdata.NewTraces(), sizer: otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer)} + return &batchTraces{nextConsumer: nextConsumer, traceData: ptrace.NewTraces(), sizer: otlp.NewProtobufTracesMarshaler().(ptrace.TracesSizer)} } // add updates current batchTraces by adding new TraceData object func (bt *batchTraces) add(item interface{}) { - td := item.(pdata.Traces) + td := item.(ptrace.Traces) newSpanCount := td.SpanCount() if newSpanCount == 0 { return @@ -244,13 +246,13 @@ func (bt *batchTraces) add(item interface{}) { } func (bt *batchTraces) export(ctx context.Context, sendBatchMaxSize int) error { - var req pdata.Traces + var req ptrace.Traces if sendBatchMaxSize > 0 && bt.itemCount() > sendBatchMaxSize { req = splitTraces(sendBatchMaxSize, bt.traceData) bt.spanCount -= sendBatchMaxSize } else { req = bt.traceData - bt.traceData = pdata.NewTraces() + bt.traceData = ptrace.NewTraces() bt.spanCount = 0 } return bt.nextConsumer.ConsumeTraces(ctx, req) @@ -266,23 +268,23 @@ func (bt *batchTraces) size() int { type batchMetrics struct { nextConsumer consumer.Metrics - metricData pdata.Metrics + metricData pmetric.Metrics dataPointCount int - sizer pdata.MetricsSizer + sizer pmetric.MetricsSizer } func newBatchMetrics(nextConsumer consumer.Metrics) *batchMetrics { - return &batchMetrics{nextConsumer: nextConsumer, metricData: pdata.NewMetrics(), sizer: otlp.NewProtobufMetricsMarshaler().(pdata.MetricsSizer)} + return &batchMetrics{nextConsumer: nextConsumer, metricData: pmetric.NewMetrics(), sizer: otlp.NewProtobufMetricsMarshaler().(pmetric.MetricsSizer)} } func (bm *batchMetrics) export(ctx context.Context, sendBatchMaxSize int) error { - var req pdata.Metrics + var 
req pmetric.Metrics if sendBatchMaxSize > 0 && bm.dataPointCount > sendBatchMaxSize { req = splitMetrics(sendBatchMaxSize, bm.metricData) bm.dataPointCount -= sendBatchMaxSize } else { req = bm.metricData - bm.metricData = pdata.NewMetrics() + bm.metricData = pmetric.NewMetrics() bm.dataPointCount = 0 } return bm.nextConsumer.ConsumeMetrics(ctx, req) @@ -297,7 +299,7 @@ func (bm *batchMetrics) size() int { } func (bm *batchMetrics) add(item interface{}) { - md := item.(pdata.Metrics) + md := item.(pmetric.Metrics) newDataPointCount := md.DataPointCount() if newDataPointCount == 0 { @@ -309,23 +311,23 @@ func (bm *batchMetrics) add(item interface{}) { type batchLogs struct { nextConsumer consumer.Logs - logData pdata.Logs + logData plog.Logs logCount int - sizer pdata.LogsSizer + sizer plog.LogsSizer } func newBatchLogs(nextConsumer consumer.Logs) *batchLogs { - return &batchLogs{nextConsumer: nextConsumer, logData: pdata.NewLogs(), sizer: otlp.NewProtobufLogsMarshaler().(pdata.LogsSizer)} + return &batchLogs{nextConsumer: nextConsumer, logData: plog.NewLogs(), sizer: otlp.NewProtobufLogsMarshaler().(plog.LogsSizer)} } func (bl *batchLogs) export(ctx context.Context, sendBatchMaxSize int) error { - var req pdata.Logs + var req plog.Logs if sendBatchMaxSize > 0 && bl.logCount > sendBatchMaxSize { req = splitLogs(sendBatchMaxSize, bl.logData) bl.logCount -= sendBatchMaxSize } else { req = bl.logData - bl.logData = pdata.NewLogs() + bl.logData = plog.NewLogs() bl.logCount = 0 } return bl.nextConsumer.ConsumeLogs(ctx, req) @@ -340,7 +342,7 @@ func (bl *batchLogs) size() int { } func (bl *batchLogs) add(item interface{}) { - ld := item.(pdata.Logs) + ld := item.(plog.Logs) newLogsCount := ld.LogRecordCount() if newLogsCount == 0 { diff --git a/processor/batchprocessor/batch_processor_test.go b/processor/batchprocessor/batch_processor_test.go index 0b23cb7bff8..612e73e126d 100644 --- a/processor/batchprocessor/batch_processor_test.go +++ 
b/processor/batchprocessor/batch_processor_test.go @@ -32,7 +32,9 @@ import ( "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/internal/testdata" "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" ) func TestBatchProcessorSpansDelivered(t *testing.T) { @@ -46,7 +48,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) { requestCount := 1000 spansPerRequest := 100 - traceDataSlice := make([]pdata.Traces, 0, requestCount) + traceDataSlice := make([]ptrace.Traces, 0, requestCount) for requestNum := 0; requestNum < requestCount; requestNum++ { td := testdata.GenerateTracesManySpansSameResource(spansPerRequest) spans := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans() @@ -58,7 +60,7 @@ func TestBatchProcessorSpansDelivered(t *testing.T) { } // Added to test logic that check for empty resources. - td := pdata.NewTraces() + td := ptrace.NewTraces() assert.NoError(t, batcher.ConsumeTraces(context.Background(), td)) require.NoError(t, batcher.Shutdown(context.Background())) @@ -98,7 +100,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) { } // Added to test logic that check for empty resources. - td := pdata.NewTraces() + td := ptrace.NewTraces() require.NoError(t, batcher.ConsumeTraces(context.Background(), td)) // wait for all spans to be reported @@ -120,7 +122,7 @@ func TestBatchProcessorSpansDeliveredEnforceBatchSize(t *testing.T) { } func TestBatchProcessorSentBySize(t *testing.T) { - sizer := otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer) + sizer := otlp.NewProtobufTracesMarshaler().(ptrace.TracesSizer) views := MetricViews() require.NoError(t, view.Register(views...)) defer view.Unregister(views...) 
@@ -276,7 +278,7 @@ func TestBatchMetricProcessor_ReceivingData(t *testing.T) { require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) - metricDataSlice := make([]pdata.Metrics, 0, requestCount) + metricDataSlice := make([]pmetric.Metrics, 0, requestCount) for requestNum := 0; requestNum < requestCount; requestNum++ { md := testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest) @@ -289,7 +291,7 @@ func TestBatchMetricProcessor_ReceivingData(t *testing.T) { } // Added to test case with empty resources sent. - md := pdata.NewMetrics() + md := pmetric.NewMetrics() assert.NoError(t, batcher.ConsumeMetrics(context.Background(), md)) require.NoError(t, batcher.Shutdown(context.Background())) @@ -308,7 +310,7 @@ func TestBatchMetricProcessor_ReceivingData(t *testing.T) { } func TestBatchMetricProcessor_BatchSize(t *testing.T) { - sizer := otlp.NewProtobufMetricsMarshaler().(pdata.MetricsSizer) + sizer := otlp.NewProtobufMetricsMarshaler().(pmetric.MetricsSizer) views := MetricViews() require.NoError(t, view.Register(views...)) defer view.Unregister(views...) 
@@ -470,8 +472,8 @@ func getTestSpanName(requestNum, index int) string { return fmt.Sprintf("test-span-%d-%d", requestNum, index) } -func spansReceivedByName(tds []pdata.Traces) map[string]pdata.Span { - spansReceivedByName := map[string]pdata.Span{} +func spansReceivedByName(tds []ptrace.Traces) map[string]ptrace.Span { + spansReceivedByName := map[string]ptrace.Span{} for i := range tds { rss := tds[i].ResourceSpans() for i := 0; i < rss.Len(); i++ { @@ -488,8 +490,8 @@ func spansReceivedByName(tds []pdata.Traces) map[string]pdata.Span { return spansReceivedByName } -func metricsReceivedByName(mds []pdata.Metrics) map[string]pdata.Metric { - metricsReceivedByName := map[string]pdata.Metric{} +func metricsReceivedByName(mds []pmetric.Metrics) map[string]pmetric.Metric { + metricsReceivedByName := map[string]pmetric.Metric{} for _, md := range mds { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { @@ -511,7 +513,7 @@ func getTestMetricName(requestNum, index int) string { } func BenchmarkTraceSizeBytes(b *testing.B) { - sizer := otlp.NewProtobufTracesMarshaler().(pdata.TracesSizer) + sizer := otlp.NewProtobufTracesMarshaler().(ptrace.TracesSizer) td := testdata.GenerateTracesManySpansSameResource(8192) for n := 0; n < b.N; n++ { fmt.Println(sizer.TracesSize(td)) @@ -541,7 +543,7 @@ func BenchmarkBatchMetricProcessor(b *testing.B) { require.NoError(b, err) require.NoError(b, batcher.Start(ctx, componenttest.NewNopHost())) - mds := make([]pdata.Metrics, 0, b.N) + mds := make([]pmetric.Metrics, 0, b.N) for n := 0; n < b.N; n++ { mds = append(mds, testdata.GenerateMetricsManyMetricsSameResource(metricsPerRequest), @@ -567,7 +569,7 @@ func (sme *metricsSink) Capabilities() consumer.Capabilities { } } -func (sme *metricsSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (sme *metricsSink) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { sme.mu.Lock() defer sme.mu.Unlock() sme.metricsCount += md.MetricCount() @@ -592,7 +594,7 
@@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { require.NoError(t, err) require.NoError(t, batcher.Start(context.Background(), componenttest.NewNopHost())) - logDataSlice := make([]pdata.Logs, 0, requestCount) + logDataSlice := make([]plog.Logs, 0, requestCount) for requestNum := 0; requestNum < requestCount; requestNum++ { ld := testdata.GenerateLogsManyLogRecordsSameResource(logsPerRequest) @@ -605,7 +607,7 @@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { } // Added to test case with empty resources sent. - ld := pdata.NewLogs() + ld := plog.NewLogs() assert.NoError(t, batcher.ConsumeLogs(context.Background(), ld)) require.NoError(t, batcher.Shutdown(context.Background())) @@ -624,7 +626,7 @@ func TestBatchLogProcessor_ReceivingData(t *testing.T) { } func TestBatchLogProcessor_BatchSize(t *testing.T) { - sizer := otlp.NewProtobufLogsMarshaler().(pdata.LogsSizer) + sizer := otlp.NewProtobufLogsMarshaler().(plog.LogsSizer) views := MetricViews() require.NoError(t, view.Register(views...)) defer view.Unregister(views...) 
@@ -767,8 +769,8 @@ func getTestLogSeverityText(requestNum, index int) string { return fmt.Sprintf("test-log-int-%d-%d", requestNum, index) } -func logsReceivedBySeverityText(lds []pdata.Logs) map[string]pdata.LogRecord { - logsReceivedBySeverityText := map[string]pdata.LogRecord{} +func logsReceivedBySeverityText(lds []plog.Logs) map[string]plog.LogRecord { + logsReceivedBySeverityText := map[string]plog.LogRecord{} for i := range lds { ld := lds[i] rms := ld.ResourceLogs() diff --git a/processor/batchprocessor/splitlogs.go b/processor/batchprocessor/splitlogs.go index df40efec179..f31ef982b1c 100644 --- a/processor/batchprocessor/splitlogs.go +++ b/processor/batchprocessor/splitlogs.go @@ -15,18 +15,18 @@ package batchprocessor // import "go.opentelemetry.io/collector/processor/batchprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) // splitLogs removes logrecords from the input data and returns a new data of the specified size. -func splitLogs(size int, src pdata.Logs) pdata.Logs { +func splitLogs(size int, src plog.Logs) plog.Logs { if src.LogRecordCount() <= size { return src } totalCopiedLogRecords := 0 - dest := pdata.NewLogs() + dest := plog.NewLogs() - src.ResourceLogs().RemoveIf(func(srcRl pdata.ResourceLogs) bool { + src.ResourceLogs().RemoveIf(func(srcRl plog.ResourceLogs) bool { // If we are done skip everything else. if totalCopiedLogRecords == size { return false @@ -42,7 +42,7 @@ func splitLogs(size int, src pdata.Logs) pdata.Logs { destRl := dest.ResourceLogs().AppendEmpty() srcRl.Resource().CopyTo(destRl.Resource()) - srcRl.ScopeLogs().RemoveIf(func(srcIll pdata.ScopeLogs) bool { + srcRl.ScopeLogs().RemoveIf(func(srcIll plog.ScopeLogs) bool { // If we are done skip everything else. 
if totalCopiedLogRecords == size { return false @@ -58,7 +58,7 @@ func splitLogs(size int, src pdata.Logs) pdata.Logs { destIll := destRl.ScopeLogs().AppendEmpty() srcIll.Scope().CopyTo(destIll.Scope()) - srcIll.LogRecords().RemoveIf(func(srcMetric pdata.LogRecord) bool { + srcIll.LogRecords().RemoveIf(func(srcMetric plog.LogRecord) bool { // If we are done skip everything else. if totalCopiedLogRecords == size { return false @@ -75,8 +75,8 @@ func splitLogs(size int, src pdata.Logs) pdata.Logs { return dest } -// resourceLRC calculates the total number of log records in the pdata.ResourceLogs. -func resourceLRC(rs pdata.ResourceLogs) (count int) { +// resourceLRC calculates the total number of log records in the plog.ResourceLogs. +func resourceLRC(rs plog.ResourceLogs) (count int) { for k := 0; k < rs.ScopeLogs().Len(); k++ { count += rs.ScopeLogs().At(k).LogRecords().Len() } diff --git a/processor/batchprocessor/splitlogs_test.go b/processor/batchprocessor/splitlogs_test.go index 76d874932f2..212e05d7f61 100644 --- a/processor/batchprocessor/splitlogs_test.go +++ b/processor/batchprocessor/splitlogs_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) func TestSplitLogs_noop(t *testing.T) { @@ -30,7 +30,7 @@ func TestSplitLogs_noop(t *testing.T) { assert.Equal(t, td, split) i := 0 - td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().RemoveIf(func(_ pdata.LogRecord) bool { + td.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().RemoveIf(func(_ plog.LogRecord) bool { i++ return i > 5 }) @@ -43,7 +43,7 @@ func TestSplitLogs(t *testing.T) { for i := 0; i < logs.Len(); i++ { logs.At(i).SetSeverityText(getTestLogSeverityText(0, i)) } - cp := pdata.NewLogs() + cp := plog.NewLogs() cpLogs := cp.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() cpLogs.EnsureCapacity(5) 
ld.ResourceLogs().At(0).Resource().CopyTo( @@ -158,7 +158,7 @@ func TestSplitLogsMultipleILL(t *testing.T) { } func BenchmarkSplitLogs(b *testing.B) { - md := pdata.NewLogs() + md := plog.NewLogs() rms := md.ResourceLogs() for i := 0; i < 20; i++ { testdata.GenerateLogsManyLogRecordsSameResource(20).ResourceLogs().MoveAndAppendTo(md.ResourceLogs()) @@ -172,7 +172,7 @@ func BenchmarkSplitLogs(b *testing.B) { b.Skipf("SKIP: b.N too high, set -benchtime=x with n < 100000") } - clones := make([]pdata.Logs, b.N) + clones := make([]plog.Logs, b.N) for n := 0; n < b.N; n++ { clones[n] = md.Clone() } diff --git a/processor/batchprocessor/splitmetrics.go b/processor/batchprocessor/splitmetrics.go index f1c188984d0..6c75732cd4c 100644 --- a/processor/batchprocessor/splitmetrics.go +++ b/processor/batchprocessor/splitmetrics.go @@ -15,19 +15,19 @@ package batchprocessor // import "go.opentelemetry.io/collector/processor/batchprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) // splitMetrics removes metrics from the input data and returns a new data of the specified size. -func splitMetrics(size int, src pdata.Metrics) pdata.Metrics { +func splitMetrics(size int, src pmetric.Metrics) pmetric.Metrics { dataPoints := src.DataPointCount() if dataPoints <= size { return src } totalCopiedDataPoints := 0 - dest := pdata.NewMetrics() + dest := pmetric.NewMetrics() - src.ResourceMetrics().RemoveIf(func(srcRs pdata.ResourceMetrics) bool { + src.ResourceMetrics().RemoveIf(func(srcRs pmetric.ResourceMetrics) bool { // If we are done skip everything else. 
if totalCopiedDataPoints == size { return false @@ -43,7 +43,7 @@ func splitMetrics(size int, src pdata.Metrics) pdata.Metrics { destRs := dest.ResourceMetrics().AppendEmpty() srcRs.Resource().CopyTo(destRs.Resource()) - srcRs.ScopeMetrics().RemoveIf(func(srcIlm pdata.ScopeMetrics) bool { + srcRs.ScopeMetrics().RemoveIf(func(srcIlm pmetric.ScopeMetrics) bool { // If we are done skip everything else. if totalCopiedDataPoints == size { return false @@ -59,7 +59,7 @@ func splitMetrics(size int, src pdata.Metrics) pdata.Metrics { destIlm := destRs.ScopeMetrics().AppendEmpty() srcIlm.Scope().CopyTo(destIlm.Scope()) - srcIlm.Metrics().RemoveIf(func(srcMetric pdata.Metric) bool { + srcIlm.Metrics().RemoveIf(func(srcMetric pmetric.Metric) bool { // If we are done skip everything else. if totalCopiedDataPoints == size { return false @@ -86,8 +86,8 @@ func splitMetrics(size int, src pdata.Metrics) pdata.Metrics { return dest } -// resourceMetricsDPC calculates the total number of data points in the pdata.ResourceMetrics. -func resourceMetricsDPC(rs pdata.ResourceMetrics) int { +// resourceMetricsDPC calculates the total number of data points in the pmetric.ResourceMetrics. +func resourceMetricsDPC(rs pmetric.ResourceMetrics) int { dataPointCount := 0 ilms := rs.ScopeMetrics() for k := 0; k < ilms.Len(); k++ { @@ -96,8 +96,8 @@ func resourceMetricsDPC(rs pdata.ResourceMetrics) int { return dataPointCount } -// scopeMetricsDPC calculates the total number of data points in the pdata.ScopeMetrics. -func scopeMetricsDPC(ilm pdata.ScopeMetrics) int { +// scopeMetricsDPC calculates the total number of data points in the pmetric.ScopeMetrics. +func scopeMetricsDPC(ilm pmetric.ScopeMetrics) int { dataPointCount := 0 ms := ilm.Metrics() for k := 0; k < ms.Len(); k++ { @@ -106,18 +106,18 @@ func scopeMetricsDPC(ilm pdata.ScopeMetrics) int { return dataPointCount } -// metricDPC calculates the total number of data points in the pdata.Metric. 
-func metricDPC(ms pdata.Metric) int { +// metricDPC calculates the total number of data points in the pmetric.Metric. +func metricDPC(ms pmetric.Metric) int { switch ms.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return ms.Gauge().DataPoints().Len() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return ms.Sum().DataPoints().Len() - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return ms.Histogram().DataPoints().Len() - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: return ms.ExponentialHistogram().DataPoints().Len() - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return ms.Summary().DataPoints().Len() } return 0 @@ -125,35 +125,35 @@ func metricDPC(ms pdata.Metric) int { // splitMetric removes metric points from the input data and moves data of the specified size to destination. // Returns size of moved data and boolean describing, whether the metric should be removed from original slice. 
-func splitMetric(ms, dest pdata.Metric, size int) (int, bool) { +func splitMetric(ms, dest pmetric.Metric, size int) (int, bool) { dest.SetDataType(ms.DataType()) dest.SetName(ms.Name()) dest.SetDescription(ms.Description()) dest.SetUnit(ms.Unit()) switch ms.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return splitNumberDataPoints(ms.Gauge().DataPoints(), dest.Gauge().DataPoints(), size) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dest.Sum().SetAggregationTemporality(ms.Sum().AggregationTemporality()) dest.Sum().SetIsMonotonic(ms.Sum().IsMonotonic()) return splitNumberDataPoints(ms.Sum().DataPoints(), dest.Sum().DataPoints(), size) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dest.Histogram().SetAggregationTemporality(ms.Histogram().AggregationTemporality()) return splitHistogramDataPoints(ms.Histogram().DataPoints(), dest.Histogram().DataPoints(), size) - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: dest.ExponentialHistogram().SetAggregationTemporality(ms.ExponentialHistogram().AggregationTemporality()) return splitExponentialHistogramDataPoints(ms.ExponentialHistogram().DataPoints(), dest.ExponentialHistogram().DataPoints(), size) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return splitSummaryDataPoints(ms.Summary().DataPoints(), dest.Summary().DataPoints(), size) } return size, false } -func splitNumberDataPoints(src, dst pdata.NumberDataPointSlice, size int) (int, bool) { +func splitNumberDataPoints(src, dst pmetric.NumberDataPointSlice, size int) (int, bool) { dst.EnsureCapacity(size) i := 0 - src.RemoveIf(func(dp pdata.NumberDataPoint) bool { + src.RemoveIf(func(dp pmetric.NumberDataPoint) bool { if i < size { dp.MoveTo(dst.AppendEmpty()) i++ @@ -164,10 +164,10 @@ func splitNumberDataPoints(src, dst pdata.NumberDataPointSlice, size int) (int, return size, false } -func 
splitHistogramDataPoints(src, dst pdata.HistogramDataPointSlice, size int) (int, bool) { +func splitHistogramDataPoints(src, dst pmetric.HistogramDataPointSlice, size int) (int, bool) { dst.EnsureCapacity(size) i := 0 - src.RemoveIf(func(dp pdata.HistogramDataPoint) bool { + src.RemoveIf(func(dp pmetric.HistogramDataPoint) bool { if i < size { dp.MoveTo(dst.AppendEmpty()) i++ @@ -178,10 +178,10 @@ func splitHistogramDataPoints(src, dst pdata.HistogramDataPointSlice, size int) return size, false } -func splitExponentialHistogramDataPoints(src, dst pdata.ExponentialHistogramDataPointSlice, size int) (int, bool) { +func splitExponentialHistogramDataPoints(src, dst pmetric.ExponentialHistogramDataPointSlice, size int) (int, bool) { dst.EnsureCapacity(size) i := 0 - src.RemoveIf(func(dp pdata.ExponentialHistogramDataPoint) bool { + src.RemoveIf(func(dp pmetric.ExponentialHistogramDataPoint) bool { if i < size { dp.MoveTo(dst.AppendEmpty()) i++ @@ -192,10 +192,10 @@ func splitExponentialHistogramDataPoints(src, dst pdata.ExponentialHistogramData return size, false } -func splitSummaryDataPoints(src, dst pdata.SummaryDataPointSlice, size int) (int, bool) { +func splitSummaryDataPoints(src, dst pmetric.SummaryDataPointSlice, size int) (int, bool) { dst.EnsureCapacity(size) i := 0 - src.RemoveIf(func(dp pdata.SummaryDataPoint) bool { + src.RemoveIf(func(dp pmetric.SummaryDataPoint) bool { if i < size { dp.MoveTo(dst.AppendEmpty()) i++ diff --git a/processor/batchprocessor/splitmetrics_test.go b/processor/batchprocessor/splitmetrics_test.go index 2ba9fa2cd65..b19ffc6c5b0 100644 --- a/processor/batchprocessor/splitmetrics_test.go +++ b/processor/batchprocessor/splitmetrics_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) func TestSplitMetrics_noop(t *testing.T) { @@ -30,7 +30,7 @@ func TestSplitMetrics_noop(t 
*testing.T) { assert.Equal(t, td, split) i := 0 - td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().RemoveIf(func(_ pdata.Metric) bool { + td.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().RemoveIf(func(_ pmetric.Metric) bool { i++ return i > 5 }) @@ -45,7 +45,7 @@ func TestSplitMetrics(t *testing.T) { metrics.At(i).SetName(getTestMetricName(0, i)) assert.Equal(t, dataPointCount, metricDPC(metrics.At(i))) } - cp := pdata.NewMetrics() + cp := pmetric.NewMetrics() cpMetrics := cp.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() cpMetrics.EnsureCapacity(5) md.ResourceMetrics().At(0).ScopeMetrics().At(0).Scope().CopyTo( @@ -202,7 +202,7 @@ func TestSplitMetricsAllTypes(t *testing.T) { assert.Equal(t, 1, gaugeDouble.Gauge().DataPoints().Len()) assert.Equal(t, "test-metric-int-0-1", gaugeDouble.Name()) assert.Equal(t, 1, sumInt.Sum().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, sumInt.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, sumInt.Sum().AggregationTemporality()) assert.Equal(t, true, sumInt.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-2", sumInt.Name()) @@ -212,11 +212,11 @@ func TestSplitMetricsAllTypes(t *testing.T) { sumInt = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) sumDouble := split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1) assert.Equal(t, 1, sumInt.Sum().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, sumInt.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, sumInt.Sum().AggregationTemporality()) assert.Equal(t, true, sumInt.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-2", sumInt.Name()) assert.Equal(t, 1, sumDouble.Sum().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, sumDouble.Sum().AggregationTemporality()) + assert.Equal(t, 
pmetric.MetricAggregationTemporalityCumulative, sumDouble.Sum().AggregationTemporality()) assert.Equal(t, true, sumDouble.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-3", sumDouble.Name()) @@ -226,11 +226,11 @@ func TestSplitMetricsAllTypes(t *testing.T) { sumDouble = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) histogram := split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1) assert.Equal(t, 1, sumDouble.Sum().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, sumDouble.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, sumDouble.Sum().AggregationTemporality()) assert.Equal(t, true, sumDouble.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-3", sumDouble.Name()) assert.Equal(t, 1, histogram.Histogram().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, histogram.Histogram().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, histogram.Histogram().AggregationTemporality()) assert.Equal(t, "test-metric-int-0-4", histogram.Name()) split = splitMetrics(splitSize, md) @@ -239,10 +239,10 @@ func TestSplitMetricsAllTypes(t *testing.T) { histogram = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) exponentialHistogram := split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1) assert.Equal(t, 1, histogram.Histogram().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, histogram.Histogram().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, histogram.Histogram().AggregationTemporality()) assert.Equal(t, "test-metric-int-0-4", histogram.Name()) assert.Equal(t, 1, exponentialHistogram.ExponentialHistogram().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityDelta, exponentialHistogram.ExponentialHistogram().AggregationTemporality()) + assert.Equal(t, 
pmetric.MetricAggregationTemporalityDelta, exponentialHistogram.ExponentialHistogram().AggregationTemporality()) assert.Equal(t, "test-metric-int-0-5", exponentialHistogram.Name()) split = splitMetrics(splitSize, md) @@ -251,7 +251,7 @@ func TestSplitMetricsAllTypes(t *testing.T) { exponentialHistogram = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) summary := split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1) assert.Equal(t, 1, exponentialHistogram.ExponentialHistogram().DataPoints().Len()) - assert.Equal(t, pdata.MetricAggregationTemporalityDelta, exponentialHistogram.ExponentialHistogram().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityDelta, exponentialHistogram.ExponentialHistogram().AggregationTemporality()) assert.Equal(t, "test-metric-int-0-5", exponentialHistogram.Name()) assert.Equal(t, 1, summary.Summary().DataPoints().Len()) assert.Equal(t, "test-metric-int-0-6", summary.Name()) @@ -276,7 +276,7 @@ func TestSplitMetricsBatchSizeSmallerThanDataPointCount(t *testing.T) { splitMetric := split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, 1, split.MetricCount()) assert.Equal(t, 2, md.MetricCount()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) assert.Equal(t, true, splitMetric.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-0", splitMetric.Name()) @@ -284,7 +284,7 @@ func TestSplitMetricsBatchSizeSmallerThanDataPointCount(t *testing.T) { splitMetric = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, 1, split.MetricCount()) assert.Equal(t, 1, md.MetricCount()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, 
splitMetric.Sum().AggregationTemporality()) assert.Equal(t, true, splitMetric.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-0", splitMetric.Name()) @@ -292,7 +292,7 @@ func TestSplitMetricsBatchSizeSmallerThanDataPointCount(t *testing.T) { splitMetric = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, 1, split.MetricCount()) assert.Equal(t, 1, md.MetricCount()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) assert.Equal(t, true, splitMetric.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-1", splitMetric.Name()) @@ -300,7 +300,7 @@ func TestSplitMetricsBatchSizeSmallerThanDataPointCount(t *testing.T) { splitMetric = split.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, 1, split.MetricCount()) assert.Equal(t, 1, md.MetricCount()) - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, splitMetric.Sum().AggregationTemporality()) assert.Equal(t, true, splitMetric.Sum().IsMonotonic()) assert.Equal(t, "test-metric-int-0-1", splitMetric.Name()) } @@ -335,7 +335,7 @@ func TestSplitMetricsMultipleILM(t *testing.T) { } func BenchmarkSplitMetrics(b *testing.B) { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rms := md.ResourceMetrics() for i := 0; i < 20; i++ { testdata.GenerateMetricsManyMetricsSameResource(20).ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics()) @@ -350,7 +350,7 @@ func BenchmarkSplitMetrics(b *testing.B) { } dataPointCount := metricDPC(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0)) - clones := make([]pdata.Metrics, b.N) + clones := make([]pmetric.Metrics, b.N) for n := 0; n < b.N; n++ { clones[n] = md.Clone() } diff --git a/processor/batchprocessor/splittraces.go 
b/processor/batchprocessor/splittraces.go index e8fbf779b7b..4b2aaff0020 100644 --- a/processor/batchprocessor/splittraces.go +++ b/processor/batchprocessor/splittraces.go @@ -15,18 +15,18 @@ package batchprocessor // import "go.opentelemetry.io/collector/processor/batchprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) // splitTraces removes spans from the input trace and returns a new trace of the specified size. -func splitTraces(size int, src pdata.Traces) pdata.Traces { +func splitTraces(size int, src ptrace.Traces) ptrace.Traces { if src.SpanCount() <= size { return src } totalCopiedSpans := 0 - dest := pdata.NewTraces() + dest := ptrace.NewTraces() - src.ResourceSpans().RemoveIf(func(srcRs pdata.ResourceSpans) bool { + src.ResourceSpans().RemoveIf(func(srcRs ptrace.ResourceSpans) bool { // If we are done skip everything else. if totalCopiedSpans == size { return false @@ -42,7 +42,7 @@ func splitTraces(size int, src pdata.Traces) pdata.Traces { destRs := dest.ResourceSpans().AppendEmpty() srcRs.Resource().CopyTo(destRs.Resource()) - srcRs.ScopeSpans().RemoveIf(func(srcIls pdata.ScopeSpans) bool { + srcRs.ScopeSpans().RemoveIf(func(srcIls ptrace.ScopeSpans) bool { // If we are done skip everything else. if totalCopiedSpans == size { return false @@ -58,7 +58,7 @@ func splitTraces(size int, src pdata.Traces) pdata.Traces { destIls := destRs.ScopeSpans().AppendEmpty() srcIls.Scope().CopyTo(destIls.Scope()) - srcIls.Spans().RemoveIf(func(srcSpan pdata.Span) bool { + srcIls.Spans().RemoveIf(func(srcSpan ptrace.Span) bool { // If we are done skip everything else. if totalCopiedSpans == size { return false @@ -75,8 +75,8 @@ func splitTraces(size int, src pdata.Traces) pdata.Traces { return dest } -// resourceSC calculates the total number of spans in the pdata.ResourceSpans. 
-func resourceSC(rs pdata.ResourceSpans) (count int) { +// resourceSC calculates the total number of spans in the ptrace.ResourceSpans. +func resourceSC(rs ptrace.ResourceSpans) (count int) { for k := 0; k < rs.ScopeSpans().Len(); k++ { count += rs.ScopeSpans().At(k).Spans().Len() } diff --git a/processor/batchprocessor/splittraces_test.go b/processor/batchprocessor/splittraces_test.go index 863054ad544..b7c805cd061 100644 --- a/processor/batchprocessor/splittraces_test.go +++ b/processor/batchprocessor/splittraces_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) func TestSplitTraces_noop(t *testing.T) { @@ -30,7 +30,7 @@ func TestSplitTraces_noop(t *testing.T) { assert.Equal(t, td, split) i := 0 - td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().RemoveIf(func(_ pdata.Span) bool { + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().RemoveIf(func(_ ptrace.Span) bool { i++ return i > 5 }) @@ -43,7 +43,7 @@ func TestSplitTraces(t *testing.T) { for i := 0; i < spans.Len(); i++ { spans.At(i).SetName(getTestSpanName(0, i)) } - cp := pdata.NewTraces() + cp := ptrace.NewTraces() cpSpans := cp.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans() cpSpans.EnsureCapacity(5) td.ResourceSpans().At(0).Resource().CopyTo( @@ -128,7 +128,7 @@ func TestSplitTracesMultipleResourceSpans_SplitSizeGreaterThanSpanSize(t *testin } func BenchmarkCloneSpans(b *testing.B) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rms := td.ResourceSpans() for i := 0; i < 20; i++ { testdata.GenerateTracesManySpansSameResource(20).ResourceSpans().MoveAndAppendTo(td.ResourceSpans()) @@ -179,7 +179,7 @@ func TestSplitTracesMultipleILS(t *testing.T) { } func BenchmarkSplitTraces(b *testing.B) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rms := td.ResourceSpans() for i := 0; i < 20; i++ { 
testdata.GenerateTracesManySpansSameResource(20).ResourceSpans().MoveAndAppendTo(td.ResourceSpans()) @@ -193,7 +193,7 @@ func BenchmarkSplitTraces(b *testing.B) { b.Skipf("SKIP: b.N too high, set -benchtime=x with n < 100000") } - clones := make([]pdata.Traces, b.N) + clones := make([]ptrace.Traces, b.N) for n := 0; n < b.N; n++ { clones[n] = td.Clone() } diff --git a/processor/memorylimiterprocessor/memorylimiter.go b/processor/memorylimiterprocessor/memorylimiter.go index 93cfe0dbad8..08b1437b0ef 100644 --- a/processor/memorylimiterprocessor/memorylimiter.go +++ b/processor/memorylimiterprocessor/memorylimiter.go @@ -28,7 +28,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension/ballastextension" "go.opentelemetry.io/collector/internal/iruntime" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" "go.opentelemetry.io/collector/obsreport" ) @@ -169,7 +171,7 @@ func (ml *memoryLimiter) shutdown(context.Context) error { return nil } -func (ml *memoryLimiter) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { +func (ml *memoryLimiter) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { numSpans := td.SpanCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" @@ -188,7 +190,7 @@ func (ml *memoryLimiter) processTraces(ctx context.Context, td pdata.Traces) (pd return td, nil } -func (ml *memoryLimiter) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (ml *memoryLimiter) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { numDataPoints := md.DataPointCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" @@ -206,7 +208,7 @@ func (ml *memoryLimiter) processMetrics(ctx context.Context, 
md pdata.Metrics) ( return md, nil } -func (ml *memoryLimiter) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (ml *memoryLimiter) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { numRecords := ld.LogRecordCount() if ml.forcingDrop() { // TODO: actually to be 100% sure that this is "refused" and not "dropped" diff --git a/processor/memorylimiterprocessor/memorylimiter_test.go b/processor/memorylimiterprocessor/memorylimiter_test.go index 33f2add071b..6255052fafd 100644 --- a/processor/memorylimiterprocessor/memorylimiter_test.go +++ b/processor/memorylimiterprocessor/memorylimiter_test.go @@ -31,7 +31,9 @@ import ( "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/extension/ballastextension" "go.opentelemetry.io/collector/internal/iruntime" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" + "go.opentelemetry.io/collector/model/pmetric" + "go.opentelemetry.io/collector/model/ptrace" "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -131,7 +133,7 @@ func TestMetricsMemoryPressureResponse(t *testing.T) { require.NoError(t, err) ctx := context.Background() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() // Below memAllocLimit. currentMemAlloc = 800 @@ -202,7 +204,7 @@ func TestTraceMemoryPressureResponse(t *testing.T) { require.NoError(t, err) ctx := context.Background() - td := pdata.NewTraces() + td := ptrace.NewTraces() // Below memAllocLimit. currentMemAlloc = 800 @@ -273,7 +275,7 @@ func TestLogMemoryPressureResponse(t *testing.T) { require.NoError(t, err) ctx := context.Background() - ld := pdata.NewLogs() + ld := plog.NewLogs() // Below memAllocLimit. 
currentMemAlloc = 800 diff --git a/processor/processorhelper/logs.go b/processor/processorhelper/logs.go index 822a0c99a4b..be7a8e363df 100644 --- a/processor/processorhelper/logs.go +++ b/processor/processorhelper/logs.go @@ -24,12 +24,12 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) // ProcessLogsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. // If error is returned then returned data are ignored. It MUST not call the next component. -type ProcessLogsFunc func(context.Context, pdata.Logs) (pdata.Logs, error) +type ProcessLogsFunc func(context.Context, plog.Logs) (plog.Logs, error) type logProcessor struct { component.StartFunc @@ -55,7 +55,7 @@ func NewLogsProcessor( eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) - logsConsumer, err := consumer.NewLogs(func(ctx context.Context, ld pdata.Logs) error { + logsConsumer, err := consumer.NewLogs(func(ctx context.Context, ld plog.Logs) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) var err error diff --git a/processor/processorhelper/logs_test.go b/processor/processorhelper/logs_test.go index efe04e07fb5..bbd4342de2e 100644 --- a/processor/processorhelper/logs_test.go +++ b/processor/processorhelper/logs_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) var testLogsCfg = config.NewProcessorSettings(config.NewComponentID("test")) @@ -39,7 +39,7 @@ func TestNewLogsProcessor(t *testing.T) { assert.True(t, lp.Capabilities().MutatesData) assert.NoError(t, lp.Start(context.Background(), 
componenttest.NewNopHost())) - assert.NoError(t, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.NoError(t, lp.ConsumeLogs(context.Background(), plog.NewLogs())) assert.NoError(t, lp.Shutdown(context.Background())) } @@ -68,17 +68,17 @@ func TestNewLogsProcessor_ProcessLogError(t *testing.T) { want := errors.New("my_error") lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.Equal(t, want, lp.ConsumeLogs(context.Background(), plog.NewLogs())) } func TestNewLogsProcessor_ProcessLogsErrSkipProcessingData(t *testing.T) { lp, err := NewLogsProcessor(&testLogsCfg, consumertest.NewNop(), newTestLProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, lp.ConsumeLogs(context.Background(), pdata.NewLogs())) + assert.Equal(t, nil, lp.ConsumeLogs(context.Background(), plog.NewLogs())) } func newTestLProcessor(retError error) ProcessLogsFunc { - return func(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { + return func(_ context.Context, ld plog.Logs) (plog.Logs, error) { return ld, retError } } diff --git a/processor/processorhelper/metrics.go b/processor/processorhelper/metrics.go index 4585e51fda5..641bd41627f 100644 --- a/processor/processorhelper/metrics.go +++ b/processor/processorhelper/metrics.go @@ -24,12 +24,12 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) // ProcessMetricsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. // If error is returned then returned data are ignored. It MUST not call the next component. 
-type ProcessMetricsFunc func(context.Context, pdata.Metrics) (pdata.Metrics, error) +type ProcessMetricsFunc func(context.Context, pmetric.Metrics) (pmetric.Metrics, error) type metricsProcessor struct { component.StartFunc @@ -55,7 +55,7 @@ func NewMetricsProcessor( eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) - metricsConsumer, err := consumer.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { + metricsConsumer, err := consumer.NewMetrics(func(ctx context.Context, md pmetric.Metrics) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) var err error diff --git a/processor/processorhelper/metrics_test.go b/processor/processorhelper/metrics_test.go index 5e568037546..cf008064df1 100644 --- a/processor/processorhelper/metrics_test.go +++ b/processor/processorhelper/metrics_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) var testMetricsCfg = config.NewProcessorSettings(config.NewComponentID("test")) @@ -39,7 +39,7 @@ func TestNewMetricsProcessor(t *testing.T) { assert.True(t, mp.Capabilities().MutatesData) assert.NoError(t, mp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.NoError(t, mp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) assert.NoError(t, mp.Shutdown(context.Background())) } @@ -68,17 +68,17 @@ func TestNewMetricsProcessor_ProcessMetricsError(t *testing.T) { want := errors.New("my_error") mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.Equal(t, want, mp.ConsumeMetrics(context.Background(), 
pmetric.NewMetrics())) } func TestNewMetricsProcessor_ProcessMetricsErrSkipProcessingData(t *testing.T) { mp, err := NewMetricsProcessor(&testMetricsCfg, consumertest.NewNop(), newTestMProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, mp.ConsumeMetrics(context.Background(), pdata.NewMetrics())) + assert.Equal(t, nil, mp.ConsumeMetrics(context.Background(), pmetric.NewMetrics())) } func newTestMProcessor(retError error) ProcessMetricsFunc { - return func(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { + return func(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { return md, retError } } diff --git a/processor/processorhelper/traces.go b/processor/processorhelper/traces.go index b802c539ea7..063d966623f 100644 --- a/processor/processorhelper/traces.go +++ b/processor/processorhelper/traces.go @@ -24,12 +24,12 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) // ProcessTracesFunc is a helper function that processes the incoming data and returns the data to be sent to the next component. // If error is returned then returned data are ignored. It MUST not call the next component. 
-type ProcessTracesFunc func(context.Context, pdata.Traces) (pdata.Traces, error) +type ProcessTracesFunc func(context.Context, ptrace.Traces) (ptrace.Traces, error) type tracesProcessor struct { component.StartFunc @@ -55,7 +55,7 @@ func NewTracesProcessor( eventOptions := spanAttributes(cfg.ID()) bs := fromOptions(options) - traceConsumer, err := consumer.NewTraces(func(ctx context.Context, td pdata.Traces) error { + traceConsumer, err := consumer.NewTraces(func(ctx context.Context, td ptrace.Traces) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) var err error diff --git a/processor/processorhelper/traces_test.go b/processor/processorhelper/traces_test.go index de2cc9209ac..b547c1ff70c 100644 --- a/processor/processorhelper/traces_test.go +++ b/processor/processorhelper/traces_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) var testTracesCfg = config.NewProcessorSettings(config.NewComponentID("test")) @@ -39,7 +39,7 @@ func TestNewTracesProcessor(t *testing.T) { assert.True(t, tp.Capabilities().MutatesData) assert.NoError(t, tp.Start(context.Background(), componenttest.NewNopHost())) - assert.NoError(t, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.NoError(t, tp.ConsumeTraces(context.Background(), ptrace.NewTraces())) assert.NoError(t, tp.Shutdown(context.Background())) } @@ -68,17 +68,17 @@ func TestNewTracesProcessor_ProcessTraceError(t *testing.T) { want := errors.New("my_error") tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(want)) require.NoError(t, err) - assert.Equal(t, want, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.Equal(t, want, tp.ConsumeTraces(context.Background(), ptrace.NewTraces())) } func 
TestNewTracesProcessor_ProcessTracesErrSkipProcessingData(t *testing.T) { tp, err := NewTracesProcessor(&testTracesCfg, consumertest.NewNop(), newTestTProcessor(ErrSkipProcessingData)) require.NoError(t, err) - assert.Equal(t, nil, tp.ConsumeTraces(context.Background(), pdata.NewTraces())) + assert.Equal(t, nil, tp.ConsumeTraces(context.Background(), ptrace.NewTraces())) } func newTestTProcessor(retError error) ProcessTracesFunc { - return func(_ context.Context, td pdata.Traces) (pdata.Traces, error) { + return func(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { return td, retError } } diff --git a/receiver/otlpreceiver/otlp_test.go b/receiver/otlpreceiver/otlp_test.go index 9a45e47d9fc..7f5ca081818 100644 --- a/receiver/otlpreceiver/otlp_test.go +++ b/receiver/otlpreceiver/otlp_test.go @@ -50,7 +50,8 @@ import ( "go.opentelemetry.io/collector/internal/testutil" "go.opentelemetry.io/collector/model/otlp" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pcommon" + "go.opentelemetry.io/collector/model/ptrace" semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0" "go.opentelemetry.io/collector/obsreport/obsreporttest" ) @@ -108,27 +109,27 @@ var traceJSON = []byte(` ] }`) -var traceOtlp = func() pdata.Traces { - td := pdata.NewTraces() +var traceOtlp = func() ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().UpsertString(semconv.AttributeHostName, "testHost") spans := rs.ScopeSpans().AppendEmpty().Spans() span1 := spans.AppendEmpty() - span1.SetTraceID(pdata.NewTraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC})) - span1.SetSpanID(pdata.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x74})) - span1.SetParentSpanID(pdata.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73})) + 
span1.SetTraceID(pcommon.NewTraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC})) + span1.SetSpanID(pcommon.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x74})) + span1.SetParentSpanID(pcommon.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73})) span1.SetName("testSpan") span1.SetStartTimestamp(1544712660300000000) span1.SetEndTimestamp(1544712660600000000) - span1.SetKind(pdata.SpanKindServer) + span1.SetKind(ptrace.SpanKindServer) span1.Attributes().UpsertInt("attr1", 55) span2 := spans.AppendEmpty() - span2.SetTraceID(pdata.NewTraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC})) - span2.SetSpanID(pdata.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73})) + span2.SetTraceID(pcommon.NewTraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC})) + span2.SetSpanID(pcommon.NewSpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73})) span2.SetName("testSpan") span2.SetStartTimestamp(1544712660000000000) span2.SetEndTimestamp(1544712661000000000) - span2.SetKind(pdata.SpanKindClient) + span2.SetKind(ptrace.SpanKindClient) span2.Attributes().UpsertInt("attr1", 55) return td }() @@ -466,7 +467,7 @@ func testHTTPProtobufRequest( encoding string, traceBytes []byte, expectedErr error, - wantData pdata.Traces, + wantData ptrace.Traces, ) { tSink.SetConsumeError(expectedErr) @@ -651,7 +652,7 @@ func TestOTLPReceiverTrace_HandleNextConsumerResponse(t *testing.T) { receiverTag string exportFn func( cc *grpc.ClientConn, - td pdata.Traces) error + td ptrace.Traces) error }{ { receiverTag: "trace", @@ -885,7 +886,7 @@ func compressGzip(body []byte) (*bytes.Buffer, error) { return &buf, nil } -type senderFunc func(td pdata.Traces) +type senderFunc func(td ptrace.Traces) func TestShutdown(t *testing.T) { endpointGrpc := testutil.GetAvailableLocalAddress(t) @@ 
-915,11 +916,11 @@ func TestShutdown(t *testing.T) { doneSignalGrpc := make(chan bool) doneSignalHTTP := make(chan bool) - senderGrpc := func(td pdata.Traces) { + senderGrpc := func(td ptrace.Traces) { // Ignore error, may be executed after the receiver shutdown. _ = exportTraces(conn, td) } - senderHTTP := func(td pdata.Traces) { + senderHTTP := func(td ptrace.Traces) { // Send request via OTLP/HTTP. traceBytes, err2 := otlp.NewProtobufTracesMarshaler().MarshalTraces(td) if err2 != nil { @@ -989,7 +990,7 @@ loop: close(doneSignal) } -func exportTraces(cc *grpc.ClientConn, td pdata.Traces) error { +func exportTraces(cc *grpc.ClientConn, td ptrace.Traces) error { acc := otlpgrpc.NewTracesClient(cc) req := otlpgrpc.NewTracesRequest() req.SetTraces(td) diff --git a/receiver/scraperhelper/scraper.go b/receiver/scraperhelper/scraper.go index 2c6c75296c6..79347970b90 100644 --- a/receiver/scraperhelper/scraper.go +++ b/receiver/scraperhelper/scraper.go @@ -20,15 +20,15 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) var errNilFunc = errors.New("nil scrape func") // ScrapeFunc scrapes metrics. -type ScrapeFunc func(context.Context) (pdata.Metrics, error) +type ScrapeFunc func(context.Context) (pmetric.Metrics, error) -func (sf ScrapeFunc) Scrape(ctx context.Context) (pdata.Metrics, error) { +func (sf ScrapeFunc) Scrape(ctx context.Context) (pmetric.Metrics, error) { return sf(ctx) } @@ -38,7 +38,7 @@ type Scraper interface { // ID returns the scraper id. ID() config.ComponentID - Scrape(context.Context) (pdata.Metrics, error) + Scrape(context.Context) (pmetric.Metrics, error) } // ScraperOption apply changes to internal options. 
diff --git a/receiver/scraperhelper/scrapercontroller.go b/receiver/scraperhelper/scrapercontroller.go index 8b96c379d87..6d2abde91e0 100644 --- a/receiver/scraperhelper/scrapercontroller.go +++ b/receiver/scraperhelper/scrapercontroller.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" "go.opentelemetry.io/collector/obsreport" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -183,7 +183,7 @@ func (sc *controller) startScraping() { // Scrapers, records observability information, and passes the scraped metrics // to the next component. func (sc *controller) scrapeMetricsAndReport(ctx context.Context) { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() for _, scraper := range sc.scrapers { scrp := obsreport.NewScraper(obsreport.ScraperSettings{ diff --git a/receiver/scraperhelper/scrapercontroller_test.go b/receiver/scraperhelper/scrapercontroller_test.go index 0adff240e60..6314d844a53 100644 --- a/receiver/scraperhelper/scrapercontroller_test.go +++ b/receiver/scraperhelper/scrapercontroller_test.go @@ -31,7 +31,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" "go.opentelemetry.io/collector/obsreport/obsreporttest" "go.opentelemetry.io/collector/receiver/scrapererror" ) @@ -62,17 +62,17 @@ type testScrapeMetrics struct { err error } -func (ts *testScrapeMetrics) scrape(_ context.Context) (pdata.Metrics, error) { +func (ts *testScrapeMetrics) scrape(_ context.Context) (pmetric.Metrics, error) { ts.timesScrapeCalled++ ts.ch <- ts.timesScrapeCalled if ts.err != nil { - return pdata.Metrics{}, ts.err + return pmetric.Metrics{}, ts.err } - md := 
pdata.NewMetrics() + md := pmetric.NewMetrics() metric := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.Gauge().DataPoints().AppendEmpty() return md, nil } diff --git a/service/internal/builder/pipelines_builder_test.go b/service/internal/builder/pipelines_builder_test.go index e67c1ce9487..d354524c2af 100644 --- a/service/internal/builder/pipelines_builder_test.go +++ b/service/internal/builder/pipelines_builder_test.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/internal/testcomponents" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" "go.opentelemetry.io/collector/service/servicetest" ) @@ -154,7 +154,7 @@ func TestBuildPipelines_BuildVarious(t *testing.T) { } // Send one custom data. - log := pdata.Logs{} + log := plog.Logs{} require.NoError(t, processor.firstLC.ConsumeLogs(context.Background(), log)) // Now verify received data. diff --git a/service/internal/builder/receivers_builder_test.go b/service/internal/builder/receivers_builder_test.go index 0aa6d43cd09..644ac165410 100644 --- a/service/internal/builder/receivers_builder_test.go +++ b/service/internal/builder/receivers_builder_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/internal/testcomponents" "go.opentelemetry.io/collector/internal/testdata" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" "go.opentelemetry.io/collector/service/servicetest" ) @@ -235,7 +235,7 @@ func TestBuildReceivers_BuildCustom(t *testing.T) { } // Send one data. 
- log := pdata.Logs{} + log := plog.Logs{} producer := receiver.receiver.(*testcomponents.ExampleReceiverProducer) require.NoError(t, producer.ConsumeLogs(context.Background(), log)) diff --git a/service/internal/fanoutconsumer/logs.go b/service/internal/fanoutconsumer/logs.go index 9dde97673a1..e1972900c0f 100644 --- a/service/internal/fanoutconsumer/logs.go +++ b/service/internal/fanoutconsumer/logs.go @@ -22,7 +22,7 @@ import ( "go.uber.org/multierr" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/plog" ) // NewLogs wraps multiple log consumers in a single one. @@ -64,8 +64,8 @@ func (lsc *logsConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -// ConsumeLogs exports the pdata.Logs to all consumers wrapped by the current one. -func (lsc *logsConsumer) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +// ConsumeLogs exports the plog.Logs to all consumers wrapped by the current one. +func (lsc *logsConsumer) ConsumeLogs(ctx context.Context, ld plog.Logs) error { var errs error // Initially pass to clone exporter to avoid the case where the optimization of sending // the incoming data to a mutating consumer is used that may change the incoming data before diff --git a/service/internal/fanoutconsumer/metrics.go b/service/internal/fanoutconsumer/metrics.go index 371643252b5..75014d3e67d 100644 --- a/service/internal/fanoutconsumer/metrics.go +++ b/service/internal/fanoutconsumer/metrics.go @@ -20,7 +20,7 @@ import ( "go.uber.org/multierr" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/pmetric" ) // NewMetrics wraps multiple metrics consumers in a single one. 
@@ -62,8 +62,8 @@ func (msc *metricsConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -// ConsumeMetrics exports the pdata.Metrics to all consumers wrapped by the current one. -func (msc *metricsConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +// ConsumeMetrics exports the pmetric.Metrics to all consumers wrapped by the current one. +func (msc *metricsConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { var errs error // Initially pass to clone exporter to avoid the case where the optimization of sending // the incoming data to a mutating consumer is used that may change the incoming data before diff --git a/service/internal/fanoutconsumer/traces.go b/service/internal/fanoutconsumer/traces.go index 04dcd9fb5e8..b20e0521731 100644 --- a/service/internal/fanoutconsumer/traces.go +++ b/service/internal/fanoutconsumer/traces.go @@ -20,7 +20,7 @@ import ( "go.uber.org/multierr" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/model/ptrace" ) // NewTraces wraps multiple trace consumers in a single one. @@ -62,8 +62,8 @@ func (tsc *tracesConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -// ConsumeTraces exports the pdata.Traces to all consumers wrapped by the current one. -func (tsc *tracesConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +// ConsumeTraces exports the ptrace.Traces to all consumers wrapped by the current one. +func (tsc *tracesConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { var errs error // Initially pass to clone exporter to avoid the case where the optimization of sending // the incoming data to a mutating consumer is used that may change the incoming data before