From 9e43537d9b7f622eedf011272a184735015c4b68 Mon Sep 17 00:00:00 2001 From: ChrsMark Date: Wed, 5 Jun 2024 12:01:26 +0300 Subject: [PATCH] [receiver/kubeletstats] Add k8s.pod.cpu.node.utilization metric Signed-off-by: ChrsMark --- .chloggen/add_k8s_pod_utilization.yaml | 27 +++ receiver/kubeletstatsreceiver/README.md | 8 +- receiver/kubeletstatsreceiver/config.go | 2 + receiver/kubeletstatsreceiver/config_test.go | 28 +++ .../kubeletstatsreceiver/documentation.md | 8 + .../internal/kubelet/accumulator.go | 2 +- .../internal/metadata/generated_config.go | 4 + .../metadata/generated_config_test.go | 2 + .../internal/metadata/generated_metrics.go | 57 +++++ .../metadata/generated_metrics_test.go | 15 ++ .../internal/metadata/metrics.go | 1 + .../internal/metadata/testdata/config.yaml | 4 + receiver/kubeletstatsreceiver/metadata.yaml | 7 + receiver/kubeletstatsreceiver/scraper.go | 3 +- receiver/kubeletstatsreceiver/scraper_test.go | 6 +- .../kubeletstatsreceiver/testdata/config.yaml | 6 + ...t_scraper_cpu_util_nodelimit_expected.yaml | 216 ++++++++++++++++++ 17 files changed, 390 insertions(+), 6 deletions(-) create mode 100644 .chloggen/add_k8s_pod_utilization.yaml diff --git a/.chloggen/add_k8s_pod_utilization.yaml b/.chloggen/add_k8s_pod_utilization.yaml new file mode 100644 index 000000000000..c301d4e36d90 --- /dev/null +++ b/.chloggen/add_k8s_pod_utilization.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: kubeletstats + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add k8s.pod.cpu.node.utilization metric" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. 
+issues: [33390]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: [user] diff --git a/receiver/kubeletstatsreceiver/README.md b/receiver/kubeletstatsreceiver/README.md index 844e5424a62e..42f1467723a8 100644 --- a/receiver/kubeletstatsreceiver/README.md +++ b/receiver/kubeletstatsreceiver/README.md @@ -218,10 +218,10 @@ receivers: - pod ``` -### Collect k8s.container.cpu.node.utilization as ratio of total node's capacity +### Collect k8s.container.cpu.node.utilization, k8s.pod.cpu.node.utilization as ratio of total node's capacity -In order to calculate the `k8s.container.cpu.node.utilization` metric, the information of the node's capacity -must be retrieved from the k8s API. In this, the `k8s_api_config` needs to be set. +In order to calculate the `k8s.container.cpu.node.utilization` or `k8s.pod.cpu.node.utilization` metrics, the +information of the node's capacity must be retrieved from the k8s API. In this case, the `k8s_api_config` needs to be set. In addition, the node name must be identified properly. 
The `K8S_NODE_NAME` env var can be set using the downward API inside the collector pod spec as follows: @@ -246,6 +246,8 @@ receivers: metrics: k8s.container.cpu.node.utilization: enabled: true + k8s.pod.cpu.node.utilization: + enabled: true ``` ### Optional parameters diff --git a/receiver/kubeletstatsreceiver/config.go b/receiver/kubeletstatsreceiver/config.go index e8a66c8b6731..e2805a3a2a3f 100644 --- a/receiver/kubeletstatsreceiver/config.go +++ b/receiver/kubeletstatsreceiver/config.go @@ -122,6 +122,8 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { func (cfg *Config) Validate() error { if cfg.Metrics.K8sContainerCPUNodeUtilization.Enabled && cfg.NodeName == "" { return errors.New("for k8s.container.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") + } else if cfg.Metrics.K8sPodCPUNodeUtilization.Enabled && cfg.NodeName == "" { + return errors.New("for k8s.pod.cpu.node.utilization node setting is required. Check the readme on how to set the required setting") } return nil } diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go index 5e2fafd845fb..04f4746d9126 100644 --- a/receiver/kubeletstatsreceiver/config_test.go +++ b/receiver/kubeletstatsreceiver/config_test.go @@ -201,6 +201,34 @@ func TestLoadConfig(t *testing.T) { }, expectedValidationErr: "for k8s.container.cpu.node.utilization node setting is required. 
Check the readme on how to set the required setting", }, + { + id: component.NewIDWithName(metadata.Type, "pod_cpu_node_utilization"), + expected: &Config{ + ControllerConfig: scraperhelper.ControllerConfig{ + CollectionInterval: duration, + InitialDelay: time.Second, + }, + ClientConfig: kube.ClientConfig{ + APIConfig: k8sconfig.APIConfig{ + AuthType: "tls", + }, + }, + MetricGroupsToCollect: []kubelet.MetricGroup{ + kubelet.ContainerMetricGroup, + kubelet.PodMetricGroup, + kubelet.NodeMetricGroup, + }, + MetricsBuilderConfig: metadata.MetricsBuilderConfig{ + Metrics: metadata.MetricsConfig{ + K8sPodCPUNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, + }, + ResourceAttributes: metadata.DefaultResourceAttributesConfig(), + }, + }, + expectedValidationErr: "for k8s.pod.cpu.node.utilization node setting is required. Check the readme on how to set the required setting", + }, } for _, tt := range tests { diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 684af59c5b09..adafdf58ddc4 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -458,6 +458,14 @@ The time since the node started | ---- | ----------- | ---------- | ----------------------- | --------- | | s | Sum | Int | Cumulative | true | +### k8s.pod.cpu.node.utilization + +Pod cpu utilization as a ratio of the node's capacity + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.pod.cpu.usage Total CPU usage (sum of all cores per second) averaged over the sample window diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go index b226fb968709..b8d9cc873fea 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go @@ -76,7 +76,7 @@ func (a 
*metricDataAccumulator) podStats(s stats.PodStats) { currentTime := pcommon.NewTimestampFromTime(a.time) addUptimeMetric(a.mbs.PodMetricsBuilder, metadata.PodUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], 0) + addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID], a.metadata.cpuNodeLimit) addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID]) addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime) addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go index 69d37d761839..9db06b4d5c4c 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go @@ -61,6 +61,7 @@ type MetricsConfig struct { K8sNodeNetworkErrors MetricConfig `mapstructure:"k8s.node.network.errors"` K8sNodeNetworkIo MetricConfig `mapstructure:"k8s.node.network.io"` K8sNodeUptime MetricConfig `mapstructure:"k8s.node.uptime"` + K8sPodCPUNodeUtilization MetricConfig `mapstructure:"k8s.pod.cpu.node.utilization"` K8sPodCPUTime MetricConfig `mapstructure:"k8s.pod.cpu.time"` K8sPodCPUUsage MetricConfig `mapstructure:"k8s.pod.cpu.usage"` K8sPodCPUUtilization MetricConfig `mapstructure:"k8s.pod.cpu.utilization"` @@ -188,6 +189,9 @@ func DefaultMetricsConfig() MetricsConfig { K8sNodeUptime: MetricConfig{ Enabled: false, }, + K8sPodCPUNodeUtilization: MetricConfig{ + Enabled: false, + }, K8sPodCPUTime: MetricConfig{ Enabled: true, }, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go 
b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go index 6fa188af811f..01ae22c6f6f1 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go @@ -58,6 +58,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sNodeNetworkErrors: MetricConfig{Enabled: true}, K8sNodeNetworkIo: MetricConfig{Enabled: true}, K8sNodeUptime: MetricConfig{Enabled: true}, + K8sPodCPUNodeUtilization: MetricConfig{Enabled: true}, K8sPodCPUTime: MetricConfig{Enabled: true}, K8sPodCPUUsage: MetricConfig{Enabled: true}, K8sPodCPUUtilization: MetricConfig{Enabled: true}, @@ -139,6 +140,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sNodeNetworkErrors: MetricConfig{Enabled: false}, K8sNodeNetworkIo: MetricConfig{Enabled: false}, K8sNodeUptime: MetricConfig{Enabled: false}, + K8sPodCPUNodeUtilization: MetricConfig{Enabled: false}, K8sPodCPUTime: MetricConfig{Enabled: false}, K8sPodCPUUsage: MetricConfig{Enabled: false}, K8sPodCPUUtilization: MetricConfig{Enabled: false}, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index 22fc836398dc..e5f4e64acfe9 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -1673,6 +1673,55 @@ func newMetricK8sNodeUptime(cfg MetricConfig) metricK8sNodeUptime { return m } +type metricK8sPodCPUNodeUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.cpu.node.utilization metric with initial data. 
+func (m *metricK8sPodCPUNodeUtilization) init() { + m.data.SetName("k8s.pod.cpu.node.utilization") + m.data.SetDescription("Pod cpu utilization as a ratio of the node's capacity") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodCPUNodeUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodCPUNodeUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodCPUNodeUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodCPUNodeUtilization(cfg MetricConfig) metricK8sPodCPUNodeUtilization { + m := metricK8sPodCPUNodeUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sPodCPUTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -2906,6 +2955,7 @@ type MetricsBuilder struct { metricK8sNodeNetworkErrors metricK8sNodeNetworkErrors metricK8sNodeNetworkIo metricK8sNodeNetworkIo metricK8sNodeUptime metricK8sNodeUptime + metricK8sPodCPUNodeUtilization metricK8sPodCPUNodeUtilization metricK8sPodCPUTime metricK8sPodCPUTime metricK8sPodCPUUsage metricK8sPodCPUUsage metricK8sPodCPUUtilization metricK8sPodCPUUtilization @@ -2990,6 +3040,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricK8sNodeNetworkErrors: newMetricK8sNodeNetworkErrors(mbc.Metrics.K8sNodeNetworkErrors), metricK8sNodeNetworkIo: newMetricK8sNodeNetworkIo(mbc.Metrics.K8sNodeNetworkIo), metricK8sNodeUptime: newMetricK8sNodeUptime(mbc.Metrics.K8sNodeUptime), + metricK8sPodCPUNodeUtilization: newMetricK8sPodCPUNodeUtilization(mbc.Metrics.K8sPodCPUNodeUtilization), metricK8sPodCPUTime: newMetricK8sPodCPUTime(mbc.Metrics.K8sPodCPUTime), metricK8sPodCPUUsage: newMetricK8sPodCPUUsage(mbc.Metrics.K8sPodCPUUsage), metricK8sPodCPUUtilization: newMetricK8sPodCPUUtilization(mbc.Metrics.K8sPodCPUUtilization), @@ -3201,6 +3252,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sNodeNetworkErrors.emit(ils.Metrics()) mb.metricK8sNodeNetworkIo.emit(ils.Metrics()) mb.metricK8sNodeUptime.emit(ils.Metrics()) + mb.metricK8sPodCPUNodeUtilization.emit(ils.Metrics()) mb.metricK8sPodCPUTime.emit(ils.Metrics()) mb.metricK8sPodCPUUsage.emit(ils.Metrics()) mb.metricK8sPodCPUUtilization.emit(ils.Metrics()) @@ -3421,6 +3473,11 @@ func (mb *MetricsBuilder) RecordK8sNodeUptimeDataPoint(ts pcommon.Timestamp, val mb.metricK8sNodeUptime.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sPodCPUNodeUtilizationDataPoint adds a data point to k8s.pod.cpu.node.utilization metric. 
+func (mb *MetricsBuilder) RecordK8sPodCPUNodeUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sPodCPUNodeUtilization.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sPodCPUTimeDataPoint adds a data point to k8s.pod.cpu.time metric. func (mb *MetricsBuilder) RecordK8sPodCPUTimeDataPoint(ts pcommon.Timestamp, val float64) { mb.metricK8sPodCPUTime.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index bf8677cef5a9..bea3148e8893 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -203,6 +203,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sNodeUptimeDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sPodCPUNodeUtilizationDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordK8sPodCPUTimeDataPoint(ts, 1) @@ -751,6 +754,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.pod.cpu.node.utilization": + assert.False(t, validatedMetrics["k8s.pod.cpu.node.utilization"], "Found a duplicate in the metrics slice: k8s.pod.cpu.node.utilization") + validatedMetrics["k8s.pod.cpu.node.utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pod cpu utilization as a ratio of the node's capacity", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) 
case "k8s.pod.cpu.time": assert.False(t, validatedMetrics["k8s.pod.cpu.time"], "Found a duplicate in the metrics slice: k8s.pod.cpu.time") validatedMetrics["k8s.pod.cpu.time"] = true diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go index 5ba982b918c0..808d10366f6e 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go @@ -37,6 +37,7 @@ var PodCPUMetrics = CPUMetrics{ Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint, Usage: (*MetricsBuilder).RecordK8sPodCPUUsageDataPoint, Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint, + NodeUtilization: (*MetricsBuilder).RecordK8sPodCPUNodeUtilizationDataPoint, LimitUtilization: (*MetricsBuilder).RecordK8sPodCPULimitUtilizationDataPoint, RequestUtilization: (*MetricsBuilder).RecordK8sPodCPURequestUtilizationDataPoint, } diff --git a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml index 8758f2993976..3f4e11bf5384 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml @@ -67,6 +67,8 @@ all_set: enabled: true k8s.node.uptime: enabled: true + k8s.pod.cpu.node.utilization: + enabled: true k8s.pod.cpu.time: enabled: true k8s.pod.cpu.usage: @@ -214,6 +216,8 @@ none_set: enabled: false k8s.node.uptime: enabled: false + k8s.pod.cpu.node.utilization: + enabled: false k8s.pod.cpu.time: enabled: false k8s.pod.cpu.usage: diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index 7ddbf1475bda..6fc06df02408 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -236,6 +236,13 @@ metrics: gauge: value_type: int attributes: [] + k8s.pod.cpu.node.utilization: + enabled: 
false + description: "Pod cpu utilization as a ratio of the node's capacity" + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.pod.cpu_limit_utilization: enabled: false description: "Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted." diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go index 2fd399f251a7..959029eff251 100644 --- a/receiver/kubeletstatsreceiver/scraper.go +++ b/receiver/kubeletstatsreceiver/scraper.go @@ -83,7 +83,8 @@ func newKubletScraper( nodeLimits: &kubelet.NodeLimits{}, } - if metricsConfig.Metrics.K8sContainerCPUNodeUtilization.Enabled { + if metricsConfig.Metrics.K8sContainerCPUNodeUtilization.Enabled || + metricsConfig.Metrics.K8sPodCPUNodeUtilization.Enabled { ks.nodeInformer = k8sconfig.NewNodeSharedInformer(rOptions.k8sAPIClient, nodeName, 5*time.Minute) } diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 0fb5e4daaefb..47aaba34092d 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -99,6 +99,7 @@ func TestScraperWithNodeUtilization(t *testing.T) { options := &scraperOptions{ metricGroupsToCollect: map[kubelet.MetricGroup]bool{ kubelet.ContainerMetricGroup: true, + kubelet.PodMetricGroup: true, }, k8sAPIClient: client, } @@ -111,6 +112,9 @@ func TestScraperWithNodeUtilization(t *testing.T) { K8sContainerCPUNodeUtilization: metadata.MetricConfig{ Enabled: true, }, + K8sPodCPUNodeUtilization: metadata.MetricConfig{ + Enabled: true, + }, }, ResourceAttributes: metadata.DefaultResourceAttributesConfig(), }, @@ -132,7 +136,7 @@ func TestScraperWithNodeUtilization(t *testing.T) { md, err := r.Scrape(context.Background()) require.NoError(t, err) - require.Equal(t, numContainers, md.DataPointCount()) + require.Equal(t, numContainers+numPods, md.DataPointCount()) expectedFile := 
filepath.Join("testdata", "scraper", "test_scraper_cpu_util_nodelimit_expected.yaml") // Uncomment to regenerate '*_expected.yaml' files diff --git a/receiver/kubeletstatsreceiver/testdata/config.yaml b/receiver/kubeletstatsreceiver/testdata/config.yaml index fdfab83bf0e3..6db7d52e9f6b 100644 --- a/receiver/kubeletstatsreceiver/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/testdata/config.yaml @@ -34,3 +34,9 @@ kubeletstats/container_cpu_node_utilization: metrics: k8s.container.cpu.node.utilization: enabled: true +kubeletstats/pod_cpu_node_utilization: + collection_interval: 10s + metric_groups: [ container, pod, node ] + metrics: + k8s.pod.cpu.node.utilization: + enabled: true diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_cpu_util_nodelimit_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_cpu_util_nodelimit_expected.yaml index 4364df51e4d4..aecca73d94f7 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_cpu_util_nodelimit_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_cpu_util_nodelimit_expected.yaml @@ -1,4 +1,220 @@ resourceMetrics: + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.pod.name + value: + stringValue: go-hello-world-5456b4b8cd-99vxc + - key: k8s.pod.uid + value: + stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: coredns-66bff467f8-58qvv + - key: k8s.pod.uid + value: + stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f + scopeMetrics: + - 
metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.000436807625 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: coredns-66bff467f8-szddj + - key: k8s.pod.uid + value: + stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.000428771875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: etcd-minikube + - key: k8s.pod.uid + value: + stringValue: 5a5fbd34cfb43ee7bee976798370c910 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.002475833875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: kube-apiserver-minikube + - key: k8s.pod.uid + value: + stringValue: 3bef16d65fa74d46458df57d8f6f59af + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.005777949 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + 
attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-minikube + - key: k8s.pod.uid + value: + stringValue: 3016593d20758bbfe68aba26604a8e3d + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.0022641755 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: kube-proxy-v48tf + - key: k8s.pod.uid + value: + stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 3.45535e-05 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: kube-scheduler-minikube + - key: k8s.pod.uid + value: + stringValue: 5795d0c442cb997ff93c49feeb9f6386 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the node's capacity + gauge: + dataPoints: + - asDouble: 0.000452512875 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest + - resource: + attributes: + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.pod.name + value: + stringValue: storage-provisioner + - key: k8s.pod.uid + value: + stringValue: 14bf95e0-9451-4192-b111-807b03163670 + scopeMetrics: + - metrics: + - description: Pod cpu utilization as a ratio of the 
node's capacity + gauge: + dataPoints: + - asDouble: 4.7302625e-05 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.pod.cpu.node.utilization + unit: "1" + scope: + name: otelcol/kubeletstatsreceiver + version: latest - resource: attributes: - key: k8s.container.name