Skip to content

Commit

Permalink
Revert "[receiver/prometheus] Don't drop Histograms without buckets (#23448)"

Browse files Browse the repository at this point in the history

This reverts commit 5d410bb.
  • Loading branch information
mx-psi authored Aug 1, 2023
1 parent a3556a4 commit 44c868c
Show file tree
Hide file tree
Showing 5 changed files with 21 additions and 121 deletions.
20 changes: 0 additions & 20 deletions .chloggen/fix_prometheusreceiver_empty-histogram.yaml

This file was deleted.

34 changes: 12 additions & 22 deletions receiver/prometheusreceiver/internal/metricfamily.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,28 +87,27 @@ func (mg *metricGroup) sortPoints() {
}

func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice) {
if !mg.hasCount {
if !mg.hasCount || len(mg.complexValue) == 0 {
return
}

mg.sortPoints()

bucketCount := len(mg.complexValue) + 1
// if the final bucket is +Inf, we ignore it
if bucketCount > 1 && mg.complexValue[bucketCount-2].boundary == math.Inf(1) {
bucketCount--
}

// for OTLP the bounds won't include +inf
bounds := make([]float64, bucketCount-1)
bucketCounts := make([]uint64, bucketCount)
var adjustedCount float64
bounds := make([]float64, len(mg.complexValue)-1)
bucketCounts := make([]uint64, len(mg.complexValue))

pointIsStale := value.IsStaleNaN(mg.sum) || value.IsStaleNaN(mg.count)
for i := 0; i < bucketCount-1; i++ {
bounds[i] = mg.complexValue[i].boundary
adjustedCount = mg.complexValue[i].value

for i := 0; i < len(mg.complexValue); i++ {
if i != len(mg.complexValue)-1 {
// not need to add +inf as OTLP assumes it
bounds[i] = mg.complexValue[i].boundary
} else if mg.complexValue[i].boundary != math.Inf(1) {
// This histogram is missing the +Inf bucket, and isn't a complete prometheus histogram.
return
}
adjustedCount := mg.complexValue[i].value
// Buckets still need to be sent to know to set them as stale,
// but a staleness NaN converted to uint64 would be an extremely large number.
// Setting to 0 instead.
Expand All @@ -120,15 +119,6 @@ func (mg *metricGroup) toDistributionPoint(dest pmetric.HistogramDataPointSlice)
bucketCounts[i] = uint64(adjustedCount)
}

// Add the final bucket based on the total count
adjustedCount = mg.count
if pointIsStale {
adjustedCount = 0
} else if bucketCount > 1 {
adjustedCount -= mg.complexValue[bucketCount-2].value
}
bucketCounts[bucketCount-1] = uint64(adjustedCount)

point := dest.AppendEmpty()

if pointIsStale {
Expand Down
22 changes: 0 additions & 22 deletions receiver/prometheusreceiver/internal/metricfamily_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -218,28 +218,6 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) {
},
wantErr: true,
},
{
name: "histogram without buckets",
metricName: "histogram",
intervalStartTimeMs: 11,
labels: labels.FromMap(map[string]string{"a": "A", "b": "B"}),
scrapes: []*scrape{
{at: 11, value: 66, metric: "histogram_count"},
{at: 11, value: 1004.78, metric: "histogram_sum"},
},
want: func() pmetric.HistogramDataPoint {
point := pmetric.NewHistogramDataPoint()
point.SetCount(66)
point.SetSum(1004.78)
point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds.
point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds.
point.BucketCounts().FromRaw([]uint64{66})
attributes := point.Attributes()
attributes.PutStr("a", "A")
attributes.PutStr("b", "B")
return point
},
},
}

for _, tt := range tests {
Expand Down
34 changes: 3 additions & 31 deletions receiver/prometheusreceiver/internal/transaction_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1218,20 +1218,6 @@ func TestMetricBuilderHistogram(t *testing.T) {
},
wants: func() []pmetric.Metrics {
md0 := pmetric.NewMetrics()
mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
m0 := mL0.AppendEmpty()
m0.SetName("hist_test")
hist0 := m0.SetEmptyHistogram()
hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
pt0 := hist0.DataPoints().AppendEmpty()
pt0.SetCount(3)
pt0.SetSum(100)
pt0.BucketCounts().FromRaw([]uint64{3, 0})
pt0.ExplicitBounds().FromRaw([]float64{20})
pt0.SetTimestamp(tsNanos)
pt0.SetStartTimestamp(startTimestamp)
pt0.Attributes().PutStr("foo", "bar")

return []pmetric.Metrics{md0}
},
},
Expand Down Expand Up @@ -1270,27 +1256,13 @@ func TestMetricBuilderHistogram(t *testing.T) {
inputs: []*testScrapedPage{
{
pts: []*testDataPoint{
createDataPoint("hist_test_sum", 99, nil, "foo", "bar"),
createDataPoint("hist_test_count", 10, nil, "foo", "bar"),
createDataPoint("hist_test_sum", 99, nil),
createDataPoint("hist_test_count", 10, nil),
},
},
},
wants: func() []pmetric.Metrics {
md0 := pmetric.NewMetrics()
mL0 := md0.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
m0 := mL0.AppendEmpty()
m0.SetName("hist_test")
hist0 := m0.SetEmptyHistogram()
hist0.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
pt0 := hist0.DataPoints().AppendEmpty()
pt0.SetCount(10)
pt0.SetSum(99)
pt0.BucketCounts().FromRaw([]uint64{10})
pt0.SetTimestamp(tsNanos)
pt0.SetStartTimestamp(startTimestamp)
pt0.Attributes().PutStr("foo", "bar")

return []pmetric.Metrics{md0}
return []pmetric.Metrics{pmetric.NewMetrics()}
},
},
{
Expand Down
32 changes: 6 additions & 26 deletions receiver/prometheusreceiver/metrics_receiver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1087,8 +1087,8 @@ foo_total 1
func verifyTarget3(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) {
verifyNumValidScrapeResults(t, td, resourceMetrics)
m1 := resourceMetrics[0]
// m1 has 4 metrics + 5 internal scraper metrics
assert.Equal(t, 9, metricsCount(m1))
// m1 has 3 metrics + 5 internal scraper metrics
assert.Equal(t, 8, metricsCount(m1))

wantAttributes := td.attributes

Expand Down Expand Up @@ -1116,17 +1116,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []pmetric.Resourc
},
},
}),
assertMetricPresent("corrupted_hist",
compareMetricType(pmetric.MetricTypeHistogram),
[]dataPointExpectation{
{
histogramPointComparator: []histogramPointComparator{
compareHistogramStartTimestamp(ts1),
compareHistogramTimestamp(ts1),
compareHistogram(10, 100, []uint64{10}),
},
},
}),
assertMetricAbsent("corrupted_hist"),
assertMetricPresent("rpc_duration_seconds",
compareMetricType(pmetric.MetricTypeSummary),
[]dataPointExpectation{
Expand All @@ -1151,8 +1141,8 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []pmetric.Resourc
doCompare(t, "scrape1", wantAttributes, m1, e1)

m2 := resourceMetrics[1]
// m2 has 4 metrics + 5 internal scraper metrics
assert.Equal(t, 9, metricsCount(m2))
// m2 has 3 metrics + 5 internal scraper metrics
assert.Equal(t, 8, metricsCount(m2))

metricsScrape2 := m2.ScopeMetrics().At(0).Metrics()
ts2 := getTS(metricsScrape2)
Expand All @@ -1178,17 +1168,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []pmetric.Resourc
},
},
}),
assertMetricPresent("corrupted_hist",
compareMetricType(pmetric.MetricTypeHistogram),
[]dataPointExpectation{
{
histogramPointComparator: []histogramPointComparator{
compareHistogramStartTimestamp(ts1),
compareHistogramTimestamp(ts2),
compareHistogram(15, 101, []uint64{15}),
},
},
}),
assertMetricAbsent("corrupted_hist"),
assertMetricPresent("rpc_duration_seconds",
compareMetricType(pmetric.MetricTypeSummary),
[]dataPointExpectation{
Expand Down

0 comments on commit 44c868c

Please sign in to comment.