[processor/spanmetrics] Fix getting key from cache error #16024

Closed
16 changes: 16 additions & 0 deletions .chloggen/spanmetric-get-metrickeys-out-cache.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: spanmetricsprocessor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Fix getting metric keys from the cache.
+
+# One or more tracking issues related to the change
+issues: [15688, 15687, 16024]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
53 changes: 30 additions & 23 deletions processor/spanmetricsprocessor/processor.go
@@ -250,8 +250,9 @@ func (p *processorImp) ConsumeTraces(ctx context.Context, traces ptrace.Traces)
 func (p *processorImp) tracesToMetrics(ctx context.Context, traces ptrace.Traces) error {
     p.lock.Lock()

-    p.aggregateMetrics(traces)
-    m, err := p.buildMetrics()
+    // store metricKeys that relate to the current batch of spans received.
+    metricKeys := p.aggregateMetrics(traces)
+    m, err := p.buildMetrics(metricKeys)

     // Exemplars are only relevant to this batch of traces, so must be cleared within the lock,
     // regardless of error while building metrics, before the next batch of spans is received.
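The hunk above threads the metric keys observed in the current batch from aggregation into metric building. Below is a minimal sketch of that pattern, using hypothetical stand-in types rather than the processor's real ones: aggregation returns the batch's keys, and building consumes exactly those keys instead of ranging over long-lived accumulated state.

package main

import "fmt"

type metricKey string

type processor struct {
    // histograms accumulates across batches, like the processor's state.
    histograms map[metricKey]int
}

// aggregate records each span's key and returns only the keys seen in
// this batch, so the caller never touches stale accumulated entries.
func (p *processor) aggregate(spans []string) []metricKey {
    var keys []metricKey
    for _, s := range spans {
        k := metricKey(s)
        p.histograms[k]++
        keys = append(keys, k)
    }
    return keys
}

// build consumes exactly the keys produced by the current batch.
func (p *processor) build(keys []metricKey) error {
    for _, k := range keys {
        count, ok := p.histograms[k]
        if !ok {
            return fmt.Errorf("histogramData not found in histograms by key %q", k)
        }
        fmt.Printf("%s: %d calls\n", k, count)
    }
    return nil
}

func main() {
    p := &processor{histograms: map[metricKey]int{}}
    keys := p.aggregate([]string{"svc-a /login", "svc-b /checkout"})
    if err := p.build(keys); err != nil {
        fmt.Println(err)
    }
}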
@@ -273,16 +274,16 @@ func (p *processorImp) tracesToMetrics(ctx context.Context, traces ptrace.Traces

 // buildMetrics collects the computed raw metrics data, builds the metrics object and
 // writes the raw metrics data into the metrics object.
-func (p *processorImp) buildMetrics() (pmetric.Metrics, error) {
+func (p *processorImp) buildMetrics(metricKeys []metricKey) (pmetric.Metrics, error) {
     m := pmetric.NewMetrics()
     ilm := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
     ilm.Scope().SetName("spanmetricsprocessor")

-    if err := p.collectCallMetrics(ilm); err != nil {
+    if err := p.collectCallMetrics(ilm, metricKeys); err != nil {
         return pmetric.Metrics{}, err
     }

-    if err := p.collectLatencyMetrics(ilm); err != nil {
+    if err := p.collectLatencyMetrics(ilm, metricKeys); err != nil {
         return pmetric.Metrics{}, err
     }

@@ -299,8 +300,16 @@

 // collectLatencyMetrics collects the raw latency metrics, writing the data
 // into the given instrumentation library metrics.
-func (p *processorImp) collectLatencyMetrics(ilm pmetric.ScopeMetrics) error {
-    for key, hist := range p.histograms {
+func (p *processorImp) collectLatencyMetrics(ilm pmetric.ScopeMetrics, metricKeys []metricKey) error {
+    for _, key := range metricKeys {
+        hist, ok := p.histograms[key]
+        if !ok {
+            return fmt.Errorf("histogramData not found in histograms by key %q", key)
+        }
+        dimensions, err := p.getDimensionsByMetricKey(key)
+        if err != nil {
+            return err
+        }
         mLatency := ilm.Metrics().AppendEmpty()
         mLatency.SetName("latency")
         mLatency.SetUnit("ms")
@@ -316,22 +325,23 @@ func (p *processorImp) collectLatencyMetrics(ilm pmetric.ScopeMetrics) error {
         dpLatency.SetCount(hist.count)
         dpLatency.SetSum(hist.sum)
         setExemplars(hist.exemplarsData, timestamp, dpLatency.Exemplars())
-
-        dimensions, err := p.getDimensionsByMetricKey(key)
-        if err != nil {
-            p.logger.Error(err.Error())
-            return err
-        }
-
         dimensions.CopyTo(dpLatency.Attributes())
     }
     return nil
 }

 // collectCallMetrics collects the raw call count metrics, writing the data
 // into the given instrumentation library metrics.
-func (p *processorImp) collectCallMetrics(ilm pmetric.ScopeMetrics) error {
-    for key, hist := range p.histograms {
+func (p *processorImp) collectCallMetrics(ilm pmetric.ScopeMetrics, metricKeys []metricKey) error {
+    for _, key := range metricKeys {
+        hist, ok := p.histograms[key]
+        if !ok {
+            return fmt.Errorf("histogramData not found in histograms by key %q", key)
+        }
+        dimensions, err := p.getDimensionsByMetricKey(key)
+        if err != nil {
+            return err
+        }
         mCalls := ilm.Metrics().AppendEmpty()
         mCalls.SetName("calls_total")
         mCalls.SetEmptySum().SetIsMonotonic(true)
@@ -341,12 +351,6 @@ func (p *processorImp) collectCallMetrics(ilm pmetric.ScopeMetrics) error {
         dpCalls.SetStartTimestamp(p.startTimestamp)
         dpCalls.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
         dpCalls.SetIntValue(int64(hist.count))
-
-        dimensions, err := p.getDimensionsByMetricKey(key)
-        if err != nil {
-            return err
-        }
-
         dimensions.CopyTo(dpCalls.Attributes())
     }
     return nil
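Both collect functions now resolve the histogram data and dimensions up front and fail fast when a key cannot be found. The following is a sketch of that comma-ok lookup pattern in isolation, with illustrative names; in the processor the dimensions live in a bounded cache, so a key from an older batch could otherwise miss.

package main

import "fmt"

type metricKey string

// dimensionsCache stands in for the processor's bounded
// metric-key-to-dimensions cache; a real LRU can evict older keys.
type dimensionsCache map[metricKey]string

func collect(histograms map[metricKey]float64, dims dimensionsCache, keys []metricKey) error {
    for _, key := range keys {
        sum, ok := histograms[key]
        if !ok {
            // Fail fast instead of emitting a data point with no backing data.
            return fmt.Errorf("histogramData not found in histograms by key %q", key)
        }
        attrs, ok := dims[key]
        if !ok {
            return fmt.Errorf("dimensions not found in cache by key %q", key)
        }
        fmt.Printf("key=%s sum=%.1f attrs=%s\n", key, sum, attrs)
    }
    return nil
}

func main() {
    histograms := map[metricKey]float64{"svc-a": 12.5}
    dims := dimensionsCache{"svc-a": "service.name=svc-a"}
    if err := collect(histograms, dims, []metricKey{"svc-a"}); err != nil {
        fmt.Println(err)
    }
}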
@@ -364,7 +368,8 @@ func (p *processorImp) getDimensionsByMetricKey(k metricKey) (pcommon.Map, error
 // Each metric is identified by a key that is built from the service name
 // and span metadata such as operation, kind, status_code and any additional
 // dimensions the user has configured.
-func (p *processorImp) aggregateMetrics(traces ptrace.Traces) {
+func (p *processorImp) aggregateMetrics(traces ptrace.Traces) []metricKey {
+    var metricKeys []metricKey
     for i := 0; i < traces.ResourceSpans().Len(); i++ {
         rspans := traces.ResourceSpans().At(i)
         resourceAttr := rspans.Resource().Attributes()
@@ -390,11 +395,13 @@
             p.keyBuf.Reset()
             buildKey(p.keyBuf, serviceName, span, p.dimensions, resourceAttr)
             key := metricKey(p.keyBuf.String())
+            metricKeys = append(metricKeys, key)
             p.cache(serviceName, span, key, resourceAttr)
             p.updateHistogram(key, latencyInMilliseconds, span.TraceID(), span.SpanID())
         }
     }
+    return metricKeys
 }

 // resetAccumulatedMetrics resets the internal maps used to store created metric data. Also purge the cache for
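A regression-style check for the new error path might look like the following self-contained sketch; the types are hypothetical mirrors of the ones above, not the processor's actual test suite.

package spanmetricssketch

import (
    "fmt"
    "testing"
)

type metricKey string

type processor struct {
    histograms map[metricKey]float64
}

// collect mirrors the fixed behavior: a key missing from the
// histograms map is reported instead of being silently skipped.
func (p *processor) collect(keys []metricKey) error {
    for _, key := range keys {
        if _, ok := p.histograms[key]; !ok {
            return fmt.Errorf("histogramData not found in histograms by key %q", key)
        }
    }
    return nil
}

func TestCollectReportsMissingKey(t *testing.T) {
    p := &processor{histograms: map[metricKey]float64{}}
    if err := p.collect([]metricKey{"unknown"}); err == nil {
        t.Fatal("expected an error for a key absent from histograms")
    }
}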