Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding counter continuity #1010

Merged
merged 2 commits into from
Nov 22, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 28 additions & 18 deletions sinks/cortex/cortex.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ func (s *CortexMetricSink) writeMetrics(ctx context.Context, metrics []samplers.
span, _ := trace.StartSpanFromContext(ctx, "")
defer span.ClientFinish(s.traceClient)

wr, updatedCounters := s.makeWriteRequest(metrics)
wr := s.makeWriteRequest(metrics)

data, err := proto.Marshal(wr)
if err != nil {
Expand Down Expand Up @@ -318,10 +318,6 @@ func (s *CortexMetricSink) writeMetrics(ctx context.Context, metrics []samplers.
return fmt.Errorf("cortex_err=\"failed to write batch: error response\", response_code=%d response_body=\"%s\"", r.StatusCode, b)
}

for key, val := range updatedCounters {
s.counters[key] = val
}

return nil
}

Expand All @@ -334,33 +330,47 @@ func (s *CortexMetricSink) FlushOtherSamples(context.Context, []ssf.SSFSample) {

// makeWriteRequest converts a list of samples from a flush into a single
// prometheus remote-write compatible protobuf object
func (s *CortexMetricSink) makeWriteRequest(metrics []samplers.InterMetric) (*prompb.WriteRequest, map[counterMapKey]float64) {
ts := make([]*prompb.TimeSeries, len(metrics))
updatedCounters := map[counterMapKey]float64{}
for i, metric := range metrics {
func (s *CortexMetricSink) makeWriteRequest(metrics []samplers.InterMetric) *prompb.WriteRequest {
var ts []*prompb.TimeSeries
for _, metric := range metrics {
if metric.Type == samplers.CounterMetric && s.convertCountersToMonotonic {
newMetric, counterKey := s.convertToMonotonicCounter(metric)
metric = newMetric
updatedCounters[counterKey] = metric.Value
s.addToMonotonicCounter(metric)
} else {
ts = append(ts, metricToTimeSeries(metric, s.excludedTags))
}
}

ts[i] = metricToTimeSeries(metric, s.excludedTags)
if s.convertCountersToMonotonic {
for key, count := range s.counters {
ts = append(ts, metricToTimeSeries(samplers.InterMetric{
Name: key.name,
Tags: getCounterMapKeyTags(key),
Value: count,
Timestamp: time.Now().Unix(),
}, s.excludedTags))
}
}

return &prompb.WriteRequest{
Timeseries: ts,
}, updatedCounters
}
}

func (s *CortexMetricSink) convertToMonotonicCounter(metric samplers.InterMetric) (samplers.InterMetric, counterMapKey) {
func (s *CortexMetricSink) addToMonotonicCounter(metric samplers.InterMetric) {
key := encodeCounterMapKey(metric)
s.counters[key] += metric.Value
}

func encodeCounterMapKey(metric samplers.InterMetric) counterMapKey {
sort.Strings(metric.Tags)
key := counterMapKey{
return counterMapKey{
name: metric.Name,
tags: strings.Join(metric.Tags, "|"),
}
}

metric.Value += s.counters[key]
return metric, key
func getCounterMapKeyTags(key counterMapKey) []string {
return strings.Split(key.tags, "|")
}

// SetExcludedTags sets the excluded tag names. Any tags with the
Expand Down
53 changes: 53 additions & 0 deletions sinks/cortex/cortex_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,59 @@ func TestMonotonicCounters(t *testing.T) {
assert.Equal(t, 3, matchesDone)
}

// Here we test that a monotonic counter _persists_:
// for a counter to work correctly in prometheus it must be reported
// with every flush (not sparse), so we're making sure that if a
// counter is seen on Flush#1 but not passed to Flush#2, we still
// report its accumulated value with Flush#2.
func TestMonotonicCounterContinuity(t *testing.T) {
	// Listen for prometheus writes
	server := NewTestServer(t)
	defer server.Close()

	// Set up a sink with counter-to-monotonic conversion enabled (last arg)
	sink, err := NewCortexMetricSink(server.URL, 30*time.Second, "", logrus.NewEntry(logrus.New()), "test", map[string]string{}, nil, 15, true)
	assert.NoError(t, err)
	assert.NoError(t, sink.Start(trace.DefaultClient))

	// we'll load the monotonic counters file with _all_ keys and flush it
	jsInput, err := ioutil.ReadFile("testdata/monotonic_counters.json")
	assert.NoError(t, err)
	var allMetrics []samplers.InterMetric
	assert.NoError(t, json.Unmarshal(jsInput, &allMetrics))

	_, err = sink.Flush(context.Background(), allMetrics)
	assert.NoError(t, err)

	// let's load the counters with missing keys and flush a second time
	jsMissingMetricsInput, err := ioutil.ReadFile("testdata/monotonic_counters_missing_keys.json")
	assert.NoError(t, err)
	var missingMetrics []samplers.InterMetric
	assert.NoError(t, json.Unmarshal(jsMissingMetricsInput, &missingMetrics))

	_, err = sink.Flush(context.Background(), missingMetrics)
	assert.NoError(t, err)

	// Expected values per "foo" tag value on the SECOND flush.
	expectedVals := map[string]float64{
		"bar": 200,
		// this counter is missing from the second flush but, because the
		// sink persists counter totals, we should still see this value
		"baz": 150,
		"taz": 100,
	}

	// Inspect the second write (server.history[1]) and check each series
	// carrying a "foo" label against the expected values.
	matchesDone := 0
	for _, data := range server.history[1].data.Timeseries {
		for _, label := range data.Labels {
			if label.Name == "foo" {
				matchesDone++
				assert.Equal(t, expectedVals[label.GetValue()], data.Samples[0].GetValue())
			}
		}
	}

	// All three expected series must have been seen exactly once.
	assert.Equal(t, 3, matchesDone)
}

func TestChunkNumOfMetricsLessThanBatchSize(t *testing.T) {
// Listen for prometheus writes
server := NewTestServer(t)
Expand Down
28 changes: 28 additions & 0 deletions sinks/cortex/testdata/monotonic_counters_missing_keys.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
[
{
"Name": "a.a.counter.1",
"Timestamp": 1,
"Value": 100,
"Tags": [
"foo:bar",
"baz:qux"
],
"Type": 0,
"Message": "",
"HostName": "",
"Sinks": null
},
{
"Name": "a.a.gauge.1",
"Timestamp": 0,
"Value": 100,
"Tags": [
"foo:taz",
"baz:qux"
],
"Type": 1,
"Message": "",
"HostName": "",
"Sinks": null
}
]